diff --git "a/5163.jsonl" "b/5163.jsonl" new file mode 100644--- /dev/null +++ "b/5163.jsonl" @@ -0,0 +1,644 @@ +{"seq_id":"270664911","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport csv\n\ncolors = ['r', 'g', 'b', 'c', 'm', 'k']\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nblock_size = [10, 14, 16, 20]\n\njacobi_occupancy = [0.75,0.875,1,0.8125]\nreduction_occupancy = [0.125,0.21,0.25,0.40625]\n\n\nnames = [ 'jacobi_occupancy',\n 'reduction_occupancy']\n\nax.plot(block_size, jacobi_occupancy, marker='o', c=colors[0], alpha=0.75)\nax.plot(block_size, reduction_occupancy, marker='o', c=colors[1], alpha=0.75)\n\nax.set_xlabel('Block Size', fontsize=20)\nax.set_ylabel('Occupancy', fontsize=20)\nax.set_title('Occupancy vs Block Size (2 Kernel)')\nax.grid(True)\n\nplt.legend( (names), loc=0, borderaxespad=0. )\n#####################################\n\nplt.show()\n","sub_path":"report/occupancy_2_kernel.py","file_name":"occupancy_2_kernel.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"188754682","text":"def add_time(start, duration, day_of_week=''):\n ## Parse inputs\n start_hour = int(start.split()[0].split(':')[0])\n start_minute = int(start.split()[0].split(':')[1])\n am_pm = start.split()[1]\n\n duration_hour = int(duration.split(':')[0])\n duration_minute = int(duration.split(':')[1])\n\n ## Days of the week\n week = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']\n if day_of_week: start_day_index = week.index(day_of_week.lower())\n \n ## Add times\n total_days = 0\n end_hour = start_hour + duration_hour\n end_minute = start_minute + duration_minute\n while end_hour >= 11 or end_minute >= 60:\n if end_minute >= 60:\n end_hour += 1\n end_minute -= 60\n if end_hour >= 11:\n if am_pm == 'AM': am_pm = 'PM'\n else:\n am_pm = 'AM'\n total_days += 1\n if end_hour == 12: break\n if end_hour > 12:\n end_hour -= 12\n\n ## Display results\n new_time = f'{end_hour}:{str(end_minute).zfill(2)} {am_pm}'\n\n if day_of_week: new_time += ', ' + week[(start_day_index + total_days) % 7].capitalize()\n\n if total_days == 1: new_time += ' (next day)'\n if total_days > 1: new_time += f' ({total_days} days later)'\n\n return new_time\n","sub_path":"Scientific Computing with Python/Project 2 - Time Calculator/time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"328454134","text":"from twisted.words.protocols import irc\nfrom txircd.utils import CaseInsensitiveDictionary, isValidChannelName, isValidMetadataKey, ModeType, now\nfrom weakref import WeakKeyDictionary\n\nclass IRCChannel(object):\n\tdef __init__(self, ircd, name):\n\t\tif not isValidChannelName(name):\n\t\t\traise InvalidChannelNameError\n\t\tself.ircd = ircd\n\t\tself.name = name[:self.ircd.config.get(\"channel_name_length\", 64)]\n\t\tself.users = WeakKeyDictionary()\n\t\tself.modes = {}\n\t\tself.existedSince = now()\n\t\tself.topic = \"\"\n\t\tself.topicSetter = \"\"\n\t\tself.topicTime = now()\n\t\tself._metadata = CaseInsensitiveDictionary()\n\t\tself.cache = {}\n\t\n\tdef sendUserMessage(self, command, *params, **kw):\n\t\t\"\"\"\n\t\tSends a message to all local users in a channel.\n\t\tAccepts a command and some parameters for that command to send.\n\t\tAccepts any keyword arguments accepted by 
IRCUser.sendMessage.\n\t\tAlso accepts the following keyword arguments:\n\t\t- skip: list of users in the channel to skip when sending the message\n\t\t\"\"\"\n\t\tif \"to\" not in kw:\n\t\t\tkw[\"to\"] = self.name\n\t\tif kw[\"to\"] is None:\n\t\t\tdel kw[\"to\"]\n\t\tuserList = [u for u in self.users.iterkeys() if u.uuid[:3] == self.ircd.serverID]\n\t\tif \"skip\" in kw:\n\t\t\tfor u in kw[\"skip\"]:\n\t\t\t\tif u in userList:\n\t\t\t\t\tuserList.remove(u)\n\t\tkw[\"users\"] = userList\n\t\tkw[\"channels\"] = [self]\n\t\tbaseTags = {}\n\t\tif \"tags\" in kw:\n\t\t\tbaseTags = kw[\"tags\"]\n\t\t\tdel kw[\"tags\"]\n\t\tconditionalTags = {}\n\t\tif \"conditionalTags\" in kw:\n\t\t\tconditionalTags = kw[\"conditionalTags\"]\n\t\t\tdel kw[\"conditionalTags\"]\n\t\tfor user in userList:\n\t\t\tif conditionalTags:\n\t\t\t\ttags = baseTags.copy()\n\t\t\t\taddTags = user.filterConditionalTags(conditionalTags)\n\t\t\t\ttags.update(addTags)\n\t\t\telse:\n\t\t\t\ttags = baseTags\n\t\t\tkw[\"tags\"] = tags\n\t\t\tuser.sendMessage(command, *params, **kw)\n\t\n\tdef sendServerMessage(self, command, *params, **kw):\n\t\t\"\"\"\n\t\tSends a message to all remote servers to which any user in this channel\n\t\tis connected. Accepts a command and some parameters for that command to\n\t\tsend. Also accepts the following keyword arguments:\n\t\t- skipall: list of servers to skip from the network\n\t\t- skiplocal: list of locally-connected servers to which to skip sending\n\t\t after we've determined the closest hop of all the servers to which\n\t\t we're sending\n\t\t\"\"\"\n\t\tservers = set()\n\t\tfor user in self.users.iterkeys():\n\t\t\tif user.uuid[:3] != self.ircd.serverID:\n\t\t\t\tservers.add(self.ircd.servers[user.uuid[:3]])\n\t\tif \"skipall\" in kw:\n\t\t\tfor s in kw[\"skipall\"]:\n\t\t\t\tservers.discard(s)\n\t\tlocalServers = set()\n\t\tfor server in servers:\n\t\t\tnearHop = server\n\t\t\twhile nearHop.nextClosest != self.ircd.serverID:\n\t\t\t\tnearHop = self.ircd.servers[nearHop.nextClosest]\n\t\t\tlocalServers.add(nearHop)\n\t\tif \"skiplocal\" in kw:\n\t\t\tfor s in kw[\"skiplocal\"]:\n\t\t\t\tlocalServers.discard(s)\n\t\tfor server in localServers:\n\t\t\tserver.sendMessage(command, *params, **kw)\n\t\n\tdef setTopic(self, topic, setter):\n\t\t\"\"\"\n\t\tSets the channel topic.\n\t\t\"\"\"\n\t\tif setter in self.ircd.users:\n\t\t\tsource = self.ircd.users[setter].hostmask()\n\t\telif setter == self.ircd.serverID:\n\t\t\tsource = self.ircd.name\n\t\telif setter in self.ircd.servers:\n\t\t\tsource = self.ircd.servers[setter].name\n\t\telse:\n\t\t\treturn False\n\t\tif topic == self.topic:\n\t\t\treturn True\n\t\toldTopic = self.topic\n\t\tself.topic = topic\n\t\tself.topicSetter = source\n\t\tself.topicTime = now()\n\t\tself.ircd.runActionStandard(\"topic\", self, setter, oldTopic, channels=[self])\n\t\treturn True\n\t\n\tdef metadataKeyExists(self, key):\n\t\t\"\"\"\n\t\tChecks whether a specific key exists in the channel's metadata.\n\t\t\"\"\"\n\t\treturn key in self._metadata\n\t\n\tdef metadataKeyCase(self, key):\n\t\t\"\"\"\n\t\tGets the key from the channel's metadata in its original case.\n\t\tReturns None if the key is not present.\n\t\t\"\"\"\n\t\tif key not in self._metadata:\n\t\t\treturn None\n\t\treturn self._metadata[key][0]\n\t\n\tdef metadataValue(self, key):\n\t\t\"\"\"\n\t\tGets the value for the given key in the channel's metadata.\n\t\tReturns None if the key is not present.\n\t\t\"\"\"\n\t\tif key not in self._metadata:\n\t\t\treturn None\n\t\treturn 
self._metadata[key][1]\n\t\n\tdef metadataVisibility(self, key):\n\t\t\"\"\"\n\t\tGets the visibility value for the given key in the channel's metadata.\n\t\tReturns None if the key is not present.\n\t\t\"\"\"\n\t\tif key not in self._metadata:\n\t\t\treturn None\n\t\treturn self._metadata[key][2]\n\t\n\tdef metadataSetByUser(self, key):\n\t\t\"\"\"\n\t\tGets whether the given metadata key/value was set by a user.\n\t\tReturns None if the key is not present.\n\t\t\"\"\"\n\t\tif key not in self._metadata:\n\t\t\treturn None\n\t\treturn self._metadata[key][3]\n\t\n\tdef metadataList(self):\n\t\t\"\"\"\n\t\tReturns the list of metadata keys/values for the channel as a list of\n\t\ttuples in the format\n\t\t[ (key, value, visibility, setByUser) ]\n\t\t\"\"\"\n\t\treturn self._metadata.values()\n\t\n\tdef setMetadata(self, key, value, visibility, setByUser, fromServer = None):\n\t\t\"\"\"\n\t\tSets metadata for the channel. Returns True if the set is successful or\n\t\tFalse if it is not. If the metadata set is caused by a message from a\n\t\tremote server, pass the server object as the fromServer parameter.\n\t\tIf value is None, deletes the key provided.\n\t\t\"\"\"\n\t\tif not isValidMetadataKey(key):\n\t\t\treturn False\n\t\toldData = None\n\t\tif key in self._metadata:\n\t\t\toldData = self._metadata[key]\n\t\tif setByUser and oldData and not oldData[3]:\n\t\t\treturn False\n\t\tif setByUser and self.ircd.runActionUntilValue(\"usercansetmetadata\", key, channels=[self]) is False:\n\t\t\treturn False\n\t\tif value is None:\n\t\t\tdel self._metadata[key]\n\t\telif not visibility:\n\t\t\treturn False\n\t\telse:\n\t\t\tself._metadata[key] = (key, value, visibility, setByUser)\n\t\toldValue = oldData[1] if oldData else None\n\t\tself.ircd.runActionStandard(\"channelmetadataupdate\", self, key, oldValue, value, visibility, setByUser, fromServer, channels=[self])\n\t\treturn True\n\t\n\tdef setModes(self, modes, defaultSource):\n\t\t\"\"\"\n\t\tSets modes on the channel. Accepts modes as a list of tuples in the\n\t\tformat:\n\t\t[ (adding, mode, param, setBy, setTime) ]\n\t\t- adding: True if we're setting the mode; False if unsetting\n\t\t- mode: The mode letter\n\t\t- param: The mode's parameter; None if no parameter is needed for that\n\t\t mode\n\t\t- setBy: Optional, only used for list modes; a human-readable string\n\t\t (typically server name or nick!user@host) for who/what set this\n\t\t mode)\n\t\t- setTime: Optional, only used for list modes; a datetime object\n\t\t containing when the mode was set\n\t\t\n\t\tThe defaultSource is a valid user ID or server ID of someone who set\n\t\tthe modes. 
It is used as the source for announcements about the mode\n\t\tchange and as the default setter for any list modes who do not have the\n\t\tsetBy parameter specified.\n\t\tThe default time for list modes with no setTime specified is now().\n\t\t\"\"\"\n\t\tmodeChanges = []\n\t\tdefaultSourceName = self._sourceName(defaultSource)\n\t\tif defaultSourceName is None:\n\t\t\traise ValueError (\"Source must be a valid user or server ID.\")\n\t\tnowTime = now()\n\t\tfor modeData in modes:\n\t\t\tmode = modeData[1]\n\t\t\tif mode not in self.ircd.channelModeTypes:\n\t\t\t\tcontinue\n\t\t\tsetBy = defaultSourceName\n\t\t\tsetTime = nowTime\n\t\t\tmodeType = self.ircd.channelModeTypes[mode]\n\t\t\tadding = modeData[0]\n\t\t\tif modeType in (ModeType.List, ModeType.ParamOnUnset, ModeType.Param, ModeType.Status):\n\t\t\t\tparam = modeData[2]\n\t\t\telse:\n\t\t\t\tparam = None\n\t\t\tif modeType == ModeType.List:\n\t\t\t\tdataCount = len(modeData)\n\t\t\t\tif dataCount >= 4:\n\t\t\t\t\tsetBy = modeData[3]\n\t\t\t\tif dataCount >= 5:\n\t\t\t\t\tsetTime = modeData[4]\n\t\t\tif modeType == ModeType.Status:\n\t\t\t\tif adding:\n\t\t\t\t\tparamList = self.ircd.channelStatuses[mode][2].checkSet(self, param)\n\t\t\t\telse:\n\t\t\t\t\tparamList = self.ircd.channelStatuses[mode][2].checkUnset(self, param)\n\t\t\telse:\n\t\t\t\tif adding:\n\t\t\t\t\tparamList = self.ircd.channelModes[modeType][mode].checkSet(self, param)\n\t\t\t\telse:\n\t\t\t\t\tparamList = self.ircd.channelModes[modeType][mode].checkUnset(self, param)\n\t\t\tif paramList is None:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tfor parameter in paramList:\n\t\t\t\tif self._applyMode(adding, modeType, mode, parameter, setBy, setTime):\n\t\t\t\t\tmodeChanges.append((adding, mode, parameter, setBy, setTime))\n\t\t\n\t\tself._notifyModeChanges(modeChanges, defaultSource, defaultSourceName)\n\t\treturn modeChanges\n\t\n\tdef setModesByUser(self, user, modes, params, override = False):\n\t\t\"\"\"\n\t\tParses a mode string specified by a user and sets those modes on the\n\t\tchannel.\n\t\tThe user parameter should be the user who set the modes.\n\t\tThe modes parameter is the actual modes string; parameters specified by\n\t\tthe user should be as a list of strings in params.\n\t\tThe override parameter should be used only when all permission checks\n\t\tshould be overridden.\n\t\t\"\"\"\n\t\tadding = True\n\t\tchanges = []\n\t\tsetBy = self._sourceName(user.uuid)\n\t\tsetTime = now()\n\t\tfor mode in modes:\n\t\t\tif len(changes) >= self.ircd.config.get(\"modes_per_line\", 20):\n\t\t\t\tbreak\n\t\t\tif mode == \"+\":\n\t\t\t\tadding = True\n\t\t\t\tcontinue\n\t\t\tif mode == \"-\":\n\t\t\t\tadding = False\n\t\t\t\tcontinue\n\t\t\tif mode not in self.ircd.channelModeTypes:\n\t\t\t\tuser.sendMessage(irc.ERR_UNKNOWNMODE, mode, \"is unknown mode char to me\")\n\t\t\t\tcontinue\n\t\t\tmodeType = self.ircd.channelModeTypes[mode]\n\t\t\tparam = None\n\t\t\tif modeType in (ModeType.List, ModeType.ParamOnUnset, ModeType.Status) or (adding and modeType == ModeType.Param):\n\t\t\t\ttry:\n\t\t\t\t\tparam = params.pop(0)\n\t\t\t\texcept IndexError:\n\t\t\t\t\tif modeType == ModeType.List:\n\t\t\t\t\t\tself.ircd.channelModes[modeType][mode].showListParams(user, self)\n\t\t\t\t\tcontinue\n\t\t\tif modeType == ModeType.Status:\n\t\t\t\tif adding:\n\t\t\t\t\tparamList = self.ircd.channelStatuses[mode][2].checkSet(self, param)\n\t\t\t\telse:\n\t\t\t\t\tparamList = self.ircd.channelStatuses[mode][2].checkUnset(self, param)\n\t\t\telse:\n\t\t\t\tif adding:\n\t\t\t\t\tparamList = 
self.ircd.channelModes[modeType][mode].checkSet(self, param)\n\t\t\t\telse:\n\t\t\t\t\tparamList = self.ircd.channelModes[modeType][mode].checkUnset(self, param)\n\t\t\tif paramList is None:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tfor parameter in paramList:\n\t\t\t\tif len(changes) >= self.ircd.config.get(\"modes_per_line\", 20):\n\t\t\t\t\tbreak\n\t\t\t\tif not override and self.ircd.runActionUntilValue(\"modepermission-channel-{}\".format(mode), self, user, adding, parameter, users=[user], channels=[self]) is False:\n\t\t\t\t\tcontinue\n\t\t\t\tif adding:\n\t\t\t\t\tif modeType == ModeType.Status:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\ttargetUser = self.ircd.users[self.ircd.userNicks[parameter]]\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif targetUser not in self.users:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif mode in self.users[targetUser][\"status\"]:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tstatusLevel = self.ircd.channelStatuses[mode][1]\n\t\t\t\t\t\tif not override and self.userRank(user) < statusLevel and not self.ircd.runActionUntilValue(\"channelstatusoverride\", self, user, mode, parameter, users=[user], channels=[self]):\n\t\t\t\t\t\t\tuser.sendMessage(irc.ERR_CHANOPRIVSNEEDED, self.name, \"You do not have permission to set channel mode +{}\".format(mode))\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tparameter = targetUser.uuid\n\t\t\t\t\telif modeType == ModeType.List:\n\t\t\t\t\t\tif mode in self.modes and len(self.modes[mode]) > self.ircd.config.get(\"channel_listmode_limit\", 128):\n\t\t\t\t\t\t\tuser.sendMessage(irc.ERR_BANLISTFULL, self.name, parameter, \"Channel +{} list is full\".format(mode))\n\t\t\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tif modeType == ModeType.Status:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\ttargetUser = self.ircd.users[self.ircd.userNicks[parameter]]\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif mode not in self.users[targetUser][\"status\"]:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tstatusLevel = self.ircd.channelStatuses[mode][1]\n\t\t\t\t\t\tif not override and self.userRank(user) < statusLevel and not self.ircd.runActionUntilValue(\"channelstatusoverride\", self, user, mode, parameter, users=[user], channels=[self]):\n\t\t\t\t\t\t\tuser.sendMessage(irc.ERR_CHANOPRIVSNEEDED, self.name, \"You do not have permission to set channel mode -{}\".format(mode))\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tparameter = targetUser.uuid\n\t\t\t\tif self._applyMode(adding, modeType, mode, parameter, setBy, setTime):\n\t\t\t\t\tchanges.append((adding, mode, parameter, setBy, setTime))\n\t\tself._notifyModeChanges(changes, user.uuid, setBy)\n\t\treturn changes\n\t\n\tdef _applyMode(self, adding, modeType, mode, parameter, setBy, setTime):\n\t\tif parameter:\n\t\t\tif len(parameter) > 255:\n\t\t\t\treturn False\n\t\t\tif \" \" in parameter:\n\t\t\t\treturn False\n\t\t\n\t\tif adding:\n\t\t\tif modeType == ModeType.Status:\n\t\t\t\ttry:\n\t\t\t\t\ttargetUser = self.ircd.users[parameter]\n\t\t\t\texcept KeyError:\n\t\t\t\t\treturn False\n\t\t\t\tif targetUser not in self.users:\n\t\t\t\t\treturn False\n\t\t\t\tif mode in self.users[targetUser]:\n\t\t\t\t\treturn False\n\t\t\t\tstatusLevel = self.ircd.channelStatuses[mode][1]\n\t\t\t\ttargetStatus = self.users[targetUser][\"status\"]\n\t\t\t\tif mode in targetStatus:\n\t\t\t\t\treturn False\n\t\t\t\tfor index, rank in enumerate(targetStatus):\n\t\t\t\t\tif self.ircd.channelStatuses[rank][1] < statusLevel:\n\t\t\t\t\t\tstatusList = list(targetStatus)\n\t\t\t\t\t\tstatusList.insert(index, 
mode)\n\t\t\t\t\t\tself.users[targetUser][\"status\"] = \"\".join(statusList)\n\t\t\t\t\t\treturn True\n\t\t\t\tself.users[targetUser][\"status\"] += mode\n\t\t\t\treturn True\n\t\t\tif modeType == ModeType.List:\n\t\t\t\tif mode in self.modes:\n\t\t\t\t\tif len(self.modes[mode]) > self.ircd.config.get(\"channel_listmode_limit\", 128):\n\t\t\t\t\t\treturn False\n\t\t\t\t\tfor paramData in self.modes[mode]:\n\t\t\t\t\t\tif parameter == paramData[0]:\n\t\t\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\tself.modes[mode] = []\n\t\t\t\tself.modes[mode].append((parameter, setBy, setTime))\n\t\t\t\treturn True\n\t\t\tif mode in self.modes and self.modes[mode] == parameter:\n\t\t\t\treturn False\n\t\t\tself.modes[mode] = parameter\n\t\t\treturn True\n\t\t\n\t\tif modeType == ModeType.Status:\n\t\t\ttry:\n\t\t\t\ttargetUser = self.ircd.users[parameter]\n\t\t\texcept KeyError:\n\t\t\t\treturn False\n\t\t\tif targetUser not in self.users:\n\t\t\t\treturn False\n\t\t\tif mode not in self.users[targetUser][\"status\"]:\n\t\t\t\treturn False\n\t\t\tself.users[targetUser][\"status\"] = self.users[targetUser][\"status\"].replace(mode, \"\")\n\t\t\treturn True\n\t\tif modeType == ModeType.List:\n\t\t\tif mode not in self.modes:\n\t\t\t\treturn False\n\t\t\tfor index, paramData in enumerate(self.modes[mode]):\n\t\t\t\tif paramData[0] == parameter:\n\t\t\t\t\tdel self.modes[mode][index]\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\tif not self.modes[mode]:\n\t\t\t\tdel self.modes[mode]\n\t\t\treturn True\n\t\tif mode not in self.modes:\n\t\t\treturn False\n\t\tif modeType == ModeType.ParamOnUnset and parameter != self.modes[mode]:\n\t\t\treturn False\n\t\tdel self.modes[mode]\n\t\treturn True\n\t\n\tdef _notifyModeChanges(self, modeChanges, source, sourceName):\n\t\tif not modeChanges:\n\t\t\treturn\n\t\tchannelUsers = []\n\t\tfor user in self.users.iterkeys():\n\t\t\tif user.uuid[:3] == self.ircd.serverID:\n\t\t\t\tchannelUsers.append(user)\n\t\tfor change in modeChanges:\n\t\t\tself.ircd.runActionStandard(\"modechange-channel-{}\".format(change[1]), self, change[3], change[0], change[2], channels=[self])\n\t\tself.ircd.runActionProcessing(\"modemessage-channel\", channelUsers, self, source, sourceName, modeChanges, users=channelUsers, channels=[self])\n\t\tself.ircd.runActionStandard(\"modechanges-channel\", self, source, sourceName, modeChanges, channels=[self])\n\t\n\tdef _sourceName(self, source):\n\t\tif source in self.ircd.users:\n\t\t\treturn self.ircd.users[source].hostmask()\n\t\tif source == self.ircd.serverID:\n\t\t\treturn self.ircd.name\n\t\tif source in self.ircd.servers:\n\t\t\treturn self.ircd.servers[source].name\n\t\treturn None\n\t\n\tdef modeString(self, toUser):\n\t\t\"\"\"\n\t\tGet a user-reportable mode string for the modes set on the channel.\n\t\t\"\"\"\n\t\tmodeStr = [\"+\"]\n\t\tparams = []\n\t\tfor mode in self.modes:\n\t\t\tmodeType = self.ircd.channelModeTypes[mode]\n\t\t\tif modeType not in (ModeType.ParamOnUnset, ModeType.Param, ModeType.NoParam):\n\t\t\t\tcontinue\n\t\t\tif modeType != ModeType.NoParam:\n\t\t\t\tparam = self.ircd.channelModes[modeType][mode].showParam(toUser, self)\n\t\t\t\tif not param:\n\t\t\t\t\tparam = self.modes[mode]\n\t\t\telse:\n\t\t\t\tparam = None\n\t\t\tmodeStr.append(mode)\n\t\t\tif param:\n\t\t\t\tparams.append(param)\n\t\tif params:\n\t\t\treturn \"{} {}\".format(\"\".join(modeStr), \" \".join(params))\n\t\treturn \"\".join(modeStr)\n\t\n\tdef userRank(self, user):\n\t\t\"\"\"\n\t\tGets the user's numeric rank in the 
channel.\n\t\t\"\"\"\n\t\tif user not in self.users:\n\t\t\treturn -1\n\t\tstatus = self.users[user][\"status\"]\n\t\tif not status:\n\t\t\treturn 0\n\t\treturn self.ircd.channelStatuses[status[0]][1]\n\nclass InvalidChannelNameError(Exception):\n\tdef __str__(self):\n\t\treturn \"Invalid character in channel name\"","sub_path":"txircd/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":15985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"130503673","text":"from typing import Dict, List\n\nfrom hearthstone.entities import Entity\nfrom hearthstone.enums import GameTag, Zone, SpellSchool\n\nfrom .action import Action\nfrom .base_entity import BaseEntity\nfrom .hero_entity import HeroEntity\nfrom .spell_entity import SpellEntity\n\n\nclass GameEntity(BaseEntity):\n\n    def __init__(self, entity: Entity):\n        super().__init__(entity)\n        # All heroes\n        self.hero_entities: Dict[int, HeroEntity] = {}\n        # Our minions in board slots 0, 1, 2 (only populated during the combat phase)\n        self.my_hero: List[HeroEntity] = []\n        # Enemy board slots 0, 1, 2\n        self.enemy_hero: List[HeroEntity] = []\n        # In hand (ordered left to right)\n        self.setaside_hero: List[HeroEntity] = []\n        # Dead heroes\n        self.dead_hero: List[HeroEntity] = []\n        # Enemy action list for the current turn\n        self.enemy_action_list = []\n        # Our action list for the current turn\n        self.my_action_list = []\n        # All actions for the current turn\n        self.all_action_list = []\n        # 1 = picking minions, 0 = combat\n        self.action_step_type = 1\n        self.turn = 0  # turn number\n        # Whether moving minions is allowed\n        self.allow_move_minion = 0\n        self.parse_entity()\n\n    def parse_entity(self):\n        if self.entity is None:\n            return\n        super(GameEntity, self).parse_entity()\n        self.action_step_type = self.get_tag(GameTag.ACTION_STEP_TYPE)\n        self.turn = self.get_tag(GameTag.TURN)\n        self.allow_move_minion = self.get_tag(GameTag.ALLOW_MOVE_MINION)\n\n        pass\n\n    def add_hero(self, hero: HeroEntity):\n        self.hero_entities[hero.entity_id] = hero\n        if hero.zone == Zone.PLAY:\n            if hero.own():\n                self.my_hero.append(hero)\n            else:\n                self.enemy_hero.append(hero)\n        elif hero.zone == Zone.SETASIDE:\n            self.setaside_hero.append(hero)\n        elif hero.zone == Zone.GRAVEYARD:\n            if hero.own():\n                self.dead_hero.append(hero)\n\n        self.my_hero.sort(key=lambda x: x.zone_position)\n        self.enemy_hero.sort(key=lambda x: x.zone_position)\n\n    def get_spell_power(self, spell_school: SpellSchool, own=True):\n\n        power_list = [h.spellpower[spell_school] for h in self.my_hero] if own \\\n            else [h.spellpower[spell_school] for h in self.enemy_hero]\n        power = sum(power_list) if len(power_list) else 0\n        # Follow-up processing\n        return power\n\n    def get_action_list(self, own=True):\n        return self.my_action_list if own else self.enemy_action_list\n\n    def get_hero_list(self, own=True):\n        return self.my_hero if own else self.enemy_hero\n\n    def can_combo(self, spell: SpellEntity, spell_school=None, own=True):\n        action_list = self.get_action_list(own)\n        if len(action_list) <= 0:\n            return False\n        if spell_school is None:\n            return action_list[0].entity_id != spell.entity_id\n        else:\n            for action in action_list:\n                if action.spell.entity_id == spell.entity_id:\n                    return False\n                if action.spell.spell_school == spell_school:\n                    return True\n\n    def get_enemy_action(self):\n        if len(self.enemy_action_list):\n            return self.enemy_action_list\n        action = []\n        for h in self.enemy_hero:\n            spell = h.get_enemy_action()\n            action.append(Action(hero=h, spell=spell, target=self.find_min_health()))\n        self.action_list = action\n        return action\n\n    def find_min_health(self, own=True):\n        \"\"\"\n        Find the mercenary with the lowest health on either board (defaults to our side).\n        Args:\n            own: whether to search our side of the board\n        \"\"\"\n        hero_list = self.my_hero if own else self.enemy_hero\n        if len(hero_list) <= 0:\n            return None\n        return min(hero_list, key=lambda x: x.get_health())\n\n    def play(self, hero: HeroEntity, spell: SpellEntity, target: HeroEntity):\n        power = self.get_spell_power(spell.spell_school)\n        spell.play(hero, target)\n        pass\n\n    def do_action(self, action):\n        # Turn start\n        # Spell casting\n        # Damage triggers\n        # Death checks\n        # Deathrattle triggers\n        # Turn end\n        pass\n","sub_path":"entity/game_entity.py","file_name":"game_entity.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"382603957","text":"from tkinter import Frame, Tk, BOTH, Text, Menu, END, filedialog, messagebox, Button\r\n\r\nclass Axiom(Frame):\r\n\r\n    def __init__(self, parent):\r\n        Frame.__init__(self, parent)\r\n\r\n        self.parent = parent\r\n        self.initUI()\r\n\r\n    def initUI(self):\r\n\r\n        self.parent.title(\"Axiom File Fixer\")\r\n        self.pack(fill=BOTH, expand = 1)\r\n\r\n        menubar = Menu(self.parent)\r\n        self.parent.config(menu=menubar)\r\n\r\n        fileMenu = Menu(menubar)\r\n        fileMenu.add_command(label=\"Open\", command=self.onOpen)\r\n        fileMenu.add_command(label='Exit', command=self.parent.quit)\r\n        menubar.add_cascade(label=\"File\", menu=fileMenu)\r\n\r\n        button1 = Button(text='Exit Application', command=self.parent.destroy)\r\n\r\n\r\n        self.txt = Text(self)\r\n        self.txt.pack(fill=BOTH, expand = 1)\r\n    \r\n    def onOpen(self):\r\n\r\n        ftypes = [('Axiom files', '*.DDO'), ('All files', '*')]\r\n        dlg = filedialog.Open(self, filetypes = ftypes)\r\n        fl = dlg.show()\r\n\r\n        if fl != '':\r\n            self.convert(fl)\r\n\r\n    def convert(self, file):\r\n        with open(file) as f:\r\n            read_lines = f.readlines()\r\n            content = list(read_lines)\r\n            header = content[:2]\r\n            data = content[2:-2]\r\n            footer = content[-2:]\r\n            output = [[]]\r\n            for line in data:\r\n                l = line.split(',')\r\n                code = l[10][-6:-1]\r\n                l[3] = code\r\n                j = ','.join(l)\r\n                output.append([j])\r\n\r\n            title = file[:-4] + \"a.DDO\"\r\n            with open(title, \"w\") as f2:\r\n                for line in header:\r\n                    f2.write(line)\r\n                output.pop(0)\r\n                for line in output:\r\n                    f2.write(line[0])\r\n                for line in footer:\r\n                    f2.write(line)\r\n    \r\n        messagebox.askokcancel(\"Title\",\"Completed\")\r\n\r\ndef main():\r\n\r\n    root = Tk()\r\n    ax = Axiom(root)\r\n    root.geometry(\"300x250+300+300\")\r\n    root.mainloop()\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"axiom.py","file_name":"axiom.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"396234969","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom collections import defaultdict\nimport backend_calc \n\n\n\n# Constants:\n## calculation of ECEI window coverage\nR0, r = 1.65, 0.2\n## Model parameters\n# t_crash = 100.0e-6 # [s]\nratio_magn_recon = 10\ndt = 5e-6\nalpha_ECEI = 60*np.pi/180\n## Statistics\nN_stat = 100\n## Save statistics\npath = '/afs/ipp-garching.mpg.de/home/o/osam/Sawtooth_crash/Videos/Simple_ST_model/'\n# Checks\nSave_storage_check = 0 \ntor_symm_check = 1\n\n## Variables\nnu_mode_array = np.linspace(0.5e3,15e3,30) # Hz\nstats_array = np.zeros_like(nu_mode_array)\n\nt_crash_array = np.array([25.0e-6,50.0e-6,100.0e-6,])\n# for t_i in range(len(t_crash_array)):\nfor t_i in range(2,3):\n    Stat_storage = defaultdict(dict)\n    t_crash = t_crash_array[t_i] \n    print(t_crash)\n    t_array = np.linspace(0,t_crash, int(t_crash/dt))\n    # for nu_j in range(len(nu_mode_array)):\n    for nu_j in range(26,27):\n        print(\"processing %g/%g\"%(nu_j+1,len(nu_mode_array)))\n        nu_mode = nu_mode_array[nu_j]\n        v_ST = nu_mode/ratio_magn_recon\n        obs_counter = 0\n        for n_i in range(N_stat):\n            dphi_line_0 = 2*np.pi*random.uniform(0, 1) \n            dphi_ST_0 = 2*np.pi*random.uniform(0, 1)\n            obs_during_rot = False\n            for i in range(len(t_array)):\n                # dphi_line = dphi_line_0 + 2.0*np.pi*t_array[i]*nu_mode\n                # phi_line = np.linspace(0.0*np.pi,2.0*np.pi,n_line)\n                # x_q_res = R0*np.cos(phi_line)+r*np.sin(dphi_line)\n                # y_q_res = R0*np.sin(phi_line)+r*np.cos(dphi_line)\n                # phi_ST = np.linspace(dphi_ST_0,dphi_ST_0+2.0*np.pi*t_array[i]*v_ST,n_line)\n                # x_ST = R0*np.cos(phi_ST - dphi_line)+r*np.sin(dphi_line)\n                # y_ST = R0*np.sin(phi_ST - dphi_line)+r*np.cos(dphi_line)\n                observation = backend_calc.calc(alpha_ECEI,R0,r,nu_mode,v_ST,dphi_line_0,dphi_ST_0,t_array[i],tor_symm_check)[-1]\n                if observation == True:\n                    obs_during_rot = True\n\n                # print(observation)\n\n            if obs_during_rot == True:\n                obs_counter += 1\n            else:\n                print(\"Not observed! dphi_line_0 = %g, dphi_ST_0 = %g\"%(dphi_line_0,dphi_ST_0))\n\n            # print('dphi_obs = %g' %(dphi_obs_max*180/np.pi))\n            # print('observ_counter = %g/%g ' %(obs_counter,N_stat))\n        stats_array[nu_j] = float(obs_counter)/N_stat\n\n    print(stats_array)\n\n    Stat_storage['t_crash'] = t_crash \n    Stat_storage['ratio_magn_recon'] = ratio_magn_recon\n    Stat_storage['dt'] = dt\n    Stat_storage['R0'] = R0\n    Stat_storage['r'] = r\n    Stat_storage['alpha_ECEI'] = round(alpha_ECEI*180/np.pi)\n    Stat_storage['N_stat'] = N_stat\n    Stat_storage['nu_mode_array'] = nu_mode_array\n    Stat_storage['stats_array'] = stats_array\n\n    if (Save_storage_check == 1):\n        import pickle\n        if (tor_symm_check == 0):\n            file = open('/afs/ipp-garching.mpg.de/home/o/osam/workspace/python2_projects/Simple_ST_model/tor_loc_t_crash_%02d_alpha_%02d.pickle'%(int(t_crash*1e6),round(alpha_ECEI*180/np.pi)), 'wb')\n            pickle.dump(Stat_storage, file)\n        if (tor_symm_check == 1):\n            file = open('/afs/ipp-garching.mpg.de/home/o/osam/workspace/python2_projects/Simple_ST_model/tor_glob_t_crash_%02d_alpha_%02d.pickle'%(int(t_crash*1e6),round(alpha_ECEI*180/np.pi)), 'wb')\n            pickle.dump(Stat_storage, file)\n        file.close()\n","sub_path":"python3_projects/Simple_ST_model/check_model.py","file_name":"check_model.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"202850653","text":"#\n# @lc app=leetcode.cn id=31 lang=python3\n#\n# [31] Next Permutation\n#\n# https://leetcode-cn.com/problems/next-permutation/description/\n#\n# algorithms\n# Medium (30.16%)\n# Likes: 190\n# Dislikes: 0\n# Total Accepted: 13.2K\n# Total Submissions: 42.7K\n# Testcase Example: '[1,2,3]'\n#\n# Implement next permutation, which rearranges the given sequence of numbers\n# into the lexicographically next greater permutation.\n# \n# If no such greater permutation exists, rearrange the numbers into the lowest\n# possible order (i.e., sorted in ascending order).\n# \n# The replacement must be done in place, using only constant extra memory.\n# \n# Some examples follow; inputs are in the left column and the corresponding\n# outputs are in the right column.\n# 1,2,3 → 1,3,2\n# 3,2,1 → 1,2,3\n# 1,1,5 → 1,5,1\n# \n#\nclass Solution:\n    def nextPermutation(self, nums):\n        length = len(nums)\n        # Scan from the back for the first position where the sequence stops being non-increasing,\n        # i.e. for 1,3,5,4,2 this stops at the position of the 5\n        for i in range(length-1, -1, -1):\n            if i ==0:\n                nums.reverse()\n                break\n            if nums[i] > nums[i-1]:\n                for j in range(length-1, i-1,-1):\n                    if nums[j]> nums[i-1]:\n                        # Swap\n                        nums[i-1], nums[j] = nums[j], nums[i-1]\n                        break\n                # Reverse the suffix after the swap so it is in ascending order\n                nums[i::] = nums[length:i-1:-1]\n                break\n        \n","sub_path":"Day19_下一个排列/31.下一个排列.py","file_name":"31.下一个排列.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"645601856","text":"import nltk\nimport pickle\nfrom sklearn.naive_bayes import GaussianNB\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.classify.svm import SvmClassifier\nfrom sklearn import svm\nfrom nltk.corpus import stopwords \nimport numpy as np\nfrom numpy import *\nimport re\n\ndef dialogue_act_features(post, tweet_tokenizer):\n    #print post\n    features = {}\n    for word in tweet_tokenizer.tokenize(post):\n        features['contains({})'.format(word.lower())] = True\n    return features\n\ndef train_classifier(classifier_name, train_set):\n\n\tif classifier_name==\"naive_bayes\":\n\t\tclassifier = nltk.NaiveBayesClassifier.train(train_set)\n\tif classifier_name==\"svm\":\n\t\tclassifier = svm.SVC(gamma='scale')\n\t\tclassifier.fit(train_set)\n\n\treturn classifier\n\n\ndef build_vocab(text, vocab, tweet_tokenizer, stop_words, question_words):\n\n\ttext = re.sub(r'[^a-zA-Z\\\s]', u'', text)\n\tfor word in tweet_tokenizer.tokenize(text):\n\t\tword = word.lower()\n\t\tif word not in stop_words or word in question_words: \n\t\t\tif word not in vocab:\n\t\t\t\tvocab[word] = 1\n\t\t\telse:\n\t\t\t\tvocab[word] += 1\n\n\treturn vocab\n\ndef process_post(post, stop_words, question_words, tweet_tokenizer):\n\ttext = {}\n\tpost = re.sub(r'[^a-zA-Z\\\s]', u'', post)\n\tfor word in tweet_tokenizer.tokenize(post):\n\t\tword = word.lower()\n\t\tif word not in stop_words or word in question_words:\n\t\t\tif word not in text:\n\t\t\t\ttext[word] = 1\n\t\t\telse:\n\t\t\t\ttext[word] += 1\n\n\treturn text\n\ndef build_dataset(posts, word_value_dict, class_value_dict, vocab_size, stop_words, question_words, tweet_tokenizer=None):\n\n\t# process_post needs a tokenizer; build one here if the caller did not pass one in\n\tif tweet_tokenizer is None:\n\t\ttweet_tokenizer = TweetTokenizer()\n\tcount = 0\n\tfor post in posts:\t\n\t\tif count==0:\n\t\t\tdataset = np.zeros((1,vocab_size+1))\n\t\t\ttext = process_post(post.text, stop_words, question_words, tweet_tokenizer)\n\t\t\tfor key in text:\n\t\t\t\tif key in word_value_dict:\n\t\t\t\t\tdataset[0][word_value_dict[key]] = text[key]\n\n\t\t\tdataset[0][vocab_size] = class_value_dict[post.get('class')]\n\t\t\tcount += 1\n\n\t\telse:\n\t\t\tvector = np.zeros((1,vocab_size+1))\n\t\t\ttext = process_post(post.text, stop_words, question_words, tweet_tokenizer)\n\t\t\tfor key in text:\n\t\t\t\tif key in word_value_dict:\n\t\t\t\t\tvector[0][word_value_dict[key]] = text[key]\n\n\t\t\t#print class_value_dict[post.get('class')]\n\t\t\tvector[0][vocab_size] = class_value_dict[post.get('class')]\n\t\t\t\n\t\t\tdataset = np.concatenate((dataset, vector), axis=0)\n\n\treturn dataset\n\nif __name__ == '__main__':\n\n\tnltk.download('nps_chat')\n\tposts = nltk.corpus.nps_chat.xml_posts()\n\ttweet_tokenizer = TweetTokenizer()\n\tvocab = {}\n\tstop_words = set(stopwords.words('english'))\n\tvocab_size = 2000\n\n\tquestion_words = ['what', 'where', 'when','how','why','did','do','does','have','has','am','is','are','can','could','may','would','will','should',\n\t\"didn't\",\"doesn't\",\"haven't\",\"isn't\",\"aren't\",\"can't\",\"couldn't\",\"wouldn't\",\"won't\",\"shouldn't\",'?']\n\n\tclasses = {}\n\tfor post in posts:\n\t\tclass_name = post.get('class')\n\t\tif class_name not in classes:\n\t\t\tclasses[class_name] = True\n\t\tvocab = build_vocab(post.text, vocab, tweet_tokenizer, stop_words, question_words)\n\n\tclass_value_dict = {}\n\tcount = 1\n\tfor key in sorted(classes):\n\t\tclass_value_dict[key] = count\n\t\tcount += 1\n\n\t#print len(vocab)\n\t#print len(classes)\n\t\n\tword_value_dict = {}\n\tsorted_vocab = sorted((value, key) for (key,value) in vocab.items())\n\tresult = sorted_vocab[(-1)*vocab_size:]\n\tcount = 0\n\tfor element in reversed(result):\n\t\tif element[1] not in
word_value_dict:\n\t\t\tword_value_dict[element[1]] = count\n\t\t\tcount += 1\n\n\t#print \"word_value_dict: \", word_value_dict\n\t#print \"class_value_dict: \", class_value_dict\n\n\tdataset = build_dataset(posts, word_value_dict, class_value_dict, vocab_size, stop_words, question_words)\n\n\tprint (dataset.shape)\n\n\t#dataset = np.asarray(dataset)\n\t#np.savetxt(\"dataset.csv\", dataset.astype(int), fmt='%i', delimiter=\",\")\n\n\tfile_pointer = open('word_to_dict_file.txt', 'w')\n\tfor key in word_value_dict:\n\t\tfile_pointer.write(key + \",\" + str(word_value_dict[key]))\n\t\tfile_pointer.write(\"\\n\")\n\n\tfile_pointer.close()\n\n\t'''data = [(dialogue_act_features(post.text, tweet_tokenizer), post.get('class')) for post in posts]\n\t\n\ttest_batch_size = int(len(data) * 0.1)\n\ttrain_set, test_set = data[test_batch_size:], data[:test_batch_size]\n\n\tclassifier = train_classifier(\"svm\", train_set)\n\n\tprint(nltk.classify.accuracy(classifier, test_set))'''\n\n\n\n\n\n\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"56468538","text":"# Circles.py\n# Prints the circumference and area of a circle based off of an input diameter\n\n# Imports math library\nimport math\n\n# Prompts the user for the diameter\ndiameter = float(input(\"Enter the diameter of a circle: \"))\n\n# Computes the radius of the circle\nradius = diameter / 2\n\n# Computes the circumference of the circle\ncircumference = 2 * math.pi * radius\n\n# Computes the area of the circle\narea = math.pi * math.pow(radius, 2)\n\n\n# Prints the circumference and area of the circle\nprint(\"The circle's circumference is {:.2f}.\".format(circumference))\nprint(\"The circle's area is {:.2f}.\".format(area))\n","sub_path":"Circles.py","file_name":"Circles.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"437643031","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch_geometric.nn as pyg_nn\n\n\nclass GINStack(nn.Module):\n def __init__(\n self, input_dim: int, output_dim: int, hidden_dim: int, num_conv_layers: int\n ):\n super(GINStack, self).__init__()\n self.num_conv_layers = num_conv_layers\n self.dropout = 0.25\n self.hidden_dim = hidden_dim\n self.convs = nn.ModuleList()\n self.convs.append(self.build_conv_model(input_dim, self.hidden_dim))\n self.lns = nn.ModuleList()\n for l in range(self.num_conv_layers):\n self.convs.append(self.build_conv_model(self.hidden_dim, self.hidden_dim))\n self.lns.append(nn.LayerNorm(self.hidden_dim))\n\n # post-message-passing\n self.post_mp = nn.Sequential(\n nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.Dropout(self.dropout),\n nn.Linear(self.hidden_dim, output_dim),\n )\n\n def build_conv_model(self, input_dim, hidden_dim):\n # refer to pytorch geometric nn module for different implementation of GNNs.\n return pyg_nn.GINConv(\n nn.Sequential(\n nn.Linear(input_dim, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, hidden_dim),\n )\n )\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n\n for i in range(self.num_conv_layers):\n x = self.convs[i](x, edge_index)\n x = F.relu(x)\n if not i == self.num_conv_layers - 1:\n x = self.lns[i](x)\n\n x = pyg_nn.global_mean_pool(x, batch)\n\n x = self.post_mp(x)\n\n return x\n\n def loss(self, pred, value):\n pred_shape 
= pred.shape\n value_shape = value.shape\n if pred_shape != value_shape:\n value = torch.reshape(value, pred_shape)\n return F.mse_loss(pred, value)\n\n def loss_rmse(self, pred, value):\n pred_shape = pred.shape\n value_shape = value.shape\n if pred_shape != value_shape:\n value = torch.reshape(value, pred_shape)\n return torch.sqrt(F.mse_loss(pred, value))\n\n def __str__(self):\n return \"GINStack\"\n","sub_path":"models/GINStack.py","file_name":"GINStack.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"115558276","text":"from django.forms import SelectMultiple, HiddenInput\nfrom django_filters import MultipleChoiceFilter\nfrom service_catalog.models import Instance, Service\nfrom service_catalog.models.instance import InstanceState\nfrom utils.squest_filter import SquestFilter\n\n\nclass InstanceFilter(SquestFilter):\n class Meta:\n model = Instance\n fields = ['name', 'id', 'spoc__username', 'service__id', 'state']\n\n state = MultipleChoiceFilter(\n choices=InstanceState.choices,\n widget=SelectMultiple(attrs={'data-live-search': \"true\"}))\n\n service__id = MultipleChoiceFilter(\n widget=SelectMultiple(attrs={'data-live-search': \"true\"}))\n\n def __init__(self, *args, **kwargs):\n super(InstanceFilter, self).__init__(*args, **kwargs)\n self.filters['spoc__username'].field.label = 'SPOC (Name)'\n self.filters['service__id'].field.label = 'Type'\n self.filters['id'].field.widget = HiddenInput()\n self.filters['service__id'].field.choices = [(service.id, service.name) for service in Service.objects.all()]\n","sub_path":"service_catalog/filters/instance_filter.py","file_name":"instance_filter.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"396560896","text":"from PyQt5.QtCore import Qt, QObject, QEvent\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QPushButton, QLabel, QCheckBox, \\\n QLineEdit, QMessageBox, QGridLayout, QRadioButton, QButtonGroup, QHBoxLayout\n\nfrom utils.utils import Logger\n\n\nclass AssignmentFrame(Logger, QWidget):\n name = \"AssignmentFrame\"\n\n def __init__(self, parent):\n\n # noinspection PyArgumentList\n super().__init__(parent=parent)\n\n self.layout = QVBoxLayout()\n\n self.next_button = QPushButton(\"Next\")\n self.previous_button = QPushButton(\"Previous\")\n\n self.group = QButtonGroup()\n\n self.group.addButton(self.previous_button)\n self.group.addButton(self.next_button)\n\n self.parameters = dict()\n\n self.error = None\n\n self.setup_done = False\n self.setup()\n\n def setup(self):\n\n game_param = self.parent().get_game_parameters()\n\n # noinspection PyUnusedLocal\n roles = \\\n [\"firm\" for i in range(game_param[\"n_firms\"])] \\\n + [\"customer\" for i in range(game_param[\"n_customers\"])]\n\n n_agents = len(roles)\n\n labels = (\"Server id\", \"Firm \" + \" Customer\", \"Bot\")\n\n # noinspection PyUnusedLocal\n self.parameters[\"assign\"] = [[] for i in range(n_agents)]\n\n # ----- check if an old config exists --------- #\n\n old_assign = self.parent().mod.controller.data.param[\"assignment\"]\n\n if len(old_assign) != len(self.parameters[\"assign\"]):\n self.show_warning(msg=\"assignment.json not matching game.json config file!\")\n self.new_setup(n_agents, roles)\n else:\n self.load_setup(old_assign)\n\n # --------- fill layout ----------------------------------- #\n\n self.fill_layout(labels, n_agents)\n\n # noinspection 
PyUnresolvedReferences\n self.next_button.clicked.connect(self.push_next_button)\n # noinspection PyUnresolvedReferences\n self.previous_button.clicked.connect(self.push_previous_button)\n\n self.setup_done = True\n\n def fill_layout(self, labels, n_agents):\n\n # prepare layout\n grid_layout = QGridLayout()\n\n # add labels\n for y, label in enumerate(labels):\n grid_layout.addWidget(QLabel(label), 0, y)\n\n # grid layout coordinates\n coordinates = [(x, y) for x in range(1, n_agents + 1) for y in range(len(labels))]\n\n # parameters index\n index = [(i, j) for i in range(n_agents) for j in range(len(labels))]\n\n for (i, j), (x, y) in zip(index, coordinates):\n self.parameters[\"assign\"][i][j].add_to_grid_layout(grid_layout, x, y)\n\n horizontal_layout = QHBoxLayout()\n horizontal_layout.addWidget(self.previous_button, alignment=Qt.AlignCenter)\n horizontal_layout.addWidget(self.next_button, alignment=Qt.AlignCenter)\n\n self.layout.addLayout(grid_layout)\n self.layout.addLayout(horizontal_layout)\n\n self.setLayout(self.layout)\n\n def load_setup(self, assignment):\n\n for i, (server_id, role, bot) in enumerate(assignment):\n self.parameters[\"assign\"][i].append(IntParameter(parent=self, value=server_id, idx=i))\n\n self.parameters[\"assign\"][i].append(RadioParameter(checked=role))\n\n self.parameters[\"assign\"][i].append(CheckParameter(parent=self, checked=bot, idx=i))\n\n def new_setup(self, n_agents, roles):\n\n for i in range(n_agents):\n self.parameters[\"assign\"][i].append(IntParameter(parent=self, value=\"Bot\", idx=i))\n\n self.parameters[\"assign\"][i].append(RadioParameter(checked=roles[i]))\n\n self.parameters[\"assign\"][i].append(CheckParameter(parent=self, checked=True, idx=i))\n\n def push_next_button(self):\n\n warning = self.check_assignment_validity()\n\n if warning:\n self.show_warning(msg=warning)\n\n else:\n self.log(\"Push 'next' button.\")\n\n self.parent().show_frame_parameters()\n\n def push_previous_button(self):\n\n if self.error:\n\n self.show_warning(msg=self.error)\n\n else:\n self.log(\"Push 'previous' button.\")\n self.parent().show_frame_load_game_new_game()\n\n def check_assignment_validity(self):\n\n assignment = list(enumerate(self.get_parameters()))\n\n for i, (server_id, role, bot) in assignment:\n\n if server_id != \"Bot\" and not server_id.isdigit():\n return \"Wrong input: '{}'.\".format(server_id)\n\n for j, (other_id, other_role, other_bot) in assignment:\n\n if other_id == server_id and other_id != \"Bot\" and i != j:\n return \"Two identical inputs: '{}'.\".format(server_id)\n\n def get_parameters(self):\n return [[i.get_value(), j.get_value(), k.get_value()] for i, j, k in self.parameters[\"assign\"]]\n\n def show_warning(self, **instructions):\n\n QMessageBox().warning(\n self, \"\", instructions[\"msg\"],\n QMessageBox.Ok\n )\n\n def switch_line_edit(self, idx, from_line):\n\n if self.setup_done:\n\n # get desired widgets\n line_edit = self.parameters[\"assign\"][idx][0].edit\n check_box = self.parameters[\"assign\"][idx][2].check_box\n\n # if line edit (containing server ids) is not enabled\n if not line_edit.isEnabled():\n self.enable_line_edit(line_edit, check_box)\n\n # if line edit is enabled and signal comes from check box\n elif line_edit.isEnabled() and not from_line:\n self.disable_line_edit(line_edit)\n\n @staticmethod\n def disable_line_edit(line_edit):\n\n line_edit.setText(\"Bot\")\n line_edit.setEnabled(False)\n line_edit.setStyleSheet(line_edit.greyed_style)\n\n @staticmethod\n def enable_line_edit(line_edit, 
check_box):\n\n check_box.setChecked(False)\n line_edit.setEnabled(True)\n line_edit.setText(\"\")\n line_edit.setStyleSheet(\"\")\n line_edit.setFocus(True)\n\n def prepare(self):\n\n self.setFocus()\n self.next_button.setFocus()\n self.next_button.setEnabled(True)\n\n # --------------------------------- Widgets used in assignment menu --------------------------------- #\n\n\nclass RadioParameter(object):\n \"\"\"role (firm/customer)\"\"\"\n\n def __init__(self, checked):\n\n self.layout = QHBoxLayout()\n\n self.group = QButtonGroup()\n\n self.firm = QRadioButton()\n self.customer = QRadioButton()\n\n self.setup(checked)\n\n def setup(self, checked):\n\n if checked == \"customer\":\n self.customer.setChecked(True)\n self.customer.setEnabled(False)\n self.firm.setEnabled(False)\n else:\n self.firm.setChecked(True)\n self.firm.setEnabled(False)\n self.customer.setEnabled(False)\n\n self.group.addButton(self.firm)\n self.group.addButton(self.customer)\n\n self.layout.addWidget(self.firm)\n self.layout.addWidget(self.customer)\n\n def get_value(self):\n\n return (\"customer\", \"firm\")[self.firm.isChecked()]\n\n def add_to_grid_layout(self, layout, x, y):\n\n layout.addLayout(self.layout, x, y)\n\n\nclass IntParameter(object):\n \"\"\"server_id\"\"\"\n\n def __init__(self, parent, value, idx):\n\n self.idx = idx\n self.edit = QLineEdit(str(value))\n self.edit.greyed_style = '''color: #808080;\n background-color: #F0F0F0;\n border: 1px solid #B0B0B0;\n border-radius: 2px;'''\n\n self.filter = MouseClick(parent, idx)\n self.setup(value)\n\n def setup(self, value):\n\n if value == \"Bot\":\n self.edit.setEnabled(False)\n self.edit.setStyleSheet(self.edit.greyed_style)\n else:\n self.edit.setEnabled(True)\n\n self.edit.installEventFilter(self.filter)\n\n def get_value(self):\n\n return self.edit.text()\n\n def add_to_grid_layout(self, layout, x, y):\n\n layout.addWidget(self.edit, x, y, alignment=Qt.AlignCenter)\n\n\nclass CheckParameter(object):\n \"\"\"bot or not\"\"\"\n\n def __init__(self, parent, checked, idx):\n self.parent = parent\n self.idx = idx\n self.check_box = QCheckBox()\n self.setup(checked)\n\n def setup(self, checked):\n # noinspection PyUnresolvedReferences\n self.check_box.stateChanged.connect(\n lambda: self.parent.switch_line_edit(idx=self.idx, from_line=False))\n\n self.check_box.setChecked(checked)\n\n def get_value(self):\n return self.check_box.isChecked()\n\n def add_to_grid_layout(self, layout, x, y):\n layout.addWidget(self.check_box, x, y)\n\n\nclass MouseClick(QObject):\n \"\"\"class used in order\n to detect if QLineEdit widget\n has been clicked\"\"\"\n\n def __init__(self, parent, idx):\n super().__init__()\n self.idx = idx\n self.parent = parent\n\n def eventFilter(self, obj, event):\n if event.type() == QEvent.MouseButtonPress:\n self.parent.switch_line_edit(idx=self.idx, from_line=True)\n return True\n\n return False\n","sub_path":"hotelling_server/graphics/assignment_view.py","file_name":"assignment_view.py","file_ext":"py","file_size_in_byte":9162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"463855872","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. invisible:\n _ _ _____ _ _____ _____\n | | | | ___| | | ___/ ___|\n | | | | |__ | | | |__ \\ `--.\n | | | | __|| | | __| `--. 
\\\n \\ \\_/ / |___| |___| |___/\\__/ /\n \\___/\\____/\\_____|____/\\____/\n\nCreated on Aug 3, 2015\n\n███████████████████████████████████████████████████████████████████████████████\n\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\n███████████████████████████████████████████████████████████████████████████████\n\"\"\"\n\n\nimport numpy\n\nfrom veles.memory import Array\nimport veles.prng as prng\nfrom veles.tests import AcceleratedTest, assign_backend\nfrom veles.znicz.lstm import LSTM, GDLSTM\nfrom veles.znicz.tests.unit.gd_numdiff import GDNumDiff\n\n\nclass TestLSTM(AcceleratedTest, GDNumDiff):\n ABSTRACT = True\n\n def setUp(self):\n super(TestLSTM, self).setUp()\n self.precision_threshold = {\n numpy.float64: 1.0e-8,\n numpy.float32: 1.0e-4,\n numpy.float16: 1.0e-2}[self.dtype]\n\n def test_simple(self):\n self._test(True)\n\n def _test_extended(self):\n self._test(False)\n\n def _test(self, simple):\n N = 3\n I = 5\n O = 9\n\n inp = Array(numpy.zeros((N, I), dtype=self._dtype))\n hid = Array(numpy.zeros((N, O), dtype=self._dtype))\n mem = Array(numpy.zeros((N, O), dtype=self._dtype))\n prng.get().fill(inp.mem)\n prng.get().fill(hid.mem)\n prng.get().fill(mem.mem)\n\n lstm = LSTM(self.parent, simple=simple,\n output_sample_shape=hid.shape[1:])\n lstm.input = inp\n lstm.prev_output = hid\n lstm.prev_memory = mem\n\n lstm.initialize(self.device)\n\n # Compute LSTM's output manually\n x = numpy.append(inp.mem, hid.mem, axis=1)\n ig = numpy.dot(x, lstm.input_gate.weights.mem.transpose())\n ig += lstm.input_gate.bias.mem\n ig = 1.0 / (1.0 + numpy.exp(-ig))\n mm = numpy.dot(x, lstm.memory_maker.weights.mem.transpose())\n mm += lstm.memory_maker.bias.mem\n mm = 1.7159 * numpy.tanh(0.6666 * mm)\n fg = numpy.dot(x, lstm.forget_gate.weights.mem.transpose())\n fg += lstm.forget_gate.bias.mem\n fg = 1.0 / (1.0 + numpy.exp(-fg))\n\n im = ig * mm\n fm = fg * mem.mem\n sm = im + fm\n oa = 1.7159 * numpy.tanh(0.6666 * sm)\n\n if not simple:\n xg = numpy.append(x, sm, axis=1)\n else:\n xg = x\n og = numpy.dot(xg, lstm.output_gate.weights.mem.transpose())\n\n og += lstm.output_gate.bias.mem\n og = 1.0 / (1.0 + numpy.exp(-og))\n\n om = oa * og\n\n vector_value_map = {\n lstm.input: inp.mem.copy(),\n lstm.prev_output: hid.mem.copy(),\n lstm.prev_memory: mem.mem.copy()\n }\n for unit in (lstm.input_gate, lstm.memory_maker, lstm.forget_gate,\n lstm.output_gate):\n for attr in (\"weights\", \"bias\"):\n arr = getattr(unit, attr)\n arr.map_read()\n vector_value_map[arr] = arr.mem.copy()\n\n lstm.run()\n lstm.output.map_read()\n max_diff = numpy.fabs(lstm.output.mem - om).max()\n self.assertLess(max_diff, self.precision_threshold,\n \"LSTM forward failed\")\n\n target = numpy.zeros((N, O), dtype=self._dtype)\n prng.get().fill(target)\n err_output = Array(lstm.output.mem - target)\n err_memory = 
Array(numpy.zeros_like(err_output.mem))\n\n # Backpropagate error manually\n goa = err_output.mem * og\n goa *= oa * oa * (-0.388484177) + 1.14381894\n\n gog = err_output.mem * oa\n gog *= og * (1.0 - og)\n gogx = numpy.dot(gog, lstm.output_gate.weights.mem)\n if not simple:\n gx = gogx[:, :x.shape[1]].copy()\n goa += gogx[:, x.shape[1]:]\n else:\n gx = gogx.copy()\n\n gim = goa\n gfm = goa\n gfg = gfm * mem.mem\n gmm = gim * ig\n gig = gim * mm\n gfg *= fg * (1.0 - fg)\n gig *= ig * (1.0 - ig)\n gmm *= mm * mm * (-0.388484177) + 1.14381894\n gx += numpy.dot(gfg, lstm.forget_gate.weights.mem)\n gx += numpy.dot(gmm, lstm.memory_maker.weights.mem)\n gx += numpy.dot(gig, lstm.input_gate.weights.mem)\n ginp = gx[:, :inp.mem.shape[1]]\n\n gd_lstm = GDLSTM(self.parent, lstm)\n\n gd_lstm.err_output = err_output\n gd_lstm.err_memory = err_memory\n\n gd_lstm.initialize(self.device)\n gd_lstm.run()\n\n gd_lstm.err_input.map_read()\n max_diff = numpy.fabs(gd_lstm.err_input.mem - ginp).max()\n self.assertLess(max_diff, self.precision_threshold,\n \"LSTM backward failed\")\n\n self.info(\"Checking err_input via numeric differentiation...\")\n err_input = gd_lstm.err_input.mem.ravel()\n self.numdiff_check(\n lstm, lstm.input, vector_value_map,\n lstm.output, target, err_input,\n self.info, self.assertLess, GDNumDiff.sse, inp.shape[0])\n self.info(\"Checked err_input via numeric differentiation: All Ok\")\n\n\n@assign_backend(\"ocl\")\nclass OCLTestLSTM(TestLSTM):\n pass\n\n\n@assign_backend(\"cuda\")\nclass CUDATestLSTM(TestLSTM):\n pass\n\n\n@assign_backend(\"numpy\")\nclass NUMPYTestLSTM(TestLSTM):\n pass\n\n\nif __name__ == \"__main__\":\n AcceleratedTest.main()\n","sub_path":"tests/unit/test_lstm.py","file_name":"test_lstm.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"148932173","text":"import pytest\n\nfrom day08 import *\n\nd = pytest.fixture(lambda: LEDDisplay(3, 7))\nD = pytest.fixture(lambda: LEDDisplay())\n\ndef test_08_example(d):\n d.rect(y=2, x=3) ; assert str(d) == '###....\\n###....\\n.......'\n d.rotx(x=1, n=1) ; assert str(d) == '#.#....\\n###....\\n.#.....'\n d.roty(y=0, n=4) ; assert str(d) == '....#.#\\n###....\\n.#.....'\n d.rotx(x=1, n=1) ; assert str(d) == '.#..#.#\\n#.#....\\n.#.....'\n assert int(d) == 6\n\ndef test_08a_example_str(d):\n script = ['rect 3x2', 'rotate column x=1 by 1', 'rotate row y=0 by 4', 'rotate column x=1 by 1']\n d.execute(script)\n assert str(d) == '.#..#.#\\n#.#....\\n.#.....'\n\ndef test_08a_example_int(D):\n script = ['rect 3x3']\n D.execute(script)\n assert int(D) == 9\n\ndef test_08a_answer(D):\n with open('input/day08.txt') as script:\n D.execute(script)\n assert int(D) == 128 #124\n","sub_path":"tests/test_day08.py","file_name":"test_day08.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"453969904","text":"import graphene\nimport base64\nimport os\nimport flask\n\nfrom gtmcore.inventory.inventory import InventoryManager, InventoryException\nfrom gtmcore.logging import LMLogger\nfrom gtmcore.workflows.gitlab import GitLabManager\nfrom gtmcore.exceptions import GigantumException\n\nfrom lmsrvcore.auth.user import get_logged_in_username, get_logged_in_author\nfrom lmsrvcore.utilities import configure_git_credentials\nfrom gtmcore.activity import ActivityStore, ActivityType, ActivityDetailType, ActivityRecord, \\\n ActivityDetailRecord\nfrom 
gtmcore.activity.utils import ImmutableDict, TextData, DetailRecordList, ImmutableList\n\n\nfrom lmsrvlabbook.api.objects.dataset import Dataset\nfrom gtmcore.dataset.manifest import Manifest\nfrom gtmcore.dispatcher import Dispatcher, jobs\n\n# Temporary hardcoding of Gigantum Dataset type. Can be removed with #1328\nfrom gtmcore.dataset.storage import get_storage_backend\nfrom gtmcore.dataset import Dataset as DatasetObj\n# Temporary hardcoding of Gigantum Dataset type. Can be removed with #1328\n\n\nfrom lmsrvlabbook.api.connections.dataset import DatasetConnection\nfrom lmsrvlabbook.api.objects.dataset import DatasetConfigurationParameterInput\nfrom lmsrvlabbook.api.connections.labbook import Labbook, LabbookConnection\nfrom lmsrvcore.auth.identity import tokens_from_request_context\n\n\nlogger = LMLogger.get_logger()\n\n\nclass CreateDataset(graphene.relay.ClientIDMutation):\n \"\"\"Mutation for creation of a new Dataset on disk\"\"\"\n\n class Input:\n name = graphene.String(required=True)\n description = graphene.String(required=True)\n storage_type = graphene.String(required=True)\n\n # Return the dataset instance\n dataset = graphene.Field(Dataset)\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, name, description, storage_type, client_mutation_id=None):\n username = get_logged_in_username()\n inv_manager = InventoryManager()\n\n inv_manager.create_dataset(username=username,\n owner=username,\n dataset_name=name,\n description=description,\n storage_type=storage_type,\n author=get_logged_in_author())\n\n return CreateDataset(dataset=Dataset(id=\"{}&{}\".format(username, name),\n name=name, owner=username))\n\n\nclass ConfigureDataset(graphene.relay.ClientIDMutation):\n \"\"\"Mutation to configure a dataset backend if needed.\n\n Workflow to configure a dataset:\n - TODO\n\n \"\"\"\n\n class Input:\n dataset_owner = graphene.String(required=True, description=\"Owner of the dataset to configure\")\n dataset_name = graphene.String(required=True, description=\"Name of the dataset to configure\")\n parameters = graphene.List(DatasetConfigurationParameterInput)\n confirm = graphene.Boolean(description=\"Set to true so confirm the configuration and continue. \"\n \"False will clear the configuration to start over\")\n\n dataset = graphene.Field(Dataset)\n is_configured = graphene.Boolean(description=\"If true, all parameters a set and OK to continue\")\n should_confirm = graphene.Boolean(description=\"If true, should confirm configuration with the user \"\n \"and resubmit with confirm=True to finalize\")\n error_message = graphene.String(description=\"Configuration error message to display to the user\")\n confirm_message = graphene.String(description=\"Confirmation message to display to the user\")\n has_background_job = graphene.Boolean(description=\"If true, this backend type requires background work\"\n \" after confirmation. 
Check complete_background_key for key to \"\n \"provide user feedback.\")\n background_job_key = graphene.String(description=\"Background job key to query on for feedback if needed\")\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, dataset_owner, dataset_name, parameters=None, confirm=None,\n client_mutation_id=None):\n logged_in_username = get_logged_in_username()\n im = InventoryManager()\n ds = im.load_dataset(logged_in_username, dataset_owner, dataset_name, get_logged_in_author())\n ds.backend.set_default_configuration(logged_in_username,\n bearer_token=flask.g.access_token,\n id_token=flask.g.id_token)\n\n should_confirm = False\n error_message = None\n confirm_message = None\n background_job_key = None\n is_configured = None\n\n if confirm is None:\n if parameters:\n # Update the configuration\n current_config = ds.backend_config\n for param in parameters:\n current_config[param.parameter] = param.value\n ds.backend_config = current_config\n\n # Validate the configuration\n try:\n confirm_message = ds.backend.confirm_configuration(ds)\n if confirm_message is not None:\n should_confirm = True\n except ValueError as err:\n error_message = f\"{err}\"\n is_configured = False\n else:\n if confirm is False:\n # Clear configuration (parameters may be omitted when clearing)\n current_config = ds.backend_config\n for param in (parameters or []):\n current_config[param.parameter] = None\n ds.backend_config = current_config\n\n else:\n if ds.backend.can_update_from_remote:\n d = Dispatcher()\n kwargs = {\n 'logged_in_username': logged_in_username,\n 'access_token': flask.g.access_token,\n 'id_token': flask.g.id_token,\n 'dataset_owner': dataset_owner,\n 'dataset_name': dataset_name,\n }\n\n # Gen unique keys for tracking jobs\n metadata = {'dataset': f\"{logged_in_username}|{dataset_owner}|{dataset_name}\",\n 'method': 'update_unmanaged_dataset_from_remote'}\n job_response = d.dispatch_task(jobs.update_unmanaged_dataset_from_remote,\n kwargs=kwargs, metadata=metadata)\n\n background_job_key = job_response.key_str\n\n if is_configured is None:\n is_configured = ds.backend.is_configured\n\n return ConfigureDataset(dataset=Dataset(id=\"{}&{}\".format(dataset_owner, dataset_name),\n name=dataset_name, owner=dataset_owner),\n is_configured=is_configured,\n should_confirm=should_confirm,\n confirm_message=confirm_message,\n error_message=error_message,\n has_background_job=ds.backend.can_update_from_remote,\n background_job_key=background_job_key)\n\n\nclass UpdateUnmanagedDataset(graphene.relay.ClientIDMutation):\n \"\"\"Mutation to update the manifest for a local dataset based on changes either locally or via the remote the\n dataset is linked to\n \"\"\"\n\n class Input:\n dataset_owner = graphene.String(required=True, description=\"Owner of the dataset to configure\")\n dataset_name = graphene.String(required=True, description=\"Name of the dataset to configure\")\n from_local = graphene.Boolean(description=\"If true, update the dataset based on local state of the dataset\")\n from_remote = graphene.Boolean(description=\"If true, update the dataset based on remote state of the dataset.\"\n \" This effectively also updates the local state, so the\"\n \" `fromLocal` argument is ignored\")\n\n dataset = graphene.Field(Dataset)\n background_job_key = graphene.String(description=\"Background job key to query on for feedback if needed\")\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, dataset_owner, dataset_name, from_local=False, from_remote=False,\n client_mutation_id=None):\n logged_in_username = get_logged_in_username()\n im = InventoryManager()\n ds = im.load_dataset(logged_in_username, dataset_owner, dataset_name, get_logged_in_author())\n ds.backend.set_default_configuration(logged_in_username,\n bearer_token=flask.g.access_token,\n id_token=flask.g.id_token)\n\n if not ds.backend.is_configured:\n raise ValueError(\"Dataset is not fully configured. Cannot update.\")\n\n d = Dispatcher()\n kwargs = {\n 'logged_in_username': logged_in_username,\n 'access_token': flask.g.access_token,\n 'id_token': flask.g.id_token,\n 'dataset_owner': dataset_owner,\n 'dataset_name': dataset_name,\n }\n\n background_job_key = None\n\n if from_remote is True:\n if ds.backend.can_update_from_remote:\n # Gen unique keys for tracking jobs\n metadata = {'dataset': f\"{logged_in_username}|{dataset_owner}|{dataset_name}\",\n 'method': 'update_unmanaged_dataset_from_remote'}\n\n job_response = d.dispatch_task(jobs.update_unmanaged_dataset_from_remote,\n kwargs=kwargs, metadata=metadata)\n background_job_key = job_response.key_str\n else:\n raise ValueError(\"This dataset type does not support automatic update via querying its remote\")\n\n elif from_local is True:\n # Gen unique keys for tracking jobs\n metadata = {'dataset': f\"{logged_in_username}|{dataset_owner}|{dataset_name}\",\n 'method': 'update_unmanaged_dataset_from_local'}\n\n job_response = d.dispatch_task(jobs.update_unmanaged_dataset_from_local,\n kwargs=kwargs, metadata=metadata)\n background_job_key = job_response.key_str\n else:\n # the exception was previously constructed but never raised\n raise ValueError(\"Either `fromRemote` or `fromLocal` must be True.\")\n\n return UpdateUnmanagedDataset(dataset=Dataset(id=\"{}&{}\".format(dataset_owner, dataset_name),\n name=dataset_name, owner=dataset_owner),\n background_job_key=background_job_key)\n\n\nclass FetchDatasetEdge(graphene.relay.ClientIDMutation):\n class Input:\n owner = graphene.String(required=True)\n dataset_name = graphene.String(required=True)\n\n new_dataset_edge = graphene.Field(DatasetConnection.Edge)\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, owner, dataset_name, client_mutation_id=None):\n cursor = base64.b64encode(f\"{0}\".encode('utf-8'))\n dsedge = DatasetConnection.Edge(node=Dataset(owner=owner, name=dataset_name),\n cursor=cursor)\n\n return FetchDatasetEdge(new_dataset_edge=dsedge)\n\n\nclass ModifyDatasetLink(graphene.relay.ClientIDMutation):\n \"\"\"Mutation to link and unlink Datasets from a Project\"\"\"\n\n class Input:\n labbook_owner = graphene.String(required=True, description=\"Owner of the labbook\")\n labbook_name = graphene.String(required=True, description=\"Name of the labbook\")\n dataset_owner = graphene.String(required=True, description=\"Owner of the dataset to link\")\n dataset_name = graphene.String(required=True, description=\"Name of the dataset to link\")\n action = graphene.String(required=True, description=\"Action to perform, either `link`, `unlink`, or `update`\")\n dataset_url = graphene.String(description=\"URL to the Dataset to link. 
Only required when `action=link`\")\n\n new_labbook_edge = graphene.Field(LabbookConnection.Edge)\n\n @staticmethod\n def _get_remote_domain(dataset_url, dataset_owner, dataset_name):\n \"\"\"Helper method to get the domain or return none\"\"\"\n if \"http\" in dataset_url:\n dataset_url, _ = dataset_url.split('.git')\n _, _, remote_domain, namespace, name = dataset_url.split(\"/\")\n if namespace != dataset_owner:\n raise ValueError(\"The dataset owner does not match url\")\n if name != dataset_name:\n raise ValueError(\"The dataset name does not match url\")\n else:\n remote_domain = None\n\n return remote_domain\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, labbook_owner, labbook_name, dataset_owner, dataset_name, action,\n dataset_url=None, client_mutation_id=None):\n logged_in_username = get_logged_in_username()\n im = InventoryManager()\n lb = im.load_labbook(logged_in_username, labbook_owner, labbook_name, author=get_logged_in_author())\n\n with lb.lock():\n if action == 'link':\n if dataset_url:\n remote_domain = cls._get_remote_domain(dataset_url, dataset_owner, dataset_name)\n\n # Make sure git creds are configured for the remote\n if remote_domain:\n configure_git_credentials()\n else:\n # Link to local dataset\n ds = im.load_dataset(logged_in_username, dataset_owner, dataset_name)\n dataset_url = f\"{ds.root_dir}/.git\"\n\n # Link the dataset to the labbook\n ds = im.link_dataset_to_labbook(dataset_url, dataset_owner, dataset_name, lb, logged_in_username)\n ds.namespace = dataset_owner\n\n # Preload the dataloader\n info.context.dataset_loader.prime(f\"{get_logged_in_username()}&{dataset_owner}&{dataset_name}\", ds)\n\n # Relink the revision\n m = Manifest(ds, logged_in_username)\n m.link_revision()\n elif action == 'unlink':\n im.unlink_dataset_from_labbook(dataset_owner, dataset_name, lb)\n elif action == 'update':\n ds = im.update_linked_dataset_reference(dataset_owner, dataset_name, lb)\n\n # Reload cache and relink revision due to update\n m = Manifest(ds, logged_in_username)\n m.force_reload()\n m.link_revision()\n\n info.context.dataset_loader.prime(f\"{get_logged_in_username()}&{dataset_owner}&{dataset_name}\", ds)\n else:\n raise ValueError(\"Unsupported action. Use `link`, `unlink`, or `update`\")\n\n info.context.labbook_loader.prime(f\"{get_logged_in_username()}&{labbook_owner}&{labbook_name}\", lb)\n edge = LabbookConnection.Edge(node=Labbook(owner=labbook_owner, name=labbook_name),\n cursor=base64.b64encode(f\"{0}\".encode('utf-8')))\n\n return ModifyDatasetLink(new_labbook_edge=edge)\n\n\nclass DeleteDataset(graphene.ClientIDMutation):\n \"\"\"Delete a dataset.\"\"\"\n class Input:\n owner = graphene.String(required=True)\n dataset_name = graphene.String(required=True)\n local = graphene.Boolean()\n remote = graphene.Boolean()\n\n local_deleted = graphene.Boolean()\n remote_deleted = graphene.Boolean()\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, owner, dataset_name, local=False, remote=False,\n client_mutation_id=None):\n logged_in_user = get_logged_in_username()\n local_deleted = False\n remote_deleted = False\n if remote:\n logger.info(f\"Deleting remote Dataset {owner}/{dataset_name}\")\n\n config = flask.current_app.config['LABMGR_CONFIG']\n\n # Get tokens from request context\n access_token, id_token = tokens_from_request_context(tokens_required=True)\n\n # Temporary hard-coding of Gigantum Dataset type. 
Can be removed with #1328\n try:\n ds = InventoryManager().load_dataset(logged_in_user, owner, dataset_name,\n author=get_logged_in_author())\n remove_remote_config = True\n except InventoryException:\n # Does not exist locally, so create a \"mock\" Dataset instance\n remove_remote_config = False\n ds = DatasetObj(owner)\n ds._data = {\"name\": dataset_name}\n\n ds_backend = get_storage_backend(\"gigantum_object_v1\")\n ds_backend.set_default_configuration(logged_in_user, access_token, id_token)\n ds_backend.delete_contents(ds)\n # Temporary hard-coding of Gigantum Dataset type. Can be removed with #1328\n\n # Get remote server configuration\n server_config = config.get_server_configuration()\n\n # Delete the repository\n mgr = GitLabManager(server_config.git_url,\n server_config.hub_api_url,\n access_token=access_token,\n id_token=id_token)\n mgr.remove_repository(owner, dataset_name)\n logger.info(f\"Deleted {owner}/{dataset_name} repository from the\"\n f\" remote repository {server_config.git_url}\")\n\n # Remove locally any references to that cloud repo that's just been deleted.\n if remove_remote_config:\n try:\n ds.remove_remote()\n except GigantumException as e:\n logger.warning(f\"Failed to remove remote config from Dataset during Remote Dataset Delete: {e}\")\n\n remote_deleted = True\n\n if local:\n logger.info(f\"Deleting local Dataset {owner}/{dataset_name}\")\n\n # Delete the dataset\n dataset_delete_job = InventoryManager().delete_dataset(logged_in_user, owner, dataset_name)\n local_deleted = True\n\n # Schedule Job to clear file cache if dataset is no longer in use\n job_metadata = {'method': 'clean_dataset_file_cache'}\n job_kwargs = {\n 'logged_in_username': logged_in_user,\n 'dataset_owner': dataset_delete_job.namespace,\n 'dataset_name': dataset_delete_job.name,\n 'cache_location': dataset_delete_job.cache_root\n }\n\n dispatcher = Dispatcher()\n job_key = dispatcher.dispatch_task(jobs.clean_dataset_file_cache, metadata=job_metadata,\n kwargs=job_kwargs)\n logger.info(f\"Dispatched clean_dataset_file_cache({owner}/{dataset_name}) to Job {job_key}\")\n\n return DeleteDataset(local_deleted=local_deleted, remote_deleted=remote_deleted)\n\n\nclass SetDatasetDescription(graphene.relay.ClientIDMutation):\n class Input:\n owner = graphene.String(required=True)\n dataset_name = graphene.String(required=True)\n description = graphene.String(required=True)\n\n updated_dataset = graphene.Field(Dataset)\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, owner, dataset_name,\n description, client_mutation_id=None):\n username = get_logged_in_username()\n ds = InventoryManager().load_dataset(username, owner, dataset_name,\n author=get_logged_in_author())\n ds.description = description\n with ds.lock():\n ds.git.add(os.path.join(ds.root_dir, '.gigantum/gigantum.yaml'))\n commit = ds.git.commit('Updating description')\n\n adr = ActivityDetailRecord(ActivityDetailType.LABBOOK,\n show=False,\n data=TextData('plain',\n f\"Updated Dataset description: {description}\"))\n\n ar = ActivityRecord(ActivityType.LABBOOK,\n message=\"Updated Dataset description\",\n linked_commit=commit.hexsha,\n tags=ImmutableList([\"dataset\"]),\n show=False,\n detail_objects=DetailRecordList([adr]))\n\n ars = ActivityStore(ds)\n ars.create_activity_record(ar)\n return SetDatasetDescription(updated_dataset=Dataset(owner=owner, name=dataset_name))\n\n\nclass WriteDatasetReadme(graphene.relay.ClientIDMutation):\n class Input:\n owner = graphene.String(required=True)\n dataset_name = 
graphene.String(required=True)\n content = graphene.String(required=True)\n\n updated_dataset = graphene.Field(Dataset)\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, owner, dataset_name, content, client_mutation_id=None):\n username = get_logged_in_username()\n ds = InventoryManager().load_dataset(username, owner, dataset_name,\n author=get_logged_in_author())\n\n # Write data\n with ds.lock():\n ds.write_readme(content)\n\n return WriteDatasetReadme(updated_dataset=Dataset(owner=owner, name=dataset_name))\n\n\nclass VerifyDataset(graphene.ClientIDMutation):\n \"\"\"Verify the contents of a dataset, returning a job key. The 'modified_keys' value in the metadata indicates\n which files have changed, once the job is complete.\"\"\"\n\n class Input:\n dataset_owner = graphene.String(required=True)\n dataset_name = graphene.String(required=True)\n labbook_owner = graphene.String(required=False, description=\"Optional arg if dataset is linked\")\n labbook_name = graphene.String(required=False, description=\"Optional arg if dataset is linked\")\n\n background_job_key = graphene.String()\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, dataset_owner, dataset_name, labbook_owner=None, labbook_name=None,\n client_mutation_id=None):\n logged_in_user = get_logged_in_username()\n\n # Schedule Job to clear file cache if dataset is no longer in use\n job_metadata = {'method': 'verify_dataset_contents'}\n job_kwargs = {\n 'logged_in_username': logged_in_user,\n 'access_token': flask.g.access_token,\n 'id_token': flask.g.id_token,\n 'dataset_owner': dataset_owner,\n 'dataset_name': dataset_name,\n 'labbook_owner': labbook_owner,\n 'labbook_name': labbook_name\n }\n\n dispatcher = Dispatcher()\n job_key = dispatcher.dispatch_task(jobs.verify_dataset_contents, metadata=job_metadata,\n kwargs=job_kwargs)\n logger.info(f\"Dispatched verify_dataset_contents({dataset_owner}/{dataset_name}) to Job {job_key}\")\n\n return VerifyDataset(background_job_key=job_key)\n","sub_path":"packages/gtmapi/lmsrvlabbook/api/mutations/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":23338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"13090659","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nimport unittest\n\nfrom flask import json\nfrom six import BytesIO\n\nfrom access_control_policy.models.access_control_policy_list import AccessControlPolicyList # noqa: E501\nfrom access_control_policy.models.problem_details import ProblemDetails # noqa: E501\nfrom access_control_policy.test import BaseTestCase\n\n\nclass TestDefaultController(BaseTestCase):\n \"\"\"DefaultController integration test stubs\"\"\"\n\n def test_access_control_policy_list_service_api_id_get(self):\n \"\"\"Test case for access_control_policy_list_service_api_id_get\n\n \n \"\"\"\n query_string = [('aef-id', 'aef_id_example'),\n ('api-invoker-id', 'api_invoker_id_example'),\n ('supported-features', 'supported_features_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/access-control-policy/v1/accessControlPolicyList/{service_api_id}'.format(service_api_id='service_api_id_example'),\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"services/TS29222_CAPIF_Access_Control_Policy_API/access_control_policy/test/test_default_controller.py","file_name":"test_default_controller.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"625885636","text":"import retina\nimport cv2\nimport numpy as np\n\ndetector = retina.RetinaFace('./model/', True)\n\nimg = cv2.imread('rotated90.ppm')\n\nthresh = 0.8\nscales = [480, 640]\ncount = 1\n\nim_shape = img.shape\ntarget_size = scales[0]\nmax_size = scales[1]\nim_size_min = np.min(im_shape[0:2])\nim_size_max = np.max(im_shape[0:2])\nim_scale = float(target_size) / float(im_size_min)\nif np.round(im_scale * im_size_max) > max_size:\n\tim_scale = float(max_size) / float(im_size_max)\n\nprint('im_scale', im_scale)\n\nscales2 = [im_scale]\nflip = False\n\nfaces, landmarks = detector.detect(img, thresh, scales=scales2, do_flip=flip)\n\nif faces is not None:\n\tprint('find', faces.shape[0], 'faces')\n\tfor i in range(faces.shape[0]):\n\t\t# draw the box and landmarks for every face; this block must sit inside\n\t\t# the loop, otherwise only the last detection is rendered and the loop\n\t\t# variable i is read after the loop with a stale value\n\t\tbox = faces[i].astype(int)\n\t\tcolor = (0,255,0)\n\t\tprint('BOX', box)\n\t\tprint(landmarks)\n\t\t# print(box[3]-box[1])\n\t\tcv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)\n\t\tif landmarks is not None:\n\t\t\tlandmark5 = landmarks[i].astype(int)\n\t\t\tfor l in range(landmark5.shape[0]):\n\t\t\t\tcolor = (0,0,255)\n\t\t\t\tcv2.circle(img, (landmark5[l][0], landmark5[l][1]), 1, color, 2)\n\ncv2.imwrite('result.jpg', img)\n","sub_path":"example/FaceDetector/Retina_mobilenet/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"599395217","text":"# No matter how many disks there are, the solution follows the same pattern: first move the top (n-1) disks from peg a to peg b, then move the largest disk at the bottom to peg c, and finally move the (n-1) disks from peg b to peg c\ndef hanoi(n, a, buffer, c):\n    if n == 1:\n        print(a, '--->', c) # the move of a single disk from peg a to peg c\n    else:\n        hanoi(n-1, a, c, buffer) # move the (n-1) upper disks from peg a to the buffer peg\n        hanoi(1, a, buffer, c) # move the largest, bottom disk to peg c\n        hanoi(n-1, buffer, a, c) # move the (n-1) disks from the buffer peg to peg c\n\nhanoi(3, 'A', 'B', 'C')\n","sub_path":"hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"432056807","text":"import os\nfrom functools import partial\nfrom multiprocessing.pool import ThreadPool\n\nimport pytest\n\nimport esmvalcore\nfrom esmvalcore._task import (BaseTask, _run_tasks_parallel,\n _run_tasks_sequential, run_tasks)\n\n\n@pytest.fixture\ndef example_tasks():\n \"\"\"Example tasks for testing the task runners.\"\"\"\n tasks = set()\n for i in range(3):\n task = BaseTask(\n name=f'task{i}',\n ancestors=[\n BaseTask(name=f'task{i}-ancestor{j}') for j in range(3)\n ],\n )\n for task0 in task.flatten():\n task0.priority = i\n tasks.add(task)\n\n return tasks\n\n\n@pytest.mark.parametrize('max_parallel_tasks', [1, 2, 3, 4, 16, None])\ndef test_run_tasks(monkeypatch, tmp_path, max_parallel_tasks, example_tasks):\n \"\"\"Check that tasks are run correctly.\"\"\"\n def _run(self, input_files):\n output_file = tmp_path / self.name\n\n msg = ('running {} in thread {}, using input {}, generating {}'.format(\n self.name, os.getpid(), input_files, output_file))\n print(msg)\n\n # Check that the output is created just once\n assert not output_file.exists()\n output_file.write_text(msg)\n output_file = str(output_file)\n\n # Check that ancestor results are provided correctly\n assert 
len(self.ancestors) == len(input_files)\n for ancestor in self.ancestors:\n assert len(ancestor.output_files) == 1\n assert ancestor.output_files[0].startswith(output_file)\n assert str(tmp_path / ancestor.name) in input_files\n\n return [output_file]\n\n monkeypatch.setattr(BaseTask, '_run', _run)\n\n run_tasks(example_tasks, max_parallel_tasks)\n\n for task in example_tasks:\n print(task.name, task.output_files)\n assert task.output_files\n\n\n@pytest.mark.parametrize('runner', [\n _run_tasks_sequential,\n partial(_run_tasks_parallel, max_parallel_tasks=1),\n])\ndef test_runner_uses_priority(monkeypatch, runner, example_tasks):\n \"\"\"Check that the runner tries to respect task priority.\"\"\"\n order = []\n\n def _run(self, input_files):\n print(f'running task {self.name} with priority {self.priority}')\n order.append(self.priority)\n return [f'{self.name}_test.nc']\n\n monkeypatch.setattr(BaseTask, '_run', _run)\n monkeypatch.setattr(esmvalcore._task, 'Pool', ThreadPool)\n\n runner(example_tasks)\n print(order)\n assert len(order) == 12\n assert order == sorted(order)\n","sub_path":"tests/integration/test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"25735503","text":"#! /usr/bin/python\n# https://adventofcode.com/2018/day/12\n\nfrom collections import defaultdict\nimport re\n\ndef readInput():\n # read file into a list of lines\n infile = open(\"input\",\"r\")\n lines = []\n while True:\n # read line and strip newline characters\n instr = infile.readline()\n\n if instr == '': # eof\n break\n\n instr = instr.strip()\n\n lines.append(instr)\n infile.close()\n return lines\n\ndef parseInput(lines):\n pattern = re.compile(r\"initial state: (.*)\")\n match = pattern.match(lines[0])\n initial = match.group(1)\n\n initial = initial\n\n pattern = re.compile(r\"(.*) => (.*)\")\n rules = {}\n for line in lines[1:]:\n match = pattern.match(line)\n if match:\n rule = match.group(1)\n result = match.group(2)\n rules[rule] = result\n\n return initial,rules\n\ndef findSoln(initial,rules):\n state = initial\n offset = 0\n for gen in range(1000):\n state = '.....'+state+'.....'\n offset += 5\n newState = ['.' 
for _ in state]\n for i in range(len(state)-2):\n pattern = state[i:i+5]\n if pattern in rules:\n newState[i+2] = rules[pattern]\n else:\n newState[i+2] = '.'\n state = ''.join(newState)\n # strip extra dots\n oldlen = len(state)\n state = state.lstrip('.')\n offset -= (oldlen-len(state))\n state = state.rstrip('.')\n print(state,offset)\n\n # this is really hacky\n # noticed that it devolves into a stable pattern somewhere around 1000 generations\n # then we can just manually check the offset for the 50000000000th\n offset = -940 - int(49999999000)\n\n soln = 0\n for i,c in enumerate(state):\n if c == '#':\n soln += (i-offset)\n\n print(soln)\n\ndef main():\n lines = readInput()\n initial,rules = parseInput(lines)\n findSoln(initial,rules)\n\nmain()\n","sub_path":"12/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"318811547","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rom', '0006_auto_20150708_1752'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='GameLogic',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('modified', models.DateTimeField(auto_now=True)),\n ('rom', models.CharField(max_length=64)),\n ('title_screen', models.CharField(max_length=64)),\n ('start_map', models.IntegerField()),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='map',\n name='uuid',\n field=models.CharField(default=b'7e6fa533-32ee-11e5-87bf-64b9e8bebca0', max_length=64),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='rom',\n name='uuid',\n field=models.CharField(default=b'7e6dcb19-32ee-11e5-96e9-64b9e8bebca0', max_length=64),\n preserve_default=True,\n ),\n ]\n","sub_path":"rom/migrations/0007_auto_20150725_1259.py","file_name":"0007_auto_20150725_1259.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"381815574","text":"import os\nfrom subprocess import check_call, check_output, CalledProcessError\n\ndef main():\n \"\"\" Installs Python packages listed in any requirements.txt file\n located at root of project.\n \"\"\"\n\n # Returns list of all folders / files in \"efs\" directory.\n projects = os.listdir(\"/efs/\")\n\n print(f'Projects in efs {projects}')\n\n for project in projects:\n\n project = \"/efs/\" + project\n\n try:\n # Installs all packages listed in requirements.txt file.\n check_call([\"pip3\", \"install\", \"-r\", f\"{project}/requirements.txt\"])\n\n # Opens requirements.txt to print packages that were successfully installed.\n with open(f'{project}/requirements.txt', \"r\") as txt:\n requirements = ''\n for line in txt:\n requirements += line\n print(f'\\nThe following Python packages have been installed for project \"{project}\":\\n{requirements}')\n\n except Exception as e:\n print(\"Error : \" + str(e))","sub_path":"scripts/install_python_deps.py","file_name":"install_python_deps.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"580940211","text":"######## Picamera Object Detection Using Tensorflow 
Classifier #########\n#\n# Author: Evan Juras\n# Date: 4/15/18\n# Description: \n# This program uses a TensorFlow classifier to perform object detection.\n# It loads the classifier uses it to perform object detection on a Picamera feed.\n# It draws boxes and scores around the objects of interest in each frame from\n# the Picamera. It also can be used with a webcam by adding \"--usbcam\"\n# when executing this script from the terminal.\n\n## Some of the code is copied from Google's example at\n## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\n\n## and some is copied from Dat Tran's example at\n## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py\n\n## but I changed it to make it more understandable to me.\n\nprint(\"initializing...\")\n# Import packages\nimport os\nimport cv2\nimport numpy as np\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom PIL import Image\nimport tensorflow as tf\nimport argparse\nimport RPi.GPIO as GPIO\nimport serial\nimport sys\nimport time\n\n# Set up GPIO\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(13,GPIO.OUT) # M0\nGPIO.setup(15,GPIO.OUT) # M1\nGPIO.setup(36,GPIO.OUT) # infrared light\nGPIO.setup(37,GPIO.IN,pull_up_down=GPIO.PUD_DOWN) # pir\nGPIO.output(13,GPIO.LOW)\nGPIO.output(15,GPIO.LOW)\nGPIO.output(36,GPIO.LOW)\n\n# Set up serial communication\nser = serial.Serial(\n port='/dev/serial0',\n baudrate=9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\n\n# Set up camera constants\nIM_WIDTH = 1280\nIM_HEIGHT = 720\n#IM_WIDTH = 640 Use smaller resolution for\n#IM_HEIGHT = 480 slightly faster framerate\n\n# Select camera type (if user enters --usbcam when calling this script,\n# a USB webcam will be used)\ncamera_type = 'picamera'\nparser = argparse.ArgumentParser()\nparser.add_argument('--usbcam', help='Use a USB webcam instead of picamera',\n action='store_true')\nargs = parser.parse_args()\nif args.usbcam:\n camera_type = 'usb'\n\n# This is needed since the working directory is the object_detection folder.\nsys.path.append('..')\n\n# Import utilites\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n# Name of the directory containing the object detection module we're using\nMODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'\n\n# Grab path to current working directory\nCWD_PATH = os.getcwd()\n\n# Path to frozen detection graph .pb file, which contains the model that is used\n# for object detection.\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\n\n# Path to label map file\nPATH_TO_LABELS = os.path.join(CWD_PATH,'data','mscoco_label_map.pbtxt')\n\n# Number of classes the object detector can identify\nNUM_CLASSES = 90\n\n## Load the label map.\n# Label maps map indices to category names, so that when the convolution\n# network predicts `5`, we know that this corresponds to `airplane`.\n# Here we use internal utility functions, but anything that returns a\n# dictionary mapping integers to appropriate string labels would be fine\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\nprint(\"loading Tensorflow model...\")\n\n# Load the Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n 
od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n sess = tf.Session(graph=detection_graph)\n\n\n# Define input and output tensors (i.e. data) for the object detection classifier\n\n# Input tensor is the image\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n# Output tensors are the detection boxes, scores, and classes\n# Each box represents a part of the image where a particular object was detected\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n# Each score represents level of confidence for each of the objects.\n# The score is shown on the result image, together with the class label.\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n\n# Number of objects detected\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n# Initialize frame rate calculation\nframe_rate_calc = 1\nfreq = cv2.getTickFrequency()\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n# Initialize camera and perform object detection.\n# The camera has to be set up and used differently depending on if it's a\n# Picamera or USB webcam.\n# Initialize Picamera and grab reference to the raw capture\nprint(\"setting up camera...\")\ncamera = PiCamera()\ncamera.resolution = (IM_WIDTH,IM_HEIGHT)\ncamera.framerate = 10\nrawCapture = PiRGBArray(camera, size=(IM_WIDTH,IM_HEIGHT))\nrawCapture.truncate(0)\n\n# set up the image classification so it will run faster later\nidx=0\nfor frame1 in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n idx=idx+1\n frame = frame1.array\n frame.setflags(write=1)\n frame_expanded = np.expand_dims(frame, axis=0)\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n if idx>0:\n break\nrawCapture.truncate(0)\n\n# check starting time\ntime0=time.time()\n# img xmit flags\nimg_taken=0\nimg_xmit=0\nimg_xmitting=0\nimg_done=0\nprint(\"beginning PIR scan...\")\ntry:\n while True:\n # check PIR\n pir_out=GPIO.input(37)\n # check time\n time1=time.time()\n time_lapsed=time1-time0\n\n if time_lapsed>1800:\n time0=time1\n # if there are any images, start sending the latest one.\n if img_taken and img_xmitting != 1:\n img_xmit=1\n img_xmitting=1\n # load latest image\n all_imgs=os.listdir('/home/pi/Desktop/saved-images/')\n img_to_send='/home/pi/Desktop/saved-images/'+all_imgs[-1]\n # load latest image in packets and start sending\n img=Image.open(img_to_send)\n img=img.resize((160,90)) # resize for transmission\n img=img.convert(\"L\")\n img_array=np.array(img)\n h=img_array.shape[0]\n w=img_array.shape[1]\n size=241\n data_buff=np.zeros(size) # create a buffer to send in packets\n # start the iteration over every pixel\n iteration_sent=300\n pixel_count=h*w\n packet=1\n for counter in range(1,int(size)):\n row2=int(packet/w)\n col2=packet%w\n data_buff[0]=iteration_sent # the header of each packet sent\n data_buff[counter]=img_array[row2][col2]\n ser.write(str(data_buff[counter]).encode())\n print('sending packet '+str(packet)) # this status line previously built the string without calling print()\n iteration_sent=iteration_sent+1\n packet=packet+1\n\n # if image not done sending and pir hasn't been triggered yet\n if pir_out!=1 and img_xmitting==1 and img_done!=1:\n for counter in range(0,int(size)):\n row2=int(packet/w)\n col2=packet%w # recompute the column for this packet (it was previously assigned to an unused name `col`, leaving col2 stale)\n data_buff[0]=iteration_sent\n data_buff[counter]=img_array[row2][col2]\n ser.write(str(data_buff[counter]).encode())\n iteration_sent=iteration_sent+1\n packet=packet+1\n if packet == pixel_count:\n img_done=1\n\n # if image done sending\n if img_done==1:\n # reset variables\n img_xmitting=0\n img_done=0\n\n if pir_out == 1:\n GPIO.output(36,GPIO.HIGH)\n counter=0\n rawCapture.truncate(0)\n for frame1 in camera.capture_continuous(rawCapture, format=\"bgr\",use_video_port=True):\n counter=counter+1\n print(counter)\n t1 = cv2.getTickCount()\n\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\n # i.e. a single-column array, where each item in the column has the pixel RGB value\n frame = frame1.array\n frame.setflags(write=1)\n frame_expanded = np.expand_dims(frame, axis=0)\n\n # Perform the actual detection by running the model with the image as input\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n threshold = 0.1\n # Print detected objects and scores\n objects=[]\n for idx,value in enumerate(classes[0]):\n object_name=(category_index.get(value)).get('name')\n if scores[0,idx]>threshold and object_name=='person':\n camera.capture('/home/pi/Desktop/saved-images/'+time.strftime(\"%y%m%d_%H%M%S\")+'.jpg', quality=6)\n img_taken=1\n print('person found')\n\n t2 = cv2.getTickCount()\n time1 = (t2-t1)/freq\n frame_rate_calc = 1/time1\n\n rawCapture.truncate(0)\n\n if counter>5:\n # turn off infrared lighting\n GPIO.output(36,GPIO.LOW)\n break\n\nexcept KeyboardInterrupt:\n camera.close()\n GPIO.cleanup()\n ser.close()\n cv2.destroyAllWindows()\n\nexcept Exception as e:\n print(e)\n camera.close()\n GPIO.cleanup()\n ser.close()\n cv2.destroyAllWindows()\n## # restart program\n## cmd='python3 /home/pi/Desktop/SSLP/tensorflow/object_detection/panda.py'\n## print(cmd)\n## ret=os.system(cmd)\n","sub_path":"tensorflow/object_detection/panda_img_xmit.py","file_name":"panda_img_xmit.py","file_ext":"py","file_size_in_byte":10314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"38987156","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom opengever.base.oguid import Oguid\nfrom opengever.meeting.vocabulary import get_committee_member_vocabulary\nfrom opengever.meeting.vocabulary import get_proposal_transitions_vocabulary\nfrom opengever.testing import IntegrationTestCase\nfrom plone.uuid.interfaces import IUUID\nfrom zope.component import getUtility\nfrom zope.schema.interfaces import IVocabularyFactory\n\n\nclass TestProposalTransitionsVocabulary(IntegrationTestCase):\n\n def test_get_proposal_transitions_vocabulary_draft_proposal(self):\n self.login(self.regular_user)\n terms = get_proposal_transitions_vocabulary(self.draft_proposal)\n self.assertItemsEqual(\n ['proposal-transition-cancel', 'proposal-transition-submit'],\n [term.value for term in terms])\n\n def test_get_proposal_transitions_vocabulary_submitted_proposal(self):\n self.login(self.regular_user)\n terms = get_proposal_transitions_vocabulary(self.proposal)\n self.assertItemsEqual([], [term.value for term in terms])\n\n\nclass TestCommitteeVocabularies(IntegrationTestCase):\n\n def test_committeee_vocabulary_list_all_local_committees(self):\n self.login(self.committee_responsible)\n\n create(Builder('committee_model').having(admin_unit_id='foreign'))\n\n factory = getUtility(IVocabularyFactory,\n 
name='opengever.meeting.CommitteeVocabulary')\n self.assertItemsEqual(\n [self.empty_committee.load_model(),\n self.committee.load_model()],\n [term.value for term in factory(context=None)])\n\n def test_active_committeee_vocabulary_list_only_active_local_committees(self):\n self.login(self.committee_responsible)\n\n create(Builder('committee_model').having(admin_unit_id='foreign'))\n\n factory = getUtility(IVocabularyFactory,\n name='opengever.meeting.ActiveCommitteeVocabulary')\n self.assertItemsEqual(\n [Oguid.for_object(self.empty_committee).id,\n Oguid.for_object(self.committee).id],\n [term.value for term in factory(context=None)])\n\n self.empty_committee.load_model().deactivate()\n self.assertItemsEqual(\n [Oguid.for_object(self.committee).id],\n [term.value for term in factory(context=None)])\n\n\nclass TestCommitteeMemberVocabulary(IntegrationTestCase):\n\n def test_return_fullname_with_email_as_title(self):\n self.login(self.meeting_user)\n vocabulary = get_committee_member_vocabulary(self.meeting)\n\n self.assertEqual(\n [u'Sch\\xf6ller Heidrun (h.schoeller@example.org)',\n u'W\\xf6lfl Gerda (g.woelfl@example.com)',\n u'Wendler Jens (jens-wendler@example.com)'],\n [term.title for term in vocabulary])\n\n def test_returns_member_as_value(self):\n self.login(self.meeting_user)\n vocabulary = get_committee_member_vocabulary(self.meeting)\n\n self.assertEqual(\n [self.committee_president.model,\n self.committee_participant_1.model,\n self.committee_participant_2.model],\n [term.value for term in vocabulary])\n\n def test_omits_braces_when_no_email_is_available(self):\n self.login(self.meeting_user)\n self.committee_president.model.email = None\n\n vocabulary = get_committee_member_vocabulary(self.meeting)\n self.assertEqual(u'Sch\\xf6ller Heidrun', vocabulary._terms[0].title)\n\n\nclass TestProposalTemplatesVocabulary(IntegrationTestCase):\n\n def test_contains_proposal_templates(self):\n self.login(self.regular_user)\n factory = getUtility(IVocabularyFactory,\n name='opengever.meeting.ProposalTemplatesVocabulary')\n self.assertItemsEqual(\n [self.proposal_template.Title(),\n self.ad_hoc_agenda_item_template.Title(),\n self.recurring_agenda_item_template.Title()],\n [term.title.encode('utf-8') for term in factory(context=None)])\n\n self.assertItemsEqual(\n [IUUID(self.proposal_template),\n IUUID(self.ad_hoc_agenda_item_template),\n IUUID(self.recurring_agenda_item_template)],\n [term.value for term in factory(context=None)])\n\n\nclass TestProposalTemplatesForCommitteeVocabulary(IntegrationTestCase):\n features = ('meeting',)\n\n def test_consists_of_all_templates_by_default(self):\n self.login(self.committee_responsible)\n baubewilligungen = create(\n Builder('proposaltemplate')\n .titled(u'Baubewilligung')\n .within(self.templates))\n\n factory = getUtility(\n IVocabularyFactory,\n name='opengever.meeting.ProposalTemplatesForCommitteeVocabulary')\n self.assertItemsEqual(\n [self.proposal_template,\n baubewilligungen,\n self.ad_hoc_agenda_item_template,\n self.recurring_agenda_item_template],\n [term.value for term in factory(context=self.dossier)])\n\n def test_reduce_allowed_templates_with_committee_settings(self):\n self.login(self.committee_responsible)\n baubewilligungen = create(\n Builder('proposaltemplate')\n .titled(u'Baubewilligung')\n .within(self.templates))\n\n factory = getUtility(\n IVocabularyFactory,\n name='opengever.meeting.ProposalTemplatesForCommitteeVocabulary')\n self.assertItemsEqual(\n [self.proposal_template,\n baubewilligungen,\n 
self.ad_hoc_agenda_item_template,\n self.recurring_agenda_item_template],\n [term.value for term in factory(context=self.dossier)])\n\n self.committee.allowed_proposal_templates = [IUUID(baubewilligungen)]\n self.request.form['form.widgets.committee_oguid'] = [\n unicode(self.committee.load_model().oguid)]\n self.assertItemsEqual(\n [baubewilligungen],\n [term.value for term in factory(context=self.dossier)])\n\n def test_offer_predecessor_proposal_document(self):\n self.login(self.committee_responsible)\n factory = getUtility(\n IVocabularyFactory,\n name='opengever.meeting.ProposalTemplatesForCommitteeVocabulary')\n self.assertItemsEqual(\n [self.proposal_template,\n self.ad_hoc_agenda_item_template,\n self.recurring_agenda_item_template],\n [term.value for term in factory(context=self.dossier)])\n\n self.request.form['form.widgets.predecessor_proposal'] = '/'.join(\n self.proposal.getPhysicalPath()).replace('/plone', '')\n self.assertItemsEqual(\n [self.proposal.get_proposal_document(),\n self.proposal_template,\n self.ad_hoc_agenda_item_template,\n self.recurring_agenda_item_template],\n [term.value for term in factory(context=self.dossier)])\n","sub_path":"opengever/meeting/tests/test_vocabularies.py","file_name":"test_vocabularies.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"187528645","text":"from concurrent import futures\nimport grpc\nimport logging\nimport time\n\nfrom rpc_package.tree_pb2_grpc import add_TreeServiceServicer_to_server, TreeServiceServicer\nfrom rpc_package.tree_pb2 import *\n\n# Root node at the server\n# Stores an item-client mapping\nclass TreeNode():\n def __init__(self, key = None, parent = None, count = 0):\n self._key = key\n self._parent = parent\n self._count = count\n self._children = {}\n self._comb_table = {}\n self._item_table = {}\n\n def addMapping(self, item, client_name):\n self._children[item] = client_name\n\n def lookup(self, item):\n if item in self._children:\n return self._children[item]\n return \"\"\n\n\nclass Tree(TreeServiceServicer):\n def __init__(self, minsup):\n self._root = TreeNode()\n self._size = 0\n self.minsup = minsup\n self._history = []\n\n #-------------------------- public accessors -------------------\n def size(self):\n return self._size\n\n def is_empty(self):\n return self.size() == 0\n\n #iterators\n def __iter__(self):\n for node in self.preorder():\n yield node\n\n def nodes(self):\n for node in self.preorder():\n yield node\n\n def keys(self):\n for node in self.preorder():\n yield node._key\n\n def counts(self):\n for node in self.preorder():\n yield node._count\n\n def children(self, node):\n for child in node._children.keys():\n yield child\n\n def preorder(self):\n if not self.is_empty():\n for node in self._subtree_preorder(self._root):\n yield node\n\n def _subtree_preorder(self, node):\n yield node\n for c in node._children.values():\n for other in self._subtree_preorder(c):\n yield other\n\n def __repr__(self):\n ret = []\n for item in self:\n if item._count >= self.minsup:\n ret.append(str(item._key))\n return str(sorted(ret))\n\n def _addNode(self, parent, value, count=0):\n newNode = TreeNode(value, parent, count)\n parent.addChild(newNode)\n self._size += 1\n return newNode\n\n def _recordAccess(self, node):\n node._count += 1\n\n def _recordInfo(self, node, comb, count=1, exist=True):\n # record pattern\n combStr = (\",\").join(comb)\n node._comb_table[combStr] = node._comb_table.get(combStr, 0) + 
count\n # record item\n for item in comb:\n node._item_table[item] = node._item_table.get(item, 0) + count\n for item in comb:\n # item just became frequent\n prefix = \"\"\n if node._key:\n prefix = node._key + \",\"\n if node._item_table[item] >= self.minsup and (prefix + item) not in node._children:\n # add node\n newNode = self._addNode(node, prefix + item, node._item_table[item])\n # transfer patterns to newNode\n for key in list(node._comb_table.keys()):\n ptn = key.split(\",\")\n if item in ptn:\n i = ptn.index(item)\n if i < len(ptn) - 1:\n suffix = ptn[i + 1:]\n self._recordInfo(newNode, suffix, node._comb_table[key], exist=False)\n # moved the whole combination to the child\n if node._comb_table[key] >= self.minsup:\n del node._comb_table[key]\n\n def insertAndRecord(self, node, comb):\n # not root\n self._recordAccess(node)\n # reached the end\n if not comb:\n return\n self._recordInfo(node, comb)\n for i in range(len(comb)):\n prefix = \"\"\n if node._key:\n prefix = node._key + \",\"\n if prefix + comb[i] in node._children.keys():\n self.insertAndRecord(node._children[prefix + comb[i]], comb[i+1:])\n\n def insert(self, node, trx):\n for i in range(len(trx)):\n if trx[i] not in node._children.keys():\n newNode = self._addNode(node, trx[i])\n self.insertAndRecord(node._children[trx[i]], trx[i+1:])\n\n # ----------------------------------------------------------------------- #\n # rerouter at the server\n # feeds trxs to the mapped clients\n def add_note_root(self, request, context):\n trx = request.trx\n client = request.client\n ret_msg = {}\n # server root acts as the root of a centralized tree\n # it iterates through the trx to feed the segments to the clients\n for i in range(len(trx)):\n # if the item is new and not mapped to a client\n if not self._root.lookup(trx[i]):\n print(\"New item: create mapping\")\n self._root.addMapping(trx[i], client)\n # Add only one single note to root\n # server tree size always 0\n # print(\"Appended. 
Size: \" + str(self._size))\n self._history.append(streamRequest(client=client,trx=trx[i:],message=\"boardcasting for client: \"+client, addNewItem=True))\n ret_msg[trx[i:]] = \"Append \" + trx[i:] + \" to root\\n\"\n # if the item is already mapped to a client\n else:\n subtree_loc = self._root.lookup(trx[i])\n self._history.append(streamRequest(client=subtree_loc,trx=trx[i:],message=\"boardcasting for client: \"+subtree_loc, addNewItem=False))\n ret_msg[trx[i:]] = \"Reroute to client: \" + subtree_loc +\"\\n\"\n return rootAddReply(status=True,\n client=client, message=ret_msg, trx=\"\")\n\n # Keep sending messages for new insert requests\n def Stream(self, request, context):\n lastindex = 0\n while True:\n while len(self._history) > lastindex:\n boardcast = self._history[lastindex]\n lastindex += 1\n yield boardcast\n\n\ndef serve():\n # 通过thread pool来并发处理server的任务\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n\n #将对应的任务处理函数添加到rpc server中\n add_TreeServiceServicer_to_server(Tree(minsup=0.4), server)\n\n #此处使用非安全接口,gRPC支持TLS/SSL安全连接,以及各种鉴权机制\n server.add_insecure_port('[::]:50000')\n server.start()\n try:\n while True:\n time.sleep(60 * 60 * 24)\n except KeyboardInterrupt:\n server.stop(0)\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n serve()\n","sub_path":"gRPC/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"633330719","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2020 James\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport re\nfrom typing import TYPE_CHECKING, Any, Optional\n\nfrom bs4 import BeautifulSoup\n\nfrom .enums import EResult\n\nif TYPE_CHECKING:\n from aiohttp import ClientResponse\n\n from .protobufs import MsgProto\n\n\n__all__ = (\n \"SteamException\",\n \"NotFound\",\n \"Forbidden\",\n \"LoginError\",\n \"NoCMsFound\",\n \"HTTPException\",\n \"ClientException\",\n \"ConfirmationError\",\n \"AuthenticatorError\",\n \"InvalidCredentials\",\n \"WSException\",\n \"WSForbidden\",\n \"WSNotFound\",\n \"InvalidSteamID\",\n)\n\nCODE_FINDER = re.compile(r\"\\S(\\d+)\\S\")\n\n\nclass SteamException(Exception):\n \"\"\"Base exception class for steam.py.\"\"\"\n\n\nclass ClientException(SteamException):\n \"\"\"Exception that's thrown when something isn't possible\n but is handled by the client.\n\n Subclass of :exc:`SteamException`.\n \"\"\"\n\n\nclass HTTPException(SteamException):\n \"\"\"Exception that's thrown for any web API error.\n\n Subclass of :exc:`SteamException`.\n\n Attributes\n ------------\n response: :class:`aiohttp.ClientResponse`\n The response of the failed HTTP request.\n message: :class:`str`\n The message associated with the error.\n Could be an empty string if no message can parsed.\n status: :class:`int`\n The status code of the HTTP request.\n code: Union[:class:`.EResult`, :class:`int`]\n The Steam specific error code for the failure.\n It will attempt to find a matching a :class:`.EResult` for the value.\n \"\"\"\n\n def __init__(self, response: \"ClientResponse\", data: Optional[Any]):\n self.response = response\n self.status = response.status\n self.code = 0\n self.message = \"\"\n\n if data:\n if isinstance(data, dict):\n if len(data) != 1 and data.get(\"success\", False): # ignore {'success': False} as the message\n message = data.get(\"message\") or str(list(data.values())[0])\n code = data.get(\"result\") or CODE_FINDER.findall(message)\n if code:\n self.code = EResult.try_value(int(code[0]))\n self.message = CODE_FINDER.sub(\"\", message)\n else:\n text = BeautifulSoup(data, \"html.parser\").get_text(\"\\n\")\n self.message = text if text else \"\"\n\n self.message = self.message.replace(\" \", \" \")\n super().__init__(\n f\"{response.status} {response.reason} (error code: {self.code})\"\n f'{f\": {self.message}\" if self.message else \"\"}'\n )\n\n\nclass Forbidden(HTTPException):\n \"\"\"Exception that's thrown when status code 403 occurs.\n\n Subclass of :exc:`HTTPException`.\n \"\"\"\n\n\nclass NotFound(HTTPException):\n \"\"\"Exception that's thrown when status code 404 occurs.\n\n Subclass of :exc:`HTTPException`.\n \"\"\"\n\n\nclass WSException(SteamException):\n \"\"\"Exception that's thrown for any web API error.\n\n Subclass of :exc:`SteamException`.\n\n Attributes\n ------------\n msg: Union[:class:`~steam.protobufs.MsgProto`, :class:`~steam.protobufs.Msg`]\n The received protobuf.\n code: Union[:class:`~steam.EResult`, :class:`int`]\n The Steam specific error code for the failure.\n It will attempt to find a matching a :class:`~steam.EResult` for the value.\n \"\"\"\n\n def __init__(self, msg: \"MsgProto\"):\n self.msg = msg\n self.code = EResult.try_value(msg.header.eresult)\n super().__init__(f\"The request {msg.header.job_name_target} failed. 
(error code: {repr(self.code)})\")\n\n\nclass WSForbidden(WSException):\n \"\"\"Exception that's thrown when the websocket returns\n an :class:`.EResult` that means we do not have permission\n to perform an action.\n Similar to :exc:`Forbidden`.\n\n Subclass of :exc:`WSException`.\n \"\"\"\n\n\nclass WSNotFound(WSException):\n \"\"\"Exception that's thrown when the websocket returns\n an :class:`.EResult` that means the object wasn't found.\n Similar to :exc:`NotFound`.\n\n Subclass of :exc:`WSException`.\n \"\"\"\n\n\nclass LoginError(ClientException):\n \"\"\"Exception that's thrown when a login fails.\n\n Subclass of :exc:`ClientException`.\n \"\"\"\n\n\nclass InvalidCredentials(LoginError):\n \"\"\"Exception that's thrown when credentials are incorrect.\n Subclass of :exc:`LoginError`.\n \"\"\"\n\n\nclass AuthenticatorError(ClientException):\n \"\"\"Exception that's thrown when Steam cannot authenticate your details.\n\n Subclass of :exc:`LoginError`.\n \"\"\"\n\n\nclass ConfirmationError(AuthenticatorError):\n \"\"\"Exception that's thrown when a confirmation fails.\n\n Subclass of :exc:`AuthenticatorError`.\n \"\"\"\n\n\nclass NoCMsFound(LoginError):\n \"\"\"Exception that's thrown when no CMs can be found to connect to.\n\n Subclass of :exc:`LoginError`.\n \"\"\"\n\n\nclass InvalidSteamID(SteamException):\n \"\"\"Exception that's thrown when a SteamID cannot be valid.\n\n Subclass of :exc:`SteamException`.\n \"\"\"\n\n def __init__(self, id: Any, msg: Optional[str] = None):\n self.id = id\n super().__init__(\n f\"{id!r} cannot be converted to any valid SteamID {f'as it is {msg}' if msg is not None else ''}\"\n )\n","sub_path":"steam/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"213553090","text":"from distutils.core import setup\n\ninstall_requires = open(\"requirements.txt\").read().strip().split(\"\\n\")\n\nsetup(\n name = 'pyfsync',\n packages = ['pyfsync'],\n version = '0.16',\n license='MIT', \n description = 'Synchronize directories between hosts. 
', \n author = 'Be Water',\n author_email = 'be@water.com',\n url = 'https://github.com/hanayashiki/fsync', \n download_url = 'https://github.com/hanayashiki/fsync', \n keywords = ['Websocket', 'File Management', 'Tool'], \n install_requires=install_requires,\n include_package_data=True,\n classifiers=[\n 'Development Status :: 3 - Alpha', \n 'Intended Audience :: Developers', \n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License', \n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n","sub_path":"pypi_install_script/pyfsync-0.16.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"467843806","text":"from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest\n\nfrom django.template import RequestContext\nfrom django.shortcuts import render, get_object_or_404, render_to_response\nfrom .forms import UploadFileForm\nfrom django.core.urlresolvers import reverse\n\nfrom .models import RawFile, TimeSeries # TimeSeries is referenced below; assumed to live in .models alongside RawFile\nfrom openpyxl import load_workbook\nimport django_excel as excel\nimport pyexcel\n\ndef upload_file(request):\n if request.method == 'POST':\n\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n # file is saved\n form.save()\n print ('>>>> form ok')\n return HttpResponseRedirect(reverse('file_upload.views.upload_file'))\n\n else:\n print ('>>>> form invalid')\n else:\n form = UploadFileForm()\n print ('>>>> upload failed')\n return render(request, 'file_upload/upload.html', {'form': form})\n\ndef upload_file_v2(request):\n if request.method == 'POST':\n\n form = UploadFileForm(request.POST, request.FILES)\n def choice_func(row):\n print (row[0])\n tsr = TimeSeries.objects.filter(slug=row[0])[0]\n row[0] = tsr # previously assigned the misspelled name `trs`, which raised a NameError\n return row\n if form.is_valid():\n # file is saved\n print (request.FILES.keys())\n request.FILES['raw_file'].save_book_to_database(\n models = [\n (TimeSeries, ['uw_year', 'as_at', 'loss_type', 'os_usd', 'paid_usd','slug'], None, 0)\n ]\n )\n\n print ('>>>> form ok')\n return HttpResponse ('OK', status=200)\n\n else:\n print ('>>>> form invalid')\n return HttpResponse('Bad')\n else:\n form = UploadFileForm()\n print ('>>>> upload failed')\n return render(request, 'file_upload/upload.html', {'form': form})\n\n\n\n\ndef upload_file_v3(request):\n if request.method == \"POST\":\n form = UploadFileForm(request.POST,\n request.FILES)\n if form.is_valid():\n print (request.FILES.keys())\n request.FILES['raw_file'].save_to_database(\n name_columns_by_row=0,\n model=TimeSeries,\n mapdict=['uw_year', 'as_at', 'loss_type', 'os_usd', 'paid_usd','slug'])\n return HttpResponse(\"OK\")\n else:\n print ('not valid')\n return HttpResponseBadRequest()\n else:\n form = UploadFileForm()\n return render_to_response('file_upload/upload.html',\n {'form': form},\n context_instance=RequestContext(request))\n\n\n\n\n\n\ndef file_detail(request, file_id):\n current_file = get_object_or_404(RawFile, pk=file_id) # the model class is RawFile; the lowercase `raw_file` was undefined\n wb = load_workbook(current_file)\n #print (wb.get_sheet_names())\n return render(request, 'file_upload/file_detail.html', {'current_file':current_file})\n","sub_path":"file_upload/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"14177944","text":"import pytest\n\nfrom plato.internal.graph import toposort\n\n\ndef test_toposort():\n graph = {\n 
0: (2, 3, 5),\n 2: (5, 3),\n 3: (4,),\n 4: (),\n 5: (),\n }\n valid_toposorts = {\n (4, 5, 3, 2, 0),\n (5, 4, 3, 2, 0),\n }\n\n assert graph == {\n 0: (2, 3, 5),\n 2: (5, 3),\n 3: (4,),\n 4: (),\n 5: (),\n }\n assert tuple(toposort(graph)) in valid_toposorts\n\n\ndef test_toposort_with_cycle_raises():\n graph = {0: [1], 1: [0, 2], 2: []}\n\n with pytest.raises(ValueError):\n toposort(graph)\n\n\ndef test_toposort_on_empty_graph_returns_empty_list():\n assert toposort({}) == []\n","sub_path":"tests/internal/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"452165593","text":"\"\"\"ppob_multipay URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.auth.views import (\n LoginView, LogoutView\n)\n\nfrom rest_framework_jwt.views import (\n obtain_jwt_token, refresh_jwt_token, verify_jwt_token\n)\n\nurlpatterns = [\n path('jwt-api-token-auth/', obtain_jwt_token),\n path('jwt-api-token-refresh/', refresh_jwt_token),\n path('jwt-api-token-verify/', verify_jwt_token),\n path('login/', LoginView.as_view(), name='login'),\n path('logout/', LogoutView.as_view(), name='logout'),\n path('admin/', admin.site.urls),\n path('manage/', include('userprofile.urls')),\n path('api/manage/', include('userprofile.api.urls')),\n path('payment/', include('payment.urls')),\n path('api/payment/', include('payment.api.urls')),\n path('billing/', include('bill.urls')),\n path('api/billing/', include('bill.api.urls')),\n path('instanpay/', include('instanpay.urls')),\n path('api/instanpay/', include('instanpay.api.urls')),\n path('api/ppob/', include('ppob.api.urls')),\n\n]\n\nif not settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"ppob_multipay/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"298435913","text":"# -*- coding:utf-8 -*-\n'''\ndps 数据导入\n1、ftp下载各源系统的数据文件\n2、解压数据文件并导入到dps数据库\n3、导入前先清空表\n\nCreated on 2015年9月30日\n\n@author: shaoxw\n'''\n\nimport datetime\nimport shutil\nimport os.path\nimport tarfile\n\nfrom loadutil import getFtpClient,getDBGZName,getSignName,getTblFileName,escapefile,formatSql\nfrom dbmetaload import ALLTABLENAMES\nfrom pgcomm import PGUtils\n\nfrom settings import SOURCE_DICT,SOURCE_DATA_DATE,allSys,localDataHome,ftpServerIp,ftpServerPort,ftpUser,ftpPwd,ftpSourceDataDir,logger\n\n\ndef downDBFile(each_sys,curDate):\n '''\n 下载源数据文件\n '''\n ftpClient = getFtpClient(ftpServerIp,int(ftpServerPort),ftpUser,ftpPwd)\n logger.info(ftpClient.getwelcome())\n try:\n serverPwd = ftpClient.pwd()\n logger.info('ftp pwd: '+serverPwd)\n # 切换到源数据文件所在的目录\n 
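The test_graph.py record above pins down the contract of plato.internal.graph.toposort: dependencies are emitted before their dependents, the input graph is left unmodified, a cycle raises ValueError, and an empty graph yields an empty list. A Kahn-style sketch that satisfies those tests — an illustration, not the library's actual implementation:

def toposort(graph):
    # graph maps node -> iterable of its dependencies; dependencies are
    # emitted before the nodes that require them, matching the tests above.
    pending = {node: set(deps) for node, deps in graph.items()}  # don't mutate input
    order = []
    while pending:
        ready = [n for n, deps in pending.items() if not deps]
        if not ready:
            raise ValueError("dependency graph contains a cycle")
        for n in ready:
            del pending[n]
            order.append(n)
        for deps in pending.values():
            deps.difference_update(ready)
    return order

assert tuple(toposort({0: (2, 3, 5), 2: (5, 3), 3: (4,), 4: (), 5: ()})) in {
    (4, 5, 3, 2, 0), (5, 4, 3, 2, 0)}
assert toposort({}) == []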
ftpClient.cwd(ftpSourceDataDir[\"SERVERDIR_\"+each_sys])\n logger.info('ftp cwd: '+ftpSourceDataDir[\"SERVERDIR_\"+each_sys])\n tarName = getDBGZName(each_sys, curDate)\n signName = getSignName(each_sys, curDate)\n lsTmp = ftpClient.nlst()\n if signName in lsTmp and tarName in lsTmp :\n ctFile = os.path.join(localDataHome, tarName)\n ftpClient.retrbinary('RETR ' + tarName, open(ctFile, 'wb').write)\n csFile = os.path.join(localDataHome, signName)\n ftpClient.retrbinary('RETR ' + signName, open(csFile, 'wb').write)\n logger.info(\"下载完成(\"+each_sys+\"): \"+tarName+\", \"+signName)\n else:\n logger.error(\"ftp server不存在文件(\"+each_sys+\"): \"+tarName+\", \"+signName)\n return False\n # ftpClient.cwd(serverPwd)\n # logger.info('ftp cwd: '+serverPwd)\n finally:\n ftpClient.close()\n return True\n\ndef truncateTbl(conn, tblName):\n '''\n 清空表\n '''\n sql = \"TRUNCATE TABLE \" + tblName\n logger.debug(sql)\n with conn:\n with conn.cursor() as cur:\n cur.execute(sql)\n\ndef copy_from(conn, tableName, fileName):\n \"\"\"\n 从文件拷贝数据到表。执行成功,事务提交;执行出现异常,事务回滚。\n conn: 数据库连接对象\n tableName: 数据库表名 \n fileName: 文件名\n \"\"\"\n with conn:\n with conn.cursor() as cur:\n with open(fileName, mode='r', encoding='utf-8') as fileObj:\n cur.copy_from(fileObj, tableName, null='NULL')\n\ndef importDB(conn, each_sys, curDate, truncate=True):\n '''\n 解压数据文件,然后导入到数据库\n '''\n d00 = datetime.datetime.now()\n logger.info(\"解压数据文件开始...\"+each_sys)\n extractPath = os.path.join(localDataHome, each_sys, curDate)\n # 删除已存在的解压目录。 如果目录不存在,在解压时回自动创建。这里不需要创建解压目录。\n if os.path.exists(extractPath):\n shutil.rmtree(extractPath, ignore_errors=True)\n tarName = getDBGZName(each_sys, curDate)\n gzFileTmp = os.path.join(localDataHome, tarName)\n with tarfile.open(gzFileTmp, 'r:gz') as tar:\n tar.extractall(extractPath)\n\n logger.info(\"解压数据文件结束..., 开始导入到PostgreSQL...\")\n # 导入PostgreSQL数据库\n filetables = ALLTABLENAMES[each_sys]\n for (each_file,each_tbl) in filetables.items():\n if truncate:\n #导入前,先清空表\n truncateTbl(conn, each_tbl)\n fileNameTmp = getTblFileName(each_file)\n fileNameTmp = os.path.join(extractPath, fileNameTmp)\n fileNameTmpDesc = os.path.join(extractPath, fileNameTmp+\".new\")\n #数据加载前的预处理,这里的是mysql数据库的处理。 如果是其它的数据库,需评估该方法是否支持。 \n escapefile(fileNameTmp,fileNameTmpDesc)\n d1 = datetime.datetime.now()\n copy_from(conn, each_tbl, fileNameTmpDesc)\n d2 = datetime.datetime.now()\n logger.info(\"copy %s, 耗时: %s 秒... 
from %s 完成.\" % (each_tbl,(d2-d1).seconds, fileNameTmpDesc))\n d01 = datetime.datetime.now()\n logger.info(\"导入到PostgreSQL结束, 源: %s, 耗时: %s 秒\" % (each_sys,(d01-d00).seconds))\n\n# 插入日志: 数据日期,处理结果,日志内容,开始时间,结束时间,处理时间(秒),记录插入时间\nsql_insert_log = '''\ninsert into ods.dataload_log \n(source_id, source_name, data_date, deal_result, log_content, start_time, end_time, deal_time, create_date) values \n(%s, %s, %s, %s, %s, %s, %s, %s, %s)\n'''\n\ndef insert_log(conn, source_id, source_name, curDate, deal_result, log_content, start_time=None, end_time=None, deal_time=0):\n '''\n 记录操作日志\n '''\n logger.debug(formatSql(sql_insert_log))\n stime = ''\n etime = ''\n time_pattern = '%H%M%S'\n if start_time : stime = start_time.strftime(time_pattern)\n if end_time : etime = end_time.strftime(time_pattern)\n value = (source_id, source_name, curDate, deal_result, log_content, stime, etime, deal_time, datetime.datetime.now())\n logger.debug('log value: '+str(value))\n with conn:\n with conn.cursor() as cur:\n cur.execute(sql_insert_log, value)\n\ndef dealOneSys(conn, each_sys, curDate):\n '''\n 单个源的处理\n '''\n d0 = datetime.datetime.now()\n if downDBFile(each_sys, curDate) : \n importDB(conn, each_sys, curDate)\n # 记录成功日志\n d1 = datetime.datetime.now()\n log_content=\"源数据导入成功[%s]\" % each_sys\n insert_log(conn,SOURCE_DICT[each_sys],each_sys, curDate,'OK',log_content,d0,d1,(d1-d0).seconds)\n else:\n # 记录失败日期\n log_content=\"源数据下载失败[%s]\" % each_sys\n insert_log(conn,SOURCE_DICT[each_sys],each_sys, curDate,'FAIL',log_content)\n\ndef updateDataDate(conn,curDate):\n '''\n 更新数据日期\n '''\n sql = 'UPDATE ods.dataload_status SET data_date=%s'\n param_val = (curDate,)\n with conn:\n with conn.cursor() as cur:\n logger.debug(sql)\n logger.debug(\"... param value: \" + str(param_val))\n cur.execute(sql, param_val)\n logger.info(\"更新数据日期完成, 数据日期: \" + curDate)\n \n\ndef deal():\n '''\n 源数据导入处理\n '''\n d0 = datetime.datetime.now()\n logger.info(\"start... 源数据导入处理.\")\n\n # 如果未指定数据日期,则取前一天\n if not SOURCE_DATA_DATE :\n curDate = (datetime.date.today() + datetime.timedelta(days=-1)).strftime(\"%Y%m%d\")\n else:\n curDate = SOURCE_DATA_DATE\n pgUtils = PGUtils()\n conn = pgUtils.getConnection()\n try:\n # 设置数据日期\n updateDataDate(conn,curDate)\n # 导入处理\n for each_sys in allSys:\n dealOneSys(conn, each_sys, curDate)\n finally:\n conn.close()\n d1 = datetime.datetime.now()\n logger.info(\"end..... 源数据导入处理, 耗时: %s 秒. 
\" % (d1-d0).seconds)\n\nif __name__ == '__main__':\n deal()","sub_path":"etl/dataload/scripts/dataload.py","file_name":"dataload.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"106694940","text":"\nhour = input(\"Digite a hora:\")\nminute = input(\"Digite o minuto:\")\nsecond = input(\"Digite o segundo:\")\n\nif hour.isdigit() and minute.isdigit() and second.isdigit():\n hour = int(hour)\n minute = int(minute)\n second = int(second)\n if 0 <= hour <= 23 and 0 <= minute <= 59 and 0 <= second <= 59:\n if 0 <= hour <= 11:\n print(\"Bom Dia!\")\n print(\"Hora atual: {}:{}:{}\".format(hour, minute, second))\n elif 12 <= hour <= 17:\n print(\"Boa Tarde!\")\n print(\"Hora atual: {}:{}:{}\".format(hour, minute, second))\n elif 18 <= hour <= 23:\n print(\"Boa Noite!\")\n print(\"Hora atual: {}:{}:{}\".format(hour, minute, second))\n else:\n print(\"Valores Inválidos para horas...\")\nelse:\n print(\"Tipos de dados inválidos...\")","sub_path":"Programação Estruturada/Exercícios de Condicionais/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"473639470","text":"class Solution(object):\n def restoreIpAddresses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n res = []\n self.dfs(res, '', s, 4)\n return res\n def dfs(self, res, subset, s, index):\n ls = len(s)\n if index == 0 and ls == 0 :\n res.append(subset[:len(subset) - 1])\n return\n if ls < 0 or ls > index * 3:\n return False\n for i in range(1, 4):\n # This line, if s[:i] is None, there is no int(s[:i])\n if s[:i]:\n if i > 1 and s[0] == '0':\n return False\n if int(s[:i]) < 256 and len(s[:i]) == i:\n self.dfs(res, subset + s[:i] + '.', s[i:], index - 1)\n\nif __name__ == '__main__':\n S = Solution()\n # test = S.restoreIpAddresses(\"25525511135\")\n # test = S.restoreIpAddresses(\"0000\")\n test = S.restoreIpAddresses(\"1111\")\n print(test)\n # print(int('0'))","sub_path":"Project/Leetcode/Backtracking/93. Restore IP Addresses.py","file_name":"93. Restore IP Addresses.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"610792980","text":"# Posty Project: opensocket_client.py\n# Copyright (c) 2019 Clayton Darwin claytondarwin@gmail.com\n\n# notify\nprint('LOAD: opensocket_client.py')\n\n# ----------------------------------------------\n# imports\n# ----------------------------------------------\n\n# standard library imports\nimport sys,time,traceback,socket,select\n\n# ----------------------------------------------\n# simple non-threaded socket client\n# ----------------------------------------------\n\nclass OpenSocket_Client:\n\n # ----------------------------------------------\n\n # this is an example of how you might interact with the OpenSocket_Server\n # this has minimal socket management (i.e. 
no reconnect and such)\n\n # connect and sendline errors (or closed socket) will raise exceptions\n # getline and waitforline will not raise exceptions (will return None)\n\n # set the ip and port variables, then connect()\n\n # use sendline(line) to send a line of str or bytes data\n # sent data will be converted bytes(str(line),'utf-8','?').strip() + b'\\r\\n'\n\n # use getline() to get a line of data from server\n # it will read io buffer and parse lines\n # it will return None if there are no lines\n # it will return bytes.strip() data if there is a line\n # it does not wait for a line if none are available\n\n # use waitforline(timeout_seconds) to get a line if you want to wait\n # it will keep trying until timeout is up\n # it will return same data as getline()\n\n # ----------------------------------------------\n\n # user defined variables\n server_ip = None\n server_port = 8765\n server_connect_timeout = 10 # how long to try and connect\n\n # process variables\n socket = None\n socket_lines = []\n socket_buffer = b''\n\n def connect(self):\n\n # try connect\n try:\n\n # clear data\n socket_lines = []\n\n # close open socket\n self.disconnect()\n\n # create socket\n self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.socket.settimeout(0.0) # non-blocking\n\n # when to stop trying\n stop_at = time.time() + self.server_connect_timeout\n\n # try to connect\n while 1:\n\n if self.socket.connect_ex((self.server_ip,self.server_port)) == 0:\n print('SOCKET CONNECTED')\n break\n\n elif time.time() >= stop_at:\n raise OSError('No route to host.')\n\n else:\n #print('CONNECT WAIT')\n time.sleep(0.1)\n\n # done\n return True\n \n # connect failed\n except Exception as e:\n print(traceback.format_exc())\n self.disconnect() # may raise errors\n raise e\n\n def disconnect(self):\n\n # socket did exist\n if self.socket:\n\n # attempt to inform server\n try:\n self.socket.sendall(b'EOD\\r\\n')\n time.sleep(0.5) # wait a bit for send to happen\n except:\n pass\n\n # attempt to formally close socket \n try:\n self.socket.close()\n except:\n pass\n\n # notify\n print('SOCKET CLOSED')\n\n # set socket to None\n self.socket = None\n\n def sendline(self,line):\n\n # socket must be open\n if not self.socket:\n raise OSError('Socket is not open.')\n\n # data must be bytes\n if type(line) != bytes:\n line = bytes(str(line),'utf-8','?')\n\n # format\n line = line.strip() + b'\\r\\n'\n\n # catch broken pipe\n try:\n self.socket.sendall(line)\n\n # broken pipe\n except BrokenPipeError as e:\n self.disconnect()\n raise e\n\n # done\n return len(line)\n\n def getline(self):\n\n # line exists in buffer\n if self.socket_lines:\n return self.socket_lines.pop(0)\n\n # socket closed\n if not self.socket:\n return None\n\n # read io buffer data\n while 1:\n rlist,wlist,xlist = select.select([self.socket],[],[],0.01)\n if rlist:\n self.socket_buffer += self.socket.recv(1024)\n else:\n break\n\n # parse bytes buffer\n if b'\\r\\n' in self.socket_buffer:\n self.socket_buffer = self.socket_buffer.split(b'\\r\\n')\n for line in self.socket_buffer[:-1]:\n line = line.strip()\n if line:\n self.socket_lines.append(line)\n if line == b'EOD':\n self.disconnect()\n self.socket_buffer = self.socket_buffer[-1]\n\n # return line\n if self.socket_lines:\n return self.socket_lines.pop(0)\n else:\n return None\n\n def waitforline(self,timeout=10):\n\n # line exists in buffer\n if self.socket_lines:\n return self.socket_lines.pop(0)\n\n # socket closed\n if not self.socket:\n return None\n\n # when to stop trying\n 
stop_at = time.time() + timeout\n\n # loop until timeout\n while 1:\n\n line = self.getline()\n\n if line:\n return line\n\n elif time.time() >= stop_at:\n return None\n\n else:\n time.sleep(0.01)\n\n# ----------------------------------------------\n# end\n# ----------------------------------------------\n","sub_path":"Dimension/MicroPython/Sockets/OpenSocket/opensocket_client.py","file_name":"opensocket_client.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"20743281","text":"import os\nfrom setuptools import setup, find_packages\n\nfrom dockerfile_bakery.about import __version__, __description__\n\n\ndef requirements():\n reqfile = 'requirements.txt'\n with open(os.path.join(os.path.dirname(__file__), reqfile)) as f:\n return f.read().splitlines()\n\n\nsetup(name='dockerfile_bakery',\n version=__version__,\n description=__description__,\n author='kujyp',\n author_email='pjo901018@gmail.com',\n url='https://github.com/kujyp/dockerfile_bakery',\n packages=find_packages(exclude=[]),\n entry_points='''\n [console_scripts]\n dockerfile_bakery = dockerfile_bakery.cli:main\n ''',\n include_package_data=True,\n install_requires=requirements())\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"519989542","text":"from pylab import *\nimport tables\n\nimport pylab\nfrom matplotlib import rcParams\nimport matplotlib.pyplot as plt\n\n# customization for figure\n#rcParams['lines.linewidth'] = 2\nrcParams['font.size'] = 18\n#rcParams['xtick.major.size'] = 8 # default is 4\n#rcParams['xtick.major.width'] = 3 # default is 0.5\n#rcParams['ytick.major.size'] = 8 # default is 4\n#rcParams['ytick.major.width'] = 3 # default is 0.5\nrcParams['figure.facecolor'] = 'white'\n#rcParams['figure.subplot.bottom'] = 0.125\n#rcParams['figure.subplot.right'] = 0.85 # keep labels/ticks of colobar in figure\nrcParams['image.interpolation'] = 'none'\nrcParams['image.origin'] = 'lower'\nrcParams['contour.negative_linestyle'] = 'solid'\n#rcParams['savefig.bbox'] = 'tight'\n\n# Math/LaTex fonts:\n# http://matplotlib.org/users/mathtext.html\n# http://matplotlib.org/users/usetex.html\n# Example: xlabel(r'$t \\cdot l / V_{A,bc}$')\nrcParams['mathtext.default'] = 'regular' # match the font used for regular text\n\ngasGamma = 1.4\n\nfh = tables.openFile(\"s398-euler-reim-ds-2d_q_5.h5\")\ngrid = fh.root.StructGrid\nnx, ny = grid._v_attrs.vsNumCells[0], grid._v_attrs.vsNumCells[1]\ndx = 1.0/nx\ndy = 1.0/ny\nXe = linspace(0.0, 1.0, nx+1)\nYe = linspace(0.0, 1.0, ny+1)\nXXe, YYe = meshgrid(Xe, Ye)\n\nX = linspace(0.5*dx, 1.0-0.5*dx, nx)\nY = linspace(0.5*dx, 1.0-0.5*dx, ny)\nXX, YY = meshgrid(X, Y)\n\nq = fh.root.StructGridField\nrho = q[:,:,0]\nu = q[:,:,1]/rho\nv = q[:,:,2]/rho\npr = (q[:,:,4] - 0.5*rho*(u**2+v**2))*(gasGamma-1)\n\n# plot it\npcolormesh(XXe, YYe, transpose(pr))\ncolorbar()\n\nrhoMin = 0.43\nrhoMax = 0.99\nstep = 0.02\nnlevels = (int) (rhoMax-rhoMin)/step\ncList = linspace(rhoMin, rhoMax, 29)\ncontour(XX, YY, transpose(rho), cList, colors='k')\n\nsavefig('s398-pr-dens-flow.png', dpi=300, bbox_inches='tight')\n\nshow()\n","sub_path":"source/sims/s398/plot-riem.py","file_name":"plot-riem.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"435855377","text":"import numpy as 
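Tying the documented flow of OpenSocket_Client together, a hedged usage sketch — the address is a placeholder and an OpenSocket_Server is assumed to be already listening:

client = OpenSocket_Client()
client.server_ip = '192.168.1.42'       # placeholder, set to your server
client.server_port = 8765

client.connect()                         # raises OSError on failure
client.sendline('ping')                  # encoded, stripped, CRLF-terminated
reply = client.waitforline(timeout=5)    # stripped bytes, or None on timeout
if reply is not None:
    print('server said:', reply.decode('utf-8', 'replace'))
client.disconnect()                      # sends EOD and closes the socket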
np\r\nimport math\r\n\r\ndef cluster(points, metric, r):\r\n clusters = [[p] for p in points]\r\n prev_clusters = []\r\n while (clusters != prev_clusters):\r\n prev_clusters = clusters\r\n i = 0\r\n while (i < len(clusters)):\r\n j = i + 1\r\n while (j < len(clusters)):\r\n if (has_near(clusters[i], clusters[j], metric, r)):\r\n clusters[j] = union(clusters[i], clusters[j])\r\n clusters[i] = []\r\n j += 1\r\n i += 1\r\n\r\n remove_empty(clusters)\r\n return clusters\r\n\r\ndef has_near(A, B, metric, r):\r\n for x in A:\r\n for y in B:\r\n if metric(x,y) < r:\r\n return True\r\n return False\r\n\r\ndef remove_empty(l):\r\n i = 0\r\n while i < len(l):\r\n if l[i] == []:\r\n l.pop(i)\r\n i -= 1\r\n i += 1\r\n\r\ndef unique(l):\r\n return list(set(l))\r\n\r\ndef union(A, B):\r\n return unique(A+B)\r\n\r\ndef m(p, q):\r\n return math.sqrt((p[0] - q[0])**2 + (p[1] - q[1])**2)\r\n\r\n","sub_path":"py/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"543637039","text":"# coding: utf-8\n\n\"\"\"\n mistune_contrib.meta\n ~~~~~~~~~~~~~~~~~~~~\n\n Support YAML Meta features for mistune.\n\n\n ---\n key: value\n author: yxy\n list:\n - a\n - b\n ---\n\n\"\"\"\n\n\nimport re\nimport yaml\n\n\nMETA = re.compile(r'^---(.+?)---\\r?\\n', re.DOTALL)\n\n\ndef parse(text):\n \"\"\"Parse the given text into metadata and strip it for a Markdown parser.\n\n :param text: text to be parsed\n \"\"\"\n rv = {}\n m = META.match(text)\n if m:\n rv = yaml.load(m.group(1))\n text = text[len(m.group(0)):]\n return rv, text\n","sub_path":"mistune_contrib/yaml_meta.py","file_name":"yaml_meta.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"119419293","text":"# Copyright © 2019 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Endpoints to to manage user.\"\"\"\n\nimport traceback\nfrom flask import request\nfrom flask_restplus import Resource, Namespace\nfrom auth_api.services.keycloak import KeycloakService\nimport opentracing\nfrom flask_opentracing import FlaskTracing\nfrom ..utils.trace_tags import TraceTags as tags\nfrom auth_api.utils.util import cors_preflight\n\n\nAPI = Namespace('user', description='Keycloak Admin - user')\nKEYCLOAK_SERVICE = KeycloakService()\n\ntracer = opentracing.tracer\ntracing = FlaskTracing(tracer)\n\n\n@cors_preflight('GET, POST, DELETE, OPTIONS')\n@API.route('', methods=['GET', 'POST', 'DELETE', 'OPTIONS'])\nclass User(Resource):\n \"\"\"End point resource to manage users.\"\"\"\n\n @staticmethod\n @tracing.trace()\n def post():\n \"\"\"Add user, return a new/existing user.\"\"\"\n\n current_span = tracer.active_span\n data = request.get_json()\n if not data:\n data = request.values\n try:\n response = KEYCLOAK_SERVICE.add_user(data)\n\n return response, 201\n except Exception as err:\n current_span.set_tag(tags.ERROR, 'true')\n tb 
= traceback.format_exc()\n current_span.log_kv({'event': 'error',\n 'error.kind': str(type(err)),\n 'error.message': err.with_traceback(None),\n 'error.object': tb})\n current_span.set_tag(tags.HTTP_STATUS_CODE, 500)\n return {\"error\": \"{}\".format(err)}, 500\\\n\n\n @staticmethod\n @tracing.trace()\n def get():\n \"\"\"Get user by username and return a user\"\"\"\n\n current_span = tracer.active_span\n data = request.get_json()\n if not data:\n data = request.values\n try:\n user = KEYCLOAK_SERVICE.get_user_by_username(data.get(\"username\"))\n return user, 200\n except Exception as err:\n current_span.set_tag(tags.ERROR, 'true')\n tb = traceback.format_exc()\n current_span.log_kv({'event': 'error',\n 'error.kind': str(type(err)),\n 'error.message': err.with_traceback(None),\n 'error.object': tb})\n current_span.set_tag(tags.HTTP_STATUS_CODE, 500)\n return {\"error\": \"{}\".format(err)}, 500\\\n\n\n @staticmethod\n @tracing.trace()\n def delete():\n \"\"\"Delete user by username\"\"\"\n\n current_span = tracer.active_span\n data = request.get_json()\n if not data:\n data = request.values\n try:\n response = KEYCLOAK_SERVICE.delete_user_by_username(data.get(\"username\"))\n return response, 204\n except Exception as err:\n current_span.set_tag(tags.ERROR, 'true')\n tb = traceback.format_exc()\n current_span.log_kv({'event': 'error',\n 'error.kind': str(type(err)),\n 'error.message': err.with_traceback(None),\n 'error.object': tb})\n current_span.set_tag(tags.HTTP_STATUS_CODE, 500)\n return {\"error\": \"{}\".format(err)}, 500\\\n\n","sub_path":"auth-api/src/auth_api/resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"512314536","text":"import pymysql\r\n\r\nfrom config import (\r\n DB,\r\n DB_HOST,\r\n DB_USER,\r\n DB_PASSWORD,\r\n DB_CHARSET\r\n)\r\n\r\n\r\nclass DBResult:\r\n Suc = False\r\n Result = None\r\n Err = None\r\n Rows = None\r\n\r\n def __init__(self):\r\n pass\r\n\r\n\r\nclass BaseDB:\r\n def __init__(self):\r\n self.dbConn = pymysql.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB, charset=DB_CHARSET,\r\n cursorclass=pymysql.cursors.DictCursor)\r\n self.cursor = self.dbConn.cursor()\r\n self.columns = []\r\n\r\n # Return DBResult\r\n def select(self, sql, params=None):\r\n # if self.dbConn is None:\r\n # self.dbConn = pymysql.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB, charset=DB_CHARSET,\r\n # cursorclass=pymysql.cursors.DictCursor)\r\n # self.cursor = self.dbConn.cursor()\r\n r = DBResult()\r\n try:\r\n if params is None or len(params) == 0 or type(params) != dict:\r\n r.Rows = self.cursor.execute(sql)\r\n else:\r\n r.Rows = self.cursor.execute(sql, params)\r\n r.Result = self.cursor.fetchall() if r.Rows != 0 else []\r\n r.Suc = True\r\n except Exception as e:\r\n r.Err = e\r\n # self.cursor = None\r\n # if self.dbConn is not None:\r\n # self.dbConn.close()\r\n # self.dbConn = None\r\n return r\r\n\r\n # Return DBResult\r\n def execute(self, sql, params=None, close=True):\r\n # if self.dbConn is None:\r\n # self.dbConn = pymysql.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB, charset=DB_CHARSET,\r\n # cursorclass=pymysql.cursors.DictCursor)\r\n # self.cursor = self.dbConn.cursor()\r\n r = DBResult()\r\n try:\r\n if not params:\r\n r.Rows = self.cursor.execute(sql)\r\n else:\r\n r.Rows = self.cursor.execute(sql, params)\r\n r.Result = self.cursor.fetchall() if r.Rows != 0 else []\r\n r.Suc = 
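The POST/GET/DELETE handlers in the Keycloak user resource above repeat the same span-tagging block verbatim; that boilerplate is a natural fit for a decorator. A hedged refactoring sketch — it assumes the module's tracer and tags objects are in scope, and is illustrative rather than part of the source:

import functools
import traceback

def traced_errors(func):
    """Log any exception to the active span and return a 500 payload,
    mirroring the inline try/except blocks above."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as err:
            current_span = tracer.active_span          # module-level tracer assumed
            current_span.set_tag(tags.ERROR, 'true')
            current_span.log_kv({'event': 'error',
                                 'error.kind': str(type(err)),
                                 'error.message': err.with_traceback(None),
                                 'error.object': traceback.format_exc()})
            current_span.set_tag(tags.HTTP_STATUS_CODE, 500)
            return {"error": "{}".format(err)}, 500
    return wrapper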
True\r\n self.dbConn.commit()\r\n except Exception as e:\r\n r.Err = e\r\n self.dbConn.rollback()\r\n # if close:\r\n # self.cursor = None\r\n # if self.dbConn is not None:\r\n # self.dbConn.close()\r\n # self.dbConn = None\r\n return r\r\n\r\n # Return DBResult\r\n def insert(self, sql, params=None):\r\n # if self.dbConn is None:\r\n # self.dbConn = pymysql.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB, charset=DB_CHARSET,\r\n # cursorclass=pymysql.cursors.DictCursor)\r\n # self.cursor = self.dbConn.cursor()\r\n r = self.execute(sql, params, False)\r\n if not r.Suc:\r\n r.Result = -1\r\n return r\r\n r.Rows = self.cursor.execute(\"SELECT LAST_INSERT_ID()\")\r\n r.Result = self.cursor.fetchone() if r.Rows != 0 else []\r\n\r\n # self.cursor = None\r\n # if self.dbConn is not None:\r\n # self.dbConn.close()\r\n # self.dbConn = None\r\n return r\r\n\r\n # Return DBResult\r\n def getValue(self, sql, params=None):\r\n # if self.dbConn is None:\r\n # self.dbConn = pymysql.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB, charset=DB_CHARSET,\r\n # cursorclass=pymysql.cursors.DictCursor)\r\n # self.cursor = self.dbConn.cursor()\r\n r = self.select(sql, params)\r\n if r.Suc:\r\n if r.Result:\r\n r.Result = r.Result[0]\r\n else:\r\n r.Result = -1\r\n return r\r\n\r\n\r\nclass BaseManager:\r\n\r\n def __init__(self, table, obj):\r\n self.table = table\r\n self.obj = obj\r\n self.dbConn = BaseDB().dbConn\r\n\r\n self.insertKeyTmp = \", \".join(['`' + k + '`' for k in obj.insertKeys])\r\n self.insertValueTmp = \", \".join([\"%(\" + k + \")s\" for k in obj.insertKeys])\r\n\r\n self.searchTmp = \" OR \".join(\r\n ['`' + k + '`' + \" like %(key)s\" for k in obj.searchKeys]\r\n )\r\n\r\n self.updateTmp = \", \".join(['`' + k + '`' + \"=%(\" + k + \")s\" for k in obj.updateKeys])\r\n\r\n def getAll(self):\r\n sql = '''SELECT * FROM %(table)s'''\r\n r = self.dbConn.select(sql, {'table': self.table})\r\n return r\r\n\r\n def get(self, objID):\r\n sql = '''SELECT * FROM %(table)s WHERE `id` = %(objID)s'''\r\n r = self.dbConn.select(sql, {'table': self.table, 'objID': objID})\r\n return r\r\n\r\n def delete(self, objID):\r\n sql = '''DELETE FROM %(table)s WHERE `id` = %(objID)s'''\r\n r = self.dbConn.execute(sql, {'table': self.table, 'objID': objID})\r\n return r\r\n\r\n def update(self, params):\r\n sql = '''UPDATE %s SET %s WHERER %s''' % (self.table, self.updateTmp, '`id` = %(id)s')\r\n r = self.dbConn.execute(sql, params)\r\n return r\r\n\r\n def add(self, params):\r\n sql = '''INSERT INTO %s(%s) VALUES(%s)''' %(self.table, self.insertKeyTmp, self.insertValueTmp)\r\n r = self.dbConn.insert(sql, params)\r\n return r\r\n","sub_path":"Core/BaseDB.py","file_name":"BaseDB.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"471930562","text":"# -*-coding:utf-8-*- \n# 作者: 29511\n# 文件名: 文件指定日期整理.py\n# 日期时间:2020/11/4,12:02\nimport pandas as pd\nimport numpy as np\nimport os, openpyxl\n\n\n# 移动符合条件文件,并删除二级文件夹和多余文件\ndef move_file(file_path, _new_path, date_xl_str):\n # 本月文件移动至对应新建文件夹,非本月文件直接删除\n for curDir, dirs, files in os.walk(file_path):\n for file in files:\n old_path = os.path.join(curDir, file)\n new_path = os.path.join(_new_path, file)\n file_date = file.split(\"_\")[-1][:10]\n try:\n os.rename(old_path, new_path) if file_date in date_xl_str else os.remove(old_path)\n except:\n os.remove(old_path)\n\n # 移除子文件夹\n for curDir, dirs, files in os.walk(file_path):\n for _dir in dirs:\n 
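One caveat about BaseManager above: DB-API placeholders such as %(table)s bind values, so pymysql would render the table name as a quoted string literal and produce invalid SQL. Identifiers have to be interpolated separately (from trusted code only), with just the values parameterized. A hedged sketch of the distinction — TABLE is a hypothetical, trusted identifier:

# TABLE must come from trusted code, never from user input.
TABLE = "users"

# Interpolate the identifier; leave the value as a pyformat placeholder.
sql = "SELECT * FROM `%s` WHERE `id` = %%(objID)s" % TABLE
params = {"objID": 42}

print(sql)    # SELECT * FROM `users` WHERE `id` = %(objID)s
# r = BaseDB().select(sql, params)   # would run against the configured DB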
os.removedirs(os.path.join(curDir, _dir))\n os.mkdir(\"data\")\n\n\n# 文件去重-相同日期文件\ndef qch_date(file_path):\n wj_names = os.listdir(file_path)\n wj_list = []\n num = 0\n for wj in wj_names:\n new_wj = wj[:-11]\n if new_wj not in wj_list:\n wj_list.append(new_wj)\n else:\n os.remove(file_path + \"\\\\\" + wj)\n num += 1\n return num\n\n\n# 更新数据源\ndef refresh_data(file_path, sheet_name, data):\n book = openpyxl.load_workbook(file_path)\n writer = pd.ExcelWriter(file_path, engine=\"openpyxl\")\n\n # 在ExcelWriter的源代码中,它初始化空工作簿并删除所有工作表,\n # writer.book = book将原来表里面的内容保存到writer中\n writer.book = book\n\n # activate激活指定sheet工作表\n ws = book[sheet_name]\n\n # 清空当前活动表数据\n for row in ws.iter_rows():\n for cell in row:\n cell.value = None\n\n # dataframe行列数\n idx_num, col_num = data.shape\n\n # 新数据写入当前活动表-注意索引偏移\n for i in range(1, idx_num + 1):\n for j in range(1, col_num + 1):\n ws.cell(row=i, column=j).value = data.iloc[i - 1, j - 1]\n\n # 保存关闭writer\n writer.save()\n writer.close()\n\n return None\n\n\n# 文件检查\ndef check_file(file_path, check_file=\"文件检查.xlsx\"):\n wj_names = os.listdir(file_path)\n data = pd.DataFrame([wj.split(\"_\")[2:] for wj in wj_names], columns=[\"店铺名称\", \"日期\"])\n data['日期'] = data['日期'].str[:10]\n\n # 标题columns放到dataframe中\n nind = data.index.insert(0, '0')\n data1 = data.reindex(index=nind)\n data1.loc['0'] = data.columns\n data1.reset_index(drop=True, inplace=True)\n\n # 刷新数据源\n refresh_data(check_file, \"数据源\", data1)\n\n return None\n\n\nfile_path = \"data\"\n# 日期格式:xxxx-xx eg:2020-07-01\nstart_date = input(\"请输入开始日期:\")\nend_date = input(\"请输入结束日期:\")\n\n# 生成日期区间-字符串类型\ndate_xl_str = [str(i)[:10] for i in pd.date_range(start_date, end_date, freq='D')]\n\n# 创建指定文件夹\nnew_path = start_date + \"~\" + end_date\ntry:\n os.mkdir(new_path)\nexcept:\n print(\"文件夹 【%s】 已存在\" % new_path)\n\n# 移动符合条件文件,并删除二级文件夹和多余文件\nmove_file(file_path, new_path, date_xl_str)\n\n# 文件去重\nnum = qch_date(new_path)\nprint(\"去除重复文件 %s 个\" % num)\n\n# 文件检查\ncheck_file(new_path)\n","sub_path":"其他/文件指定日期整理.py","file_name":"文件指定日期整理.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"254855308","text":"from utilities.statusdisplay import StatusDisplay\nfrom pages.home.home_page import HomePage\nfrom pages.home.login_page import LoginPage\nfrom pages.models.models_page import ModelsPage\nimport pytest\nimport time\n\n@pytest.mark.usefixtures(\"oneTimeSetUp\")\nclass TestHome:\n\n @pytest.fixture(autouse=True)\n def classSetup(self, oneTimeSetUp):\n self.hp = HomePage(self.driver)\n self.ts = StatusDisplay(self.driver)\n\n # method that check status of login and page then\n # return to right url/login.\n self.hp.checkLogoffHome()\n\n def test_elementsAvailable(self):\n \"\"\" Check all filter elements available on Home Page after log out.\n TC # 027\n \"\"\"\n\n # own verify method for Home page\n res = self.hp.verifyHomePageElements()\n\n self.ts.markFinal(\n \"TC #027 All elements are available on Home page :\",\n res, \": TC #027 TOTALLY FAILED test_elementsAvailable.\")\n\n def test_verifyForClientsBtn(self):\n \"\"\" Check all elements available on Home Page after click \"For Clients.\n TC # 028\n \"\"\"\n\n # own verify method for Home page\n res = self.hp.verifyForClientsElements()\n\n self.ts.markFinal(\n \"TC #028 All elements available after click For Clients :\",\n res, \": TC #028 TOTALLY FAILED test_verifyForClientsBtn.\")\n\n def test_verifyForAgenciesBtn(self):\n \"\"\" Check all elements 
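The membership test in move_file above compares each file's date string against a list; pre-formatting the window into a set keeps the intent obvious and lookups O(1). A small hedged sketch — the file name is hypothetical but follows the ..._YYYY-MM-DD... convention the script splits on:

import pandas as pd

start_date, end_date = "2020-07-01", "2020-07-03"           # sample window
valid_dates = set(pd.date_range(start_date, end_date, freq="D")
                    .strftime("%Y-%m-%d"))

fname = "sales_export_shopA_2020-07-02 120000.xlsx"         # hypothetical name
file_date = fname.split("_")[-1][:10]
print(file_date in valid_dates)                             # True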
available on Home Page after click \"For Agencies.\n TC # 029\n \"\"\"\n\n # own verify method for Home page\n res = self.hp.verifyForAgenciesElements()\n\n self.ts.markFinal(\n \"TC #029 All elements available after click For Agencies :\",\n res, \": TC #029 TOTALLY FAILED test_verifyForAgenciesBtn.\")\n\n def test_verifyForModelsBtn(self):\n \"\"\" Check elements available on Home Page after click \"For Models.\n TC # 030\n \"\"\"\n\n # own verify method for Home page\n res = self.hp.verifyForModelsElements()\n\n\n self.ts.markFinal(\n \"TC #030 All elements available after click For Models :\",\n res, \": TC #030 TOTALLY FAILED test_verifyForModelsBtn.\")\n\n def test_verifyBrowseTalentBtn(self):\n \"\"\" Check Models' loggoffed catalogues available\n after click \"Browse Talent\".\n TC # 031\n \"\"\"\n\n # own verify method for Home page\n res = self.hp.verifyBrowseTalentElements()\n\n self.ts.markFinal(\n \"TC #031 All elements available after click Browse Talent :\",\n res, \": TC #031 TOTALLY FAILED test_verifyBrowseTalentBtn.\")","sub_path":"tests/home/home_test.py","file_name":"home_test.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"258226555","text":"# encoding: utf-8\n\n\"\"\"\nCopyright (c) 2017, Ernesto Ruge\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
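The home_test.py suite above combines two pytest idioms: a class-scoped usefixtures for one-time setup and an autouse fixture that rebuilds page objects before every test. A self-contained sketch of the pattern, with the WebDriver and page objects replaced by hypothetical stand-ins:

import pytest

@pytest.fixture(scope="class")
def one_time_setup(request):
    request.cls.driver = "fake-driver"   # stand-in for a real WebDriver
    yield
    # teardown (e.g. driver.quit()) would run here

@pytest.mark.usefixtures("one_time_setup")
class TestFixturePattern:
    @pytest.fixture(autouse=True)
    def class_setup(self):
        # runs before each test method, like classSetup in the suite above
        self.home_page = ("HomePage", self.driver)

    def test_shared_driver(self):
        assert self.driver == "fake-driver"
        assert self.home_page[0] == "HomePage"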
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nfrom flask_wtf import FlaskForm\nfrom flask_babel import _\nfrom wtforms import validators\nfrom wtforms import StringField, TextAreaField, SelectField, SubmitField, HiddenField\nfrom ..common.form import SearchBaseForm\nfrom ..common.countrycodes import country_codes\nfrom ..common.form_field import RegionField\nfrom ..common.form_filter import float_filter\n\n\nclass StoreSearchForm(SearchBaseForm):\n name = StringField(\n label='Name'\n )\n sort_field = SelectField(\n label='Sortier-Feld',\n choices=[\n ('name', 'Name'),\n ('created', 'Erstellt')\n ]\n )\n\n\nclass StoreForm(FlaskForm):\n class Meta:\n locales = ('de_DE', 'de')\n\n name = StringField(\n label=_('Name'),\n validators=[\n validators.DataRequired(\n message=_('Bitte geben Sie einen Namen an.')\n )\n ]\n )\n type = SelectField(\n label=_('Art des Geschäfts'),\n choices=[\n ('0', 'bitte wählen'),\n ('Apotheke', 'Apotheke'),\n ('Bäckerei', 'Bäckerei'),\n ('Restaurant', 'Restaurant')\n ],\n validators=[\n validators.DataRequired(\n message='Bitte geben Sie einen Typen an.'\n ),\n validators.NoneOf(\n ['0'],\n message='Bitte geben Sie einen Typen an.'\n )\n ]\n )\n region = RegionField(\n label=_('Region'),\n\n )\n firstname = StringField(\n label=_('Vorname')\n )\n lastname = StringField(\n label=_('Nachname')\n )\n company = StringField(\n label=_('Unternehmen')\n )\n address = StringField(\n label=_('Straße und Hausnummer'),\n validators=[\n validators.DataRequired(\n message=_('Bitte geben Sie eine Straße und Hausnummer an.')\n )\n ]\n )\n postalcode = StringField(\n label=_('Postleitzahl'),\n validators=[\n validators.DataRequired(\n message=_('Bitte geben Sie eine Postleitzahl an.')\n )\n ]\n )\n locality = StringField(\n label=_('Ort'),\n validators=[\n validators.DataRequired(\n message=_('Bitte geben Sie einen Ort an.')\n )\n ]\n )\n country = SelectField(\n _('Staat'),\n validators=[\n validators.DataRequired(\n message=_('Bitte geben Sie einen Staat an.')\n )\n ],\n choices=country_codes,\n default='DE'\n )\n lat = HiddenField(\n label=_('Längengrad'),\n filters=[\n float_filter\n ]\n )\n lon = HiddenField(\n label=_('Breitengrad'),\n filters=[\n float_filter\n ]\n )\n website = StringField(\n label=_('Website'),\n validators=[\n validators.url(\n message='Bitte geben Sie eine URL an'\n ),\n validators.Optional()\n ],\n )\n email = StringField(\n label=_('E-Mail'),\n validators=[\n validators.email(\n message='Bitte geben Sie eine E-Mail an'\n ),\n validators.Optional()\n ],\n )\n phone = StringField(\n label=_('Telefon')\n )\n mobile = StringField(\n label=_('Mobiltelefon')\n )\n fax = StringField(\n label=_('Fax')\n )\n description = TextAreaField(\n label='Beschreibung'\n )\n submit = SubmitField(_('speichern'))\n\n\nclass StoreDeleteForm(FlaskForm):\n submit = SubmitField(_('löschen'))\n abort = 
SubmitField(_('abbrechen'))\n\n","sub_path":"webapp/store_management/StoreManagementForms.py","file_name":"StoreManagementForms.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"522364579","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 13 14:49:13 2018\n\n@author: sanketrai\n\"\"\"\n\nimport requests\n\nres = requests.get(\"https://www.w3.org/TR/PNG/iso_8859-1.txt\")\n\ntry:\n res.raise_for_status()\nexcept Exception as exc:\n print(\"There is an error : \" + str(exc))\n \nnewfile = open(\"sample.txt\", 'wb')\n\nfor chunk in res.iter_content(100000):\n newfile.write(chunk)\n \nnewfile.close()\n","sub_path":"Web Scraping/WebPageDownload.py","file_name":"WebPageDownload.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"328456070","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndataset = pd.read_csv('../Social_Network_Ads.csv')\nX = dataset.iloc[:, :-1].values\nY = dataset.iloc[:, -1].values\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=0)\n\n# Feature Scaling\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.fit_transform(X_test)\n\n# Training the k-NN model on the Training set\nclassifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2) # as a parameters we are choosing k-neighbours and distance kind\nclassifier.fit(X_train, Y_train)\n\n# Predict a new result\ny_uno_pred = classifier.predict(sc.transform([[30, 8700]])) # i have to remember to transform the data that i check if i did feature scaling earlier\nprint(y_uno_pred)\n\n# Predict the Test set result\ny_pred = classifier.predict(X_test)\nprint(np.concatenate((y_pred.reshape(len(y_pred), 1), Y_test.reshape(len(Y_test), 1)), 1)) # we are creating matrix or two vectors to easier compare them\n\n# Making the Confusion Matrix\n# confusion matrix is 2D matrix (2 rows, 2 columns) and it will how as number of corrected predictions in previous point\nfrom sklearn.metrics import confusion_matrix, accuracy_score\ncm = confusion_matrix(Y_test, y_pred)\nprint(cm)\na_s = accuracy_score(Y_test, y_pred) # return a value of accuracy (1 is the highest value)\nprint(a_s) # it returns how many corrected predictions we have in percentage\n","sub_path":"classification/K-nearest_neighbour/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"69188456","text":"movies = [\n {\n \"movie\" : \"SARKAR\",\n \"rating\" : \"U\",\n \"tickets\" : 10\n },\n {\n \"movie\" : \"IAMK\",\n \"rating\": \"A\",\n \"tickets\": 2\n }\n]\n\ndef bookTickets(ticketCount,movieName,x):\n # print(\"Caleld\",ticketCount)\n if ticketCount <= movies[x]['tickets']:\n movies[x][\"tickets\"] = movies[x]['tickets'] - ticketCount\n print(\"Tickets Booked\")\n else:\n print(\"Tickets Unavailable\")\n\nwhile True:\n movieName = input(\"Which movie you wanna watch: \")\n\n x = 0\n while x < len(movies):\n if movies[x]['movie'] == movieName and movies[x]['rating'] == 'U':\n tickets = int(input(\"{} available. 
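One detail worth flagging in the k-NN script above: calling fit_transform on the test set refits the scaler to the test distribution, which leaks information into evaluation. The standard pattern fits on the training data only and reuses those statistics. A corrected sketch on synthetic data, so it runs without the CSV:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.default_rng(0)
X_train = rng.normal(size=(100, 2))
y_train = (X_train[:, 0] > 0).astype(int)
X_test = rng.normal(size=(20, 2))

sc = StandardScaler()
X_train = sc.fit_transform(X_train)   # fit on training data only
X_test = sc.transform(X_test)         # reuse the training mean/variance

clf = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
clf.fit(X_train, y_train)
print(clf.predict(X_test[:3]))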
How many tickets do you want?\".format(movies[x]['tickets'])))\n bookTickets(tickets,movieName,x)\n break\n elif movies[x][\"movie\"] == movieName and movies[x]['rating'] == \"A\":\n age = int(input(\"Whats is your age?: \"))\n if(age >= 18):\n tickets = int(input(\"{} available. How many tickets do you want?\".format(movies[x]['tickets'])))\n bookTickets(tickets,movieName,x)\n break\n else:\n print(\"You are not old enough to watch that movie\")\n break\n elif x == len(movieName) - 1:\n print(\"We are not screening that movie\")\n else:\n x = x + 1\n","sub_path":"cinema-ticket-counter.py","file_name":"cinema-ticket-counter.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"608290588","text":"import time\nimport json\nimport math\nfrom cgi import valid_boundary\n\nfrom flask import Blueprint, render_template, abort, jsonify, request, current_app\nfrom jinja2 import TemplateNotFound\n\nfrom ...common import definitions as common_definitions\nfrom ..redis_store import redis_store\n\n\ndef now():\n return float(round(time.time()*1000))\n\n\ndef viewlimit(plotname, timebase=now()):\n return timebase - current_app.config[\"%s_PLOT_SCOPE_MS\" % plotname].total_seconds()*1000\n\n\nplots = Blueprint('plots', __name__, url_prefix=\"/plots\")\n\n\n@plots.route(\"/\")\ndef plots_index():\n try:\n return render_template('plots.html')\n except TemplateNotFound:\n abort(404)\n\n\n@plots.route(\"/plot_data\")\ndef plot_data():\n chart_name = request.args.get(\"chartName\")\n\n if chart_name == \"acc\":\n return _get_acc_data()\n elif chart_name == \"gyro\":\n return _get_gyro_data()\n elif chart_name == \"temperature\":\n return _get_temperature_data()\n elif chart_name == \"pressure\":\n return _get_pressure_data()\n elif chart_name == \"distance\":\n return _get_distance_data()\n\n@plots.route(\"/map_data\")\ndef map_data():\n # Достаем элементы\n time = now()\n latest_update_time = float(request.args.get(\"latestUpdateTime\"))\n latest_update_time = max(latest_update_time, viewlimit(\"MAP\", time))\n\n zsetname = common_definitions.ZSET_NAME_MAP\n elems = redis_store.zrangebyscore(zsetname, latest_update_time, time, withscores=True, score_cast_func=int)\n\n data = []\n for e in elems:\n value, score = e\n value = json.loads(value.decode(\"utf-8\"))\n data.append({\n 'time_usec': value['time_usec'],\n 'fix_type': value['fix_type'],\n 'lat': value['lat'] / (10.0 ** 7),\n 'lon': value['lon'] / (10.0 ** 7),\n 'alt': value['alt'] / 1000,\n 'servertime': score\n })\n\n return jsonify(data)\n\n@plots.route(\"/gl_data\")\ndef gl_data():\n # Достаем элементы\n time = now()\n\n zsetname = common_definitions.ZSET_NAME_ATTITUDE\n elems = redis_store.zrange(zsetname, -1, -1, withscores=True, score_cast_func=int)\n\n data = []\n for e in elems:\n value, score = e\n value = json.loads(value.decode(\"utf-8\"))\n data.append({\n 'data': [value['q2'], value['q3'], value['q4'], value['q1']],\n 'servertime': score\n })\n\n return jsonify(data)\n\n\ndef _get_data_abstract(plotname, yvalue_name, time=now()):\n \"\"\" Преобразует набор \"мавлинкоджсоновых элементов в набор элементов точек для графика\n Элементы оси Y выбираются по указанному ключу \"\"\"\n\n # Достаем элементы\n latest_update_time = float(request.args.get(\"latestUpdateTime\"))\n latest_update_time = max(latest_update_time, viewlimit(plotname, time))\n\n zsetname = getattr(common_definitions, \"ZSET_NAME_%s\" % plotname)\n elems = 
redis_store.zrangebyscore(zsetname, latest_update_time, time, withscores=True, score_cast_func=int)\n\n data = []\n for e in elems:\n value, score = e\n value = json.loads(value.decode(\"utf-8\"))\n data.append({\n \"x\": value[\"time_boot_ms\"] / 1000.0, # будем показывать секунды, считаем что бортовое время в мс\n \"y\": value[yvalue_name],\n \"servertime\": score\n })\n\n return data\n\n\ndef _get_acc_data():\n \"\"\" Датасет для графика акселерометра \"\"\"\n time = now()\n datax, datay, dataz = [_get_data_abstract(\"IMU\", x, time) for x in (\"xacc\", \"yacc\", \"zacc\")]\n\n for x, y, z in zip(datax, datay, dataz):\n x[\"y\"] /= 1000.0\n y[\"y\"] /= 1000.0\n z[\"y\"] /= 1000.0\n\n latestUpdateTime = request.args.get(\"latestUpdateTime\")\n if len(datax) > 0:\n latestUpdateTime = datax[-1][\"servertime\"]\n\n data = {\n \"datas\": [datax, datay, dataz],\n \"latestUpdateTime\": latestUpdateTime,\n \"viewlimit\": viewlimit(\"IMU\", time)\n }\n\n return jsonify(data)\n\n\ndef _get_gyro_data():\n \"\"\" Датасет для графика гироскопа \"\"\"\n time = now()\n datax, datay, dataz = [_get_data_abstract(\"IMU\", x, time) for x in (\"xgyro\", \"ygyro\", \"zgyro\")]\n\n latestUpdateTime = request.args.get(\"latestUpdateTime\")\n if len(datax) > 0:\n latestUpdateTime = datax[-1][\"servertime\"]\n\n for x, y, z in zip(datax, datay, dataz):\n x[\"y\"] /= 1000.0\n y[\"y\"] /= 1000.0\n z[\"y\"] /= 1000.0\n\n data = {\n \"datas\": [datax, datay, dataz],\n \"latestUpdateTime\": latestUpdateTime,\n \"viewlimit\": viewlimit(\"IMU\", time)\n }\n\n return jsonify(data)\n\n\ndef _get_temperature_data():\n time = now()\n temperature = _get_data_abstract(\"PRESSURE\", \"temperature\", time)\n\n latestUpdateTime = request.args.get(\"latestUpdateTime\")\n if len(temperature) > 0:\n latestUpdateTime = temperature[-1][\"servertime\"]\n\n for record in temperature:\n record[\"y\"] /= 100.0\n\n data = {\n \"datas\": [temperature],\n \"latestUpdateTime\": latestUpdateTime,\n \"viewlimit\": viewlimit(\"PRESSURE\", time)\n }\n\n return jsonify(data)\n\ndef _get_pressure_data():\n time = now()\n pressure = _get_data_abstract(\"PRESSURE\", \"press_abs\", time)\n\n latestUpdateTime = request.args.get(\"latestUpdateTime\")\n if len(pressure) > 0:\n latestUpdateTime = pressure[-1][\"servertime\"]\n\n data = {\n \"datas\": [pressure],\n \"latestUpdateTime\": latestUpdateTime,\n \"viewlimit\": viewlimit(\"PRESSURE\", time)\n }\n\n return jsonify(data)\n\ndef _get_distance_data():\n time = now()\n distance = _get_data_abstract(\"DISTANCE\", \"distance\", time)\n\n latestUpdateTime = request.args.get(\"latestUpdateTime\")\n if len(distance) > 0:\n latestUpdateTime = distance[-1][\"servertime\"]\n\n data = {\n \"datas\": [distance],\n \"latestUpdateTime\": latestUpdateTime,\n \"viewlimit\": viewlimit(\"DISTANCE\", time)\n }\n\n return jsonify(data)\n","sub_path":"src/ground/drainbow_mcc/src/drainbow_mcc/webapp/blueprints/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"259447158","text":"import datetime\nfrom typing import Set, Dict\n\nfrom app import db\nfrom app.models.exceptions import NotFound\nfrom app.schema.products import ProductPresentation, BrandPresentation, CategoryPresentation\n\nFEATURED_THRESHOLD = 8\n\n\nclass Product(db.Model):\n \"\"\"\n Product db class.\n \"\"\"\n __tablename__ = 'products'\n id = db.Column(db.Integer, primary_key=True)\n\n name = db.Column(db.Unicode(50), 
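_get_data_abstract above is essentially a sliding-window read: fetch everything scored inside [latestUpdateTime, now], decode each JSON payload, and reshape it into chart points. A redis-free sketch of the same transformation, with a sample payload standing in for the stored telemetry:

import json

def window_points(elems, yvalue_name):
    # elems mimics zrangebyscore(..., withscores=True): (bytes, score) pairs
    points = []
    for raw, score in elems:
        value = json.loads(raw.decode("utf-8"))
        points.append({"x": value["time_boot_ms"] / 1000.0,   # board time in s
                       "y": value[yvalue_name],
                       "servertime": score})
    return points

sample = [(json.dumps({"time_boot_ms": 1500, "xacc": 980}).encode(), 1700000000000)]
print(window_points(sample, "xacc"))
# [{'x': 1.5, 'y': 980, 'servertime': 1700000000000}]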
nullable=False)\n rating = db.Column(db.Float, nullable=False)\n featured = db.Column(db.Boolean, nullable=False, default=False)\n\n created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)\n expiration_date = db.Column(db.DateTime, nullable=True)\n\n brand_id = db.Column(db.Integer, db.ForeignKey('brands.id'), nullable=False)\n categories = db.relationship('Category', secondary='products_categories', backref='products')\n\n items_in_stock = db.Column(db.Integer, nullable=False)\n receipt_date = db.Column(db.DateTime, nullable=True)\n\n def on_update(self, data: Dict):\n if data.get(\"featured\", None) is None and data.get(\"rating\", 0) > FEATURED_THRESHOLD:\n self.featured = True\n\n @classmethod\n def create(cls, data: Dict):\n \"\"\"\n Create new product.\n @param data: Product data. Should include all needed fields.\n @return: New product.\n \"\"\"\n product = Product(**data)\n product.on_update(data)\n return product\n\n @classmethod\n def get(cls, product_id: int):\n \"\"\"\n Get product by its ID.\n Throws NotFound if there is product with such id.\n @param product_id: ID of product we need.\n @return: Wanted product.\n \"\"\"\n product: Product = db.session.query(Product).filter_by(id=product_id).first()\n\n if product is None:\n raise NotFound([f\"Product[{product_id}]\"])\n\n return product\n\n def update(self, data: Dict):\n \"\"\"\n Update product.\n @param data: Product data. Can be partial.\n \"\"\"\n data = {\n key: value\n for key, value in data.items()\n if hasattr(self, key)\n }\n\n for key, value in data.items():\n setattr(self, key, value)\n\n self.on_update(data)\n\n @property\n def serialized(self) -> ProductPresentation:\n \"\"\"\n Get product presentation, prepared to be turned into JSON.\n @return: Product representation.\n \"\"\"\n return {\n 'id': self.id,\n 'name': self.name,\n 'rating': self.rating,\n 'featured': self.featured,\n 'items_in_stock': self.items_in_stock,\n 'receipt_date': self.receipt_date,\n 'brand': self.brand.serialized,\n 'categories': [c.serialized for c in self.categories],\n 'expiration_date': self.expiration_date,\n 'created_at': self.created_at\n }\n\n\nclass Brand(db.Model):\n \"\"\"\n Brand db class.\n \"\"\"\n __tablename__ = 'brands'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Unicode(50), nullable=False)\n country_code = db.Column(db.Unicode(2), nullable=False)\n\n products = db.relationship('Product', backref='brand')\n\n @classmethod\n def get(cls, brand_id: int):\n \"\"\"\n Get brand by its ID.\n Throws NotFound if there is brand with such id.\n @param brand_id: ID of brand we need.\n @return: Wanted brand.\n \"\"\"\n brand: Brand = db.session.query(Brand).filter_by(id=brand_id).first()\n\n if brand is None:\n raise NotFound([f\"Brand[{brand_id}]\"])\n\n return brand\n\n @property\n def serialized(self) -> BrandPresentation:\n \"\"\"\n Get brand presentation, prepared to be turned into JSON.\n @return: Brand presentation.\n \"\"\"\n return {\n 'id': self.id,\n 'name': self.name,\n 'country_code': self.country_code\n }\n\n\nclass Category(db.Model):\n \"\"\"\n Category db class.\n \"\"\"\n __tablename__ = 'categories'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Unicode(50), nullable=False)\n\n @classmethod\n def get_all(cls, ids: Set[int]):\n \"\"\"\n Get categories with specified ids.\n Throws NotFound if any Category not found.\n @param ids: IDs of wanted Categories.\n @return: Wanted categories.\n \"\"\"\n categories = 
db.session.query(Category).filter(\n Category.id.in_(ids)\n ).all()\n\n db_ids = {record.id for record in categories}\n\n if len(categories) != len(ids):\n raise NotFound([f\"Category[{category_id}]\" for category_id in ids.difference(db_ids)])\n\n return categories\n\n @property\n def serialized(self) -> CategoryPresentation:\n \"\"\"\n Get category presentation, prepared to be turned into JSON.\n @return: Category presentation.\n \"\"\"\n return {\n 'id': self.id,\n 'name': self.name,\n }\n\n\nproducts_categories = db.Table(\n 'products_categories',\n db.Column('product_id', db.Integer, db.ForeignKey('products.id'), primary_key=True),\n db.Column('category_id', db.Integer, db.ForeignKey('categories.id'), primary_key=True)\n)\n","sub_path":"app/models/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"555300587","text":"def fibonacci(max):\n fiblist = [1,1]\n while True:\n if fiblist[-1] > max:\n break\n fiblist.append(fiblist[-1] + fiblist[-2])\n \n return fiblist\nsum = 0\nfor i in fibonacci(4000000):\n if i % 2 == 0:\n sum += i\nprint (sum)\n","sub_path":"2. Sum of Even Fibonacci Numbers Below___.py","file_name":"2. Sum of Even Fibonacci Numbers Below___.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"365145264","text":"import numpy as np\nfrom random import shuffle\n\ndef softmax_loss_naive(W, X, y, reg):\n \"\"\"\n Softmax loss function, naive implementation (with loops)\n Inputs:\n - W: C x D array of weights\n - X: D x N array of data. Data are D-dimensional columns\n - y: 1-dimensional array of length N with labels 0...K-1, for K classes\n - reg: (float) regularization strength\n Returns:\n a tuple of:\n - loss as single float\n - gradient with respect to weights W, an array of same size as W\n \"\"\"\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[1]\n num_classes = W.shape[0]\n\n for i in xrange(num_train):\n scores = W.dot(X[:, i])\n scores -= np.max(scores) # shift values so max number is 0\n cross_entropy_loss = np.exp(scores)\n normalized_cross_entropy_loss = cross_entropy_loss / np.sum(cross_entropy_loss) # normalize\n correct_class_score = y[i]\n loss = loss - np.log(normalized_cross_entropy_loss[correct_class_score])\n for j in xrange(num_classes):\n # (1/sum of e^scores) * image * e^scores\n dW[j,:] += np.reciprocal(np.sum(cross_entropy_loss)) * np.exp(scores[j]) * X[:,i] \n if j == correct_class_score:\n dW[j,:] -= X[:,i] # for the correct class, subtract Xi\n\n # these are a sum over all training images, we want an average\n loss /= num_train\n dW /= num_train\n\n # Add regularization\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n\n return loss, dW\n\n\ndef softmax_loss_vectorized(W, X, y, reg):\n \"\"\"\n Softmax loss function, vectorized version.\n\n Inputs and outputs are the same as softmax_loss_naive.\n \"\"\"\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[1]\n num_classes = W.shape[0]\n\n scores = W.dot(X)\n scores -= np.max(scores, axis=0) # shift values per image (columns)\n\n cross_entropy_loss = np.exp(scores)\n cross_entropy_loss_sums = np.sum(cross_entropy_loss, axis=0) # cache for below\n normalized_cross_entropy_loss = cross_entropy_loss / cross_entropy_loss_sums # normalize\n\n # loss\n y_range = 
np.arange(0, num_train)\n loss = np.sum(-np.log(normalized_cross_entropy_loss[y, y_range]))\n loss /= num_train\n\n # gradient\n dW += (np.reciprocal(cross_entropy_loss_sums) * cross_entropy_loss).dot(X.T)\n\n # build matrix to subtract the correct Xi's\n correct_xi = np.zeros_like(scores)\n correct_xi[y, range(num_train)] = 1\n dW -= np.dot(correct_xi, X.T) # subtract Xi's for correct classes\n\n dW /= num_train\n\n # Add regularization\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n\n return loss, dW\n","sub_path":"cs231n/classifiers/softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"645559149","text":"from tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nimport re\nimport tensorflow as tf\nimport numpy as np\nimport os\n\n\ndef SentimentAnalysis(request_text):\n path_to_train_file = tf.keras.utils.get_file('train.txt', './train.txt')\n path_to_test_file = tf.keras.utils.get_file('train.txt', './test.txt')\n train_text = open(path_to_train_file, 'rb').read().decode(encoding='utf-8')\n test_text = open(path_to_test_file, 'rb').read().decode(encoding='utf-8')\n\n train_Y = np.array([[int(row.split('\\t')[2])]\n for row in train_text.split('\\n')[1:] if row.count('\\t') > 0])\n test_Y = np.array([[int(row.split('\\t')[2])]\n for row in test_text.split('\\n')[1:] if row.count('\\t') > 0])\n\n def clean_str(string):\n string = re.sub(r\"[^가-힣A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"\\'{2,}\", \"\\'\", string)\n string = re.sub(r\"\\'\", \"\", string)\n\n return string.lower()\n\n train_text_X = [row.split('\\t')[1] for row in train_text.split('\\n')[\n 1:] if row.count('\\t') > 0]\n train_text_X = [clean_str(sentence) for sentence in train_text_X]\n sentences = [sentence.split(' ') for sentence in train_text_X]\n\n sentences_new = []\n for sentence in sentences:\n sentences_new.append([word[:5] for word in sentence][:25])\n sentences = sentences_new\n\n # 토큰화\n tokenizer = Tokenizer(num_words=20000)\n tokenizer.fit_on_texts(sentences)\n train_X = tokenizer.texts_to_sequences(sentences)\n train_X = pad_sequences(train_X, padding='post')\n\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(20000, 300, input_length=25),\n tf.keras.layers.LSTM(units=50),\n tf.keras.layers.Dense(2, activation='softmax')\n ])\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n # 체크포인트\n checkpoint_path = os.getcwd() + \"\\\\SentimentAnalysis\\\\training_1\\\\cp.ckpt\"\n # checkpoint_path = \"./training_1/cp.ckpt\"\n checkpoint_dir = os.path.dirname(checkpoint_path)\n\n # 체크포인트 콜백 만들기\n cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,\n save_weights_only=True,\n verbose=1)\n latest = tf.train.latest_checkpoint(checkpoint_dir)\n model.load_weights(latest)\n # 학습\n\n history = model.fit(train_X, train_Y, epochs=1, batch_size=30000,\n validation_split=0.2, callbacks=[cp_callback])\n\n test_sentence = request_text\n test_sentence = test_sentence.split(' ')\n test_sentences = []\n now_sentence = []\n for word in test_sentence:\n now_sentence.append(word)\n test_sentences.append(now_sentence[:])\n\n test_X_1 = tokenizer.texts_to_sequences(test_sentences)\n test_X_1 = pad_sequences(test_X_1, padding='post', maxlen=25)\n prediction = model.predict(test_X_1)\n for idx, sentence in enumerate(test_sentences):\n print(sentence)\n print(prediction[idx])\n negative = round(\n (prediction[idx][0] / (prediction[idx][0] + prediction[idx][1])) * 100, 3)\n positive = round(\n (prediction[idx][1] / (prediction[idx][0] + prediction[idx][1])) * 100, 3)\n return negative, positive\n\n","sub_path":"projectAI/flask_server/SentimentAnalysis/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"405392439","text":"import copy\r\nimport networkx as nx\r\nimport numpy as np\r\nimport config as cf\r\nimport random\r\nimport math\r\nfrom network import Network\r\n\r\n\r\ndef Optimizer(network, Alive_Node, Update=False, R=30, IN_Median=False, First=False):\r\n LEACH_NET=nx.create_empty_copy(network)\r\n LEACH_CHID=[]\r\n P = cf.P_CH\r\n r = LEACH_NET.node[0]['round']\r\n ## CH Selection\r\n for i in Alive_Node:\r\n if r%(1/P) == 0 and r>1:\r\n LEACH_NET.node[i]['round'] = 1\r\n r0 = LEACH_NET.node[i]['round']\r\n if random.random()\", drawLine)\n\ninstructionLabel = Label(mainframe, text=\"CLICK and DRAG!\", font=(\"Courier\", 30))\nclearButton = Button(mainframe,text=\"CLEAR\",command = clearScreen)\n\n\n#GRID THE WIDGETS\n###########\nroot.minsize(width=450, height=200)\nmainframe.grid(row=1, column=1, padx=50, pady=50)\n\n\ninstructionLabel.grid(row=1, column=1, sticky=W)\nclearButton.grid(row=1,column=2,sticky=E,ipadx = 50)\ncv.grid(row=2, column=1, columnspan=2)\n\n\n\nroot.mainloop()\n","sub_path":"gui day 8 question 4.py","file_name":"gui day 8 
question 4.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"251992307","text":"#!/usr/bin/env python3\n\n# usage: wiiu_titlekeys.py\n\nimport json\nimport sys\nfrom urllib.request import Request, urlopen\n\nkeys_url = 'http://vault.titlekeys.ovh/json'\nkeys_filename = 'titlekeys.json'\nkeys_cemu = 'keys.txt'\n\nprint(\"Generating key lists from {} structure...\".format(keys_url, keys_filename))\nrequest = Request(keys_url)\nrequest.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)')\nresponse = urlopen(request).read()\nkeys_json_raw = json.loads(response)\n\n# Formatted JSON structure\nkeys_json_formatted = json.dumps(keys_json_raw, indent=4)\nwith open(keys_filename, \"w\") as f:\n f.write(keys_json_formatted)\nprint(\" Formatted json: \" + keys_filename)\n\nif '--cemu' in sys.argv:\n # CEMU key list\n title_key_list = []\n\n for title in keys_json_raw:\n if title[\"titleKey\"]:\n if title[\"name\"]:\n title_key_list.append(title[\"titleKey\"].upper() + ' # ' + title[\"name\"].replace('\\n',''))\n else:\n title_key_list.append(title[\"titleKey\"].upper())\n with open(keys_cemu, \"w\") as f:\n f.write('\\n'.join(map(str, title_key_list)))\n print(\" CEMU key list with all known keys: \" + keys_cemu)\n\n#keys_json_formatted = keys_json_formatted.encode('utf-8').decode('unicode-escape')\n","sub_path":"wiiu_titlekeys.py","file_name":"wiiu_titlekeys.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"378409942","text":"#!/bin/python3\n\nimport sys\n\ndef makingAnagrams(x, y):\n freq_x = [0] * 256\n freq_y = [0] * 256\n\n for c in x:\n freq_x[ord(c)] += 1\n\n for c in y:\n freq_y[ord(c)] += 1\n\n delete_chars = 0\n for fx, fy in zip(freq_x, freq_y):\n delete_chars += abs(fx - fy)\n\n return delete_chars\n\n\ndef main():\n s1 = input().strip()\n s2 = input().strip()\n result = makingAnagrams(s1, s2)\n print(result)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"online_judge/hackerrank/Strings/MakingAnagrams/ma.py","file_name":"ma.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"249019712","text":"#!/usr/bin/env python3\n\nimport uuid\n\nimport datetime\nimport time\nimport logging\nimport json\n\n# Configuration\nbucket = 'iot-mpg-is' #Make sure you have permissions to Put, Delete and Get.\npath = 'nullid/picam-' #The prefix of the pictures.\nmaxb = 75 # The max brightness of the pictures\nperiod = 0.25\ntopic = 'arn:aws:sns:eu-west-1:384599271648:iot-nullid-taco'\nthreshold = 10 # How much a pixel has to change to be noticed\nsensitivity = 20 # How many changed pixels to count as \"motion\"\n\nlogger = logging.getLogger(__name__)\nnow = datetime.datetime.now()\nlogging.basicConfig(filename='taco-log-{}-{}-{}'.format(now.year,now.month,now.day),\n format='%(asctime)s %(message)s',\n level=logging.INFO)\n\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\nlogger.addHandler(ch)\n\nlogger.info('Initializing...')\n\nimport picamera\nimport boto3\nfrom io import BytesIO\nfrom PIL import Image\n\ncamera = picamera.PiCamera()\ns3 = boto3.client('s3')\nrek = boto3.client('rekognition')\nsns = boto3.client('sns')\n\nlogger.info('Initialization done!')\n\n# Capture a small test image (for motion detection)\ndef captureTestImage():\n imageData = BytesIO()\n # what format is 
appropriate? does it matter?\n    camera.capture(imageData, format='jpeg', resize =(100,75))\n    imageData.seek(0)\n    image = Image.open(imageData)\n    pixels = image.load()\n    imageData.close()\n    return image, pixels\n\n# Returns a boolean that says whether the number of pixels that differ more than\n# threshold between pixel access arrays im1 and im2 of width w and height h is\n# greater than sensitivity\ndef pixelDiff(im1,im2,w,h,threshold,sensitivity):\n    changedPixels = 0\n    diff = False\n    for x in range(0, w):\n        for y in range(0, h):\n            if not diff:\n                # Just check green channel as it's the highest quality channel\n                pixdiff = abs(im1[x,y][1] - im2[x,y][1])\n                if pixdiff > threshold:\n                    changedPixels += 1\n            if changedPixels > sensitivity:\n                diff = True\n    return diff\n\ndef letKnow(type, objname, LabelMap=None):\n    logger.info('{} sighted! Notifying!'.format(type))\n    s3.put_object_acl(ACL='public-read',Bucket=bucket,Key=objname)\n    link = 'https://s3-eu-west-1.amazonaws.com/{}/{}'.format(bucket,objname)\n    msg ='TACOS Alert! {} detected in {}! See it at {}. The labels were {}'.format(type,objname, link, json.dumps(LabelMap))\n    sns.publish(TopicArn=topic, Message=msg)\n\n# Capture higher quality image, run rekognize and save if there is an animal\ndef captureRekognizeSave():\n    logger.info('Taking higher resolution picture...')\n    camera.capture('/tmp/picam.jpg')\n\n    objname = '{}{}.jpg'.format(path, str(uuid.uuid4())[-8:])\n\n    logger.info('Uploading as {}...'.format(objname))\n\n    s3.upload_file('/tmp/picam.jpg', bucket,objname)\n    logger.info('Done!')\n\n    logger.info('Rekognizing...')\n\n    res = rek.detect_labels(\n        Image={\n            'S3Object':{\n                'Bucket': bucket,\n                'Name': objname\n            }},\n        MaxLabels=10)\n\n    labels = res['Labels']\n    lks = map(lambda label: (label['Name'],label['Confidence']), labels)\n    LabelMap = dict(lks)\n    logger.info(LabelMap)\n    link = 'https://s3-eu-west-1.amazonaws.com/{}/{}'.format(bucket,objname)\n    if 'Cat' in LabelMap:\n        letKnow('Kitty',objname)\n    elif 'Animal' in LabelMap and LabelMap['Animal'] > 75:\n        letKnow('Animal',objname, LabelMap)\n    else:\n        logger.info('No cat detected... 
:(')\n        logger.info('Deleting non-kitty picture')\n        s3.delete_object(Bucket=bucket, Key=objname)\n\ncamera.start_preview()\n# Camera warmup time\ntime.sleep(2)\n# Capture first image\nimage1, buffer1 = captureTestImage()\n\nwhile True:\n    now = datetime.datetime.now()\n    #Make it more bright at night\n    camera.brightness = int(min(maxb,abs((now.hour - 12)/24)*maxb + 50))\n\n    logger.info(now)\n\n    # Capture comparison image\n    logger.info('Taking picture for comparison...')\n    image2, buffer2 = captureTestImage()\n\n    # Count changed pixels\n    logger.info('Comparing...')\n    delta = pixelDiff(buffer1,buffer2,100,75,threshold,sensitivity)\n\n    # Save an image if pixels changed\n    if delta:\n        logger.info('Motion detected!')\n        captureRekognizeSave()\n\n    # Swap comparison buffers\n    image1 = image2\n    buffer1 = buffer2\n\n    logger.info('Waiting for {} seconds to try again'.format(period))\n    time.sleep(period)\n\n","sub_path":"TACOS.py","file_name":"TACOS.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"418987873","text":"#!/usr/bin/python3\n\"\"\"\n    Queries Reddit API for top ten\n    posts for a given subreddit\n\"\"\"\n\n\ndef get_user_agent():\n    \"\"\" create/return a user_agent \"\"\"\n    platform = "linux"\n    app_id = "for_science"\n    version = "v0.0.1"\n    username = "Almost_Irish"\n    user_agent = '{}:{}:{} (by /u/{})'.format(\n        platform, app_id, version, username\n    )\n    return user_agent\n\n\ndef count_words(subreddit, word_list, **kwargs):\n    \"\"\"Prints a sorted count of given keywords listed for a subreddit\"\"\"\n    import requests\n\n    count = kwargs['count'] if 'count' in kwargs else 0\n    after = kwargs['after'] if 'after' in kwargs else {}\n    memo = kwargs['memo'] if 'memo' in kwargs else {}\n    word_count = kwargs['word_count'] if 'word_count' in kwargs else {}\n\n    url = [\n        "https://www.reddit.com/r/",\n        "{}/hot/.json".format(subreddit),\n        "?limit=100&after={}".format(after)\n    ]\n    url = "".join(url)\n    headers = {\n        'user-agent': get_user_agent()\n    }\n    response = requests.get(url, headers=headers, allow_redirects=False)\n\n    if response.status_code != 200:\n        # print('error', response.status_code, response.url)\n        return None\n\n    after = response.json()['data']['after']\n    children = response.json()['data']['children']\n\n    for child in children:\n        title = child['data']['title'].lower().split()\n\n        for word in word_list:\n            word = word.lower()\n            counted = memo[word] if word in memo else None\n            word_count = counted if counted is not None else 0\n            word_count += title.count(word)\n            memo.update({word: word_count})\n\n    count += len(children)\n\n    if after is not None:\n        count_words(\n            subreddit, word_list,\n            count=count,\n            after=after,\n            memo=memo,\n            word_count=word_count\n        )\n    else:\n        values = list(memo.values())\n        keys = list(memo.keys())\n\n        while values != []:\n            max_index = values.index(max(values))\n            key, value = keys.pop(max_index), values.pop(max_index)\n            if value > 0:\n                print("{}: {}".format(key, value))\n","sub_path":"0x13-count_it/0-count.py","file_name":"0-count.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"366187512","text":"\"\"\"sistema_vehiculos URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. 
Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom vehiculo import views, v_fabricante, v_solicitud\nfrom . import auth\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', v_fabricante.index, name=\"index\"),\n #Vehiculo\n path('crear/', views.crear, name=\"crear\"),\n path('post_crear/', views.post_crear, name=\"post_crear\"),\n path('editar/', views.editar, name=\"editar\"),\n path('detalle_vehiculo/', views.detalle_vehiculo, name=\"detalle_vehiculo\"),\n path('lst_vehiculo/', views.lst_vehiculo, name=\"lst_vehiculo\"),\n path('buscar_v/', views.buscar_v, name=\"buscar_v\"),\n path('actualizar/', views.actualizar, name=\"actualizar\"),\n path('eliminar/', views.eliminar, name=\"eliminar\"),\n path('lst_vehiculo_by_fab/', v_fabricante.lst_vehiculo_by_fab, name=\"lst_vehiculo_by_fab\"),\n path('buscar_v_f/', v_fabricante.buscar_v_f, name=\"buscar_v_f\"),\n #Fabricante\n path('lst_fabricante/', v_fabricante.lst_fabricante, name=\"lst_fabricante\"),\n path('buscar_f/', v_fabricante.buscar_f, name=\"buscar_f\"),\n path('crear_fabricante/', v_fabricante.crear_fabricante, name=\"crear_fabricante\"),\n path('post_crear_fabricante/', v_fabricante.post_crear_fabricante, name=\"post_crear_fabricante\"),\n path('editar_fabricante/', v_fabricante.editar_fabricante, name=\"editar_fabricante\"),\n path('actualizar_fabricante/', v_fabricante.actualizar_fabricante, name=\"actualizar_fabricante\"),\n path('eliminar_fabricante/', v_fabricante.eliminar_fabricante, name=\"eliminar_fabricante\"),\n #Pins\n path('crear_pin/', views.crear_pin, name=\"crear_pin\"),\n path('post_crear_pin/', views.post_crear_pin, name=\"post_crear_pin\"),\n path('eliminar_pin/', views.eliminar_pin, name=\"eliminar_pin\"),\n path('lst_pines/', views.lst_pines, name=\"lst_pines\"),\n #Auth\n path('sign_in/', auth.sign_in, name=\"sign_in\"),\n path('post_sign_in/', auth.post_sign_in, name=\"post_sign_in\"),\n path('sign_up/', auth.sign_up, name=\"sign_up\"),\n path('post_sign_up/', auth.post_sign_up, name=\"post_sign_up\"),\n path('logout/', auth.logout, name=\"logout\"),\n #Solicitud\n path('crear_solicitud/', v_solicitud.crear_solicitud, name=\"crear_solicitud\"),\n path('post_crear_solicitud/', v_solicitud.post_crear_solicitud, name=\"post_crear_solicitud\"),\n path('lst_solicitud/', v_solicitud.lst_solicitud, name=\"lst_solicitud\"),\n path('realizar_solicitud/', v_solicitud.realizar_solicitud, name=\"realizar_solicitud\"),\n\n path('admin/', admin.site.urls),\n\n] +static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"sistema_vehiculos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"213966518","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 29 15:07:54 2021\r\n\r\n@author: Abdul Qayyum\r\n\"\"\"\r\n\r\n#%% Convert dataset into same resampling format\r\n# this is necessary preprocessing step to resample both modality PET/CT\r\n\r\nimport os\r\nimport sys\r\nimport pathlib\r\nfrom pathlib import Path\r\nimport numpy 
as np\r\nimport pandas as pd\r\nimport SimpleITK as sitk\r\nfrom tqdm.notebook import tqdm\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\ninput_folder=\"C:\\\\Users\\\\Administrateur\\\\Desktop\\\\micca2021\\\\MICCAI2021\\\\Hector2021\\\\testing2021hector\\\\hecktor2021_test\\\\hecktor2021_test\\\\hecktor_nii\"\r\ninput_folder = Path(input_folder)\r\n\r\npath_bb = 'C:\\\\Users\\\\Administrateur\\\\Desktop\\\\micca2021\\\\MICCAI2021\\\\Hector2021\\\\testing2021hector\\\\hecktor2021_test\\\\hecktor2021_test\\\\hecktor2021_bbox_testing.csv'\r\n\r\n#patient_list = [f.name.split(\"_\")[0] for f in input_folder.rglob(\"*_ct*\")]\r\noutput_folder=\"C:\\\\Users\\\\Administrateur\\\\Desktop\\\\micca2021\\\\MICCAI2021\\\\Hector2021\\\\testing2021hector\\\\hecktor2021_test\\\\hecktor_nii_resampled\"\r\noutput_folder = Path(output_folder)\r\noutput_folder.mkdir(exist_ok=True)\r\n\r\n\r\n#print('resampling is {}'.format(str(resampling)))\r\nbb_df = pd.read_csv(path_bb)\r\nbb_df = bb_df.set_index('PatientID')\r\n\r\npatient_list = [f.name.split(\"_\")[0] for f in input_folder.rglob(\"*_ct*\")]\r\nresampling=(1,1,1)\r\nprint('resampling is {}'.format(str(resampling)))\r\nresampler = sitk.ResampleImageFilter()\r\nresampler.SetOutputDirection([1, 0, 0, 0, 1, 0, 0, 0, 1])\r\nresampler.SetOutputSpacing(resampling)\r\n\r\n#%matplotlib inline\r\ndef resample_one_patient(p):\r\n bb = np.array([\r\n bb_df.loc[p, 'x1'], bb_df.loc[p, 'y1'], bb_df.loc[p, 'z1'],\r\n bb_df.loc[p, 'x2'], bb_df.loc[p, 'y2'], bb_df.loc[p, 'z2']\r\n ])\r\n size = np.round((bb[3:] - bb[:3]) / resampling).astype(int)\r\n ct = sitk.ReadImage(\r\n str([f for f in input_folder.rglob(p + \"_ct*\")][0].resolve()))\r\n pt = sitk.ReadImage(\r\n str([f for f in input_folder.rglob(p + \"_pt*\")][0].resolve()))\r\n # gtvt = sitk.ReadImage(\r\n # str([f for f in input_folder.rglob(p + \"_gtvt*\")][0].resolve()))\r\n \r\n resampler.SetOutputOrigin(bb[:3])\r\n resampler.SetSize([int(k) for k in size]) # sitk is so stupid\r\n resampler.SetInterpolator(sitk.sitkBSpline)\r\n ct = resampler.Execute(ct)\r\n pt = resampler.Execute(pt)\r\n resampler.SetInterpolator(sitk.sitkNearestNeighbor)\r\n #gtvt = resampler.Execute(gtvt)\r\n \r\n sitk.WriteImage(ct, str((output_folder / (p + \"_ct.nii.gz\")).resolve()))\r\n \r\n sitk.WriteImage(pt, str((output_folder / (p + \"_pt.nii.gz\")).resolve()))\r\n \r\n #sitk.WriteImage(gtvt,str((output_folder / (p + \"_gtvt.nii.gz\")).resolve()))\r\n# p=patient_list[0]\r\n# resample_one_patient(p)\r\nfor p in patient_list:\r\n resample_one_patient(p)","sub_path":"Preprocessing_Hector21data.py","file_name":"Preprocessing_Hector21data.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"317905111","text":"\n\nimport os\nimport re\nimport collections\nimport typing\n\nimport jk_typing\nimport jk_prettyprintobj\n\nfrom .GitConfigFileSection import GitConfigFileSection\n\n\n\n\n\nclass GitConfigFile(jk_prettyprintobj.DumpMixin):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self, filePath:str):\n\t\tself.__filePath = filePath\n\t\tself.__sections:typing.List[GitConfigFileSection] = 
[]\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef filePath(self) -> str:\n\t\treturn self.__filePath\n\t#\n\n\t@property\n\tdef sections(self) -> typing.List[GitConfigFileSection]:\n\t\treturn self.__sections\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\tdef _dumpVarNames(self) -> list:\n\t\treturn [\n\t\t\t\"filePath\",\n\t\t\t\"sections\",\n\t\t]\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef __iter__(self) -> typing.Iterable[GitConfigFileSection]:\n\t\treturn self.__sections.__iter__()\n\t#\n\n\tdef __getitem__(self, ii) -> typing.Union[GitConfigFileSection,None]:\n\t\tif isinstance(ii, int):\n\t\t\treturn self.__sections[ii]\n\t\telif isinstance(ii, str):\n\t\t\treturn self.getSection(ii)\n\t\telse:\n\t\t\traise TypeError(repr(ii) + \" - \" + str(type(ii)))\n\t#\n\n\tdef __len__(self):\n\t\treturn len(self.__sections)\n\t#\n\n\tdef __bool__(self):\n\t\treturn bool(self.__sections)\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef getSections(self, sectionName:str) -> typing.List[GitConfigFileSection]:\n\t\tret = []\n\t\tfor sect in self.__sections:\n\t\t\tif sect.name == sectionName:\n\t\t\t\tret.append(sect)\n\t\treturn ret\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef getValue(self, sectionName:str, propertyKey:str) -> typing.Union[str,None]:\n\t\tsection = self.getSection(sectionName)\n\t\tif section:\n\t\t\treturn section.getProperty(propertyKey)\n\t\telse:\n\t\t\treturn None\n\t#\n\n\t#\n\t# This method replaces getValue().\n\t#\n\t# @param\tstr sectionName\t\t\t(required) The name of the section to retrieve\n\t# @param\tstr argument\t\t\t(optional) Additional search condition: The name of the section argument.\n\t#\t\t\t\t\t\t\t\t\tUsing null by default to indicate 'no section argument'.\n\t# @param\tstr propertyKey\t\t\t(required) The name of the section property to retrieve\n\t# @return\t\t\t\t\t\t\tThe requested value if a) the section and b) the value requested was found. null otherwise.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getValue2(self, sectionName:str, argument:typing.Union[str,None], propertyKey:str) -> typing.Union[str,None]:\n\t\tsection = self.getSection(sectionName, argument)\n\t\tif section:\n\t\t\treturn section.getProperty(propertyKey)\n\t\telse:\n\t\t\treturn None\n\t#\n\n\t#\n\t# Get a specific setion.\n\t#\n\t# @param\tstr sectionName\t\t\t(required) The name of the section to retrieve\n\t# @param\tstr argument\t\t\t(optional) Additional search condition: The name of the section argument.\n\t#\t\t\t\t\t\t\t\t\tUsing null by default to indicate 'no section argument'.\n\t# @return\t\t\t\t\t\t\tThe git configuration section object if the section was found. 
null otherwise.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef getSection(self, sectionName:str, argument:str = None) -> typing.Union[GitConfigFileSection,None]:\n\t\tif argument:\n\t\t\tfor sect in self.__sections:\n\t\t\t\tif (sect.name == sectionName) and (sect.argument == argument):\n\t\t\t\t\treturn sect\n\t\telse:\n\t\t\tfor sect in self.__sections:\n\t\t\t\tif sect.name == sectionName:\n\t\t\t\t\treturn sect\n\t\treturn None\n\t#\n\n\t@staticmethod\n\t@jk_typing.checkFunctionSignature()\n\tdef loadFromFile(filePath:str):\n\t\twith open(filePath, \"r\") as fin:\n\t\t\trawText = fin.read()\n\n\t\tret = GitConfigFile(filePath)\n\n\t\tcurrentSection = None\n\t\tfor line in rawText.split(\"\\n\"):\n\t\t\tline = line.rstrip()\t# just to be sure\n\t\t\tif not line:\t\t\t# just to be sure\n\t\t\t\tcontinue\n\n\t\t\t# something like: [foo \"bar\"]\n\t\t\tm = re.match(r\"\\[([a-z]+)\\s+\\\"([^\\\"]+)\\\"\\]\", line)\n\t\t\tif m:\n\t\t\t\tcurrentSection = GitConfigFileSection(m.group(1), m.group(2))\n\t\t\t\tret.__sections.append(currentSection)\n\t\t\telse:\n\t\t\t\t# something like: [foo]\n\t\t\t\tm = re.match(r\"\\[([a-z]+)\\]\", line)\n\t\t\t\tif m:\n\t\t\t\t\tcurrentSection = GitConfigFileSection(m.group(1), None)\n\t\t\t\t\tret.__sections.append(currentSection)\n\t\t\t\telse:\n\t\t\t\t\t# something like: key = value\n\t\t\t\t\tm = re.match(r\"\\s+([a-zA-Z0-9_]+)\\s+=\\s+(.+)\", line)\n\t\t\t\t\tif m:\n\t\t\t\t\t\tcurrentSection.setProperty(m.group(1), m.group(2))\n\n\t\treturn ret\n\t#\n\n#\n\n\n\n\n\n","sub_path":"src/jk_git/impl/GitConfigFile.py","file_name":"GitConfigFile.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"162455058","text":"# =========================================================================================\n# Implementation of \"Show, Attend and Tell: Neural Caption Generator With Visual Attention\".\n# There are some notations.\n# N is batch size.\n# L is spacial size of feature vector (196).\n# D is dimension of image feature vector (512).\n# T is the number of time step which is equal to caption's length-1 (16).\n# V is vocabulary size (about 10000).\n# M is dimension of word vector which is embedding size (default is 512).\n# H is dimension of hidden state (default is 1024).\n# =========================================================================================\n\nfrom __future__ import division\n\nimport tensorflow as tf\nimport pickle\nimport numpy as np\n\nclass Dis(object):\n def __init__(self, itm_cnt, usr_cnt, dim_hidden, n_time_step, learning_rate, grad_clip, emb_dim, lamda=0.2, initdelta=0.05,MF_paras=None,model_type=\"rnn\",use_sparse_tensor=True, update_rule=\"sgd\",pairwise=False):\n \"\"\"\n Args:\n dim_itm_embed: (optional) Dimension of item embedding.\n dim_usr_embed: (optional) Dimension of user embedding.\n dim_hidden: (optional) Dimension of all hidden state.\n n_time_step: (optional) Time step size of LSTM. 
\n usr_cnt: (optional) The size of all users.\n itm_cnt: (optional) The size of all items.\n \"\"\"\n self.V_M = itm_cnt\n self.V_U = usr_cnt\n self.param=MF_paras\n self.H = dim_hidden\n self.T = n_time_step\n \n self.MF_paras=MF_paras\n self.grad_clip = grad_clip\n\n\n self.weight_initializer = tf.random_uniform_initializer(minval=-0.05, maxval=0.05)\n self.const_initializer = tf.constant_initializer(0.0)\n self.emb_initializer = tf.random_uniform_initializer(minval=-0.05, maxval=0.05)\n\n\n# self.emb_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)\n \n self.sparse_tensor=use_sparse_tensor\n\n # Place holder for features and captions\n self.pairwise=pairwise\n if self.sparse_tensor:\n self.user_sparse_tensor= tf.sparse_placeholder(tf.float32)\n self.user_sequence = tf.sparse_tensor_to_dense(self.user_sparse_tensor)\n self.item_sparse_tensor= tf.sparse_placeholder(tf.float32)\n self.item_sequence = tf.sparse_tensor_to_dense(self.item_sparse_tensor) \n if self.pairwise:\n self.item_neg_sparse_tensor= tf.sparse_placeholder(tf.float32)\n self.item_neg_sequence = tf.sparse_tensor_to_dense(self.item_sparse_tensor) \n else:\n self.user_sequence = tf.placeholder(tf.float32, [None, self.T, self.V_M]) \n self.item_sequence = tf.placeholder(tf.float32, [None, self.T, self.V_U]) \n \n if self.pairwise: \n self.item_neg_sequence =tf.placeholder(tf.float32, [None, self.T, self.V_U]) \n\n self.rating = tf.placeholder(tf.float32, [None,])\n \n self.learning_rate = learning_rate\n \n \n self.emb_dim = emb_dim\n self.lamda = lamda # regularization parameters\n self.initdelta = initdelta\n \n self.u = tf.placeholder(tf.int32)\n self.i = tf.placeholder(tf.int32)\n if self.pairwise:\n self.j=tf.placeholder(tf.int32)\n print(\"pairwise training\")\n self.paras_rnn=[]\n self.model_type=model_type\n self.update_rule = update_rule\n def _init_MF(self):\n with tf.variable_scope('MF'):\n if self.MF_paras is None:\n self.user_embeddings = tf.Variable(\n tf.random_uniform([self.V_U, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,\n dtype=tf.float32))\n self.item_embeddings = tf.Variable(\n tf.random_uniform([self.V_M, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,\n dtype=tf.float32))\n self.item_bias = tf.Variable(tf.zeros([self.V_M])) \n self.user_bias = tf.Variable(tf.zeros([self.V_U])) \n else:\n self.user_embeddings = tf.Variable(self.param[0])\n self.item_embeddings = tf.Variable(self.param[1])\n self.user_bias = tf.Variable(self.param[2]) \n self.item_bias = tf.Variable(self.param[3]) \n\n \n\n self.paras_mf=[self.user_embeddings,self.item_embeddings,self.user_bias,self.item_bias]\n\n def _decode_lstm(self, h_usr, h_itm, reuse=False):\n if False:\n with tf.variable_scope('D_rating', reuse=reuse):\n w_usr = tf.get_variable('w_usr', [self.H, self.H], initializer=self.weight_initializer)\n b_usr = tf.get_variable('b_usr', [self.H], initializer=self.const_initializer)\n w_itm = tf.get_variable('w_itm', [self.H, self.H], initializer=self.weight_initializer)\n b_itm = tf.get_variable('b_itm', [self.H], initializer=self.const_initializer)\n\n usr_vec = tf.matmul(h_usr, w_usr) + b_usr\n \n itm_vec = tf.matmul(h_itm, w_itm) + b_itm\n \n out_preds = tf.reduce_sum(tf.multiply(usr_vec, itm_vec), 1) \n self.paras_rnn.extend([w_usr,b_usr,w_itm,b_itm])\n return out_preds\n else:\n out_preds = tf.reduce_sum(tf.multiply(h_usr, h_itm), 1) \n print(\"Do not use a fully-connectted layer at the time of output decoding.\") \n return out_preds\n \n def _get_initial_lstm(self, 
batch_size):\n with tf.variable_scope('D_initial_lstm'): \n c_itm = tf.zeros([batch_size, self.H], tf.float32)\n h_itm = tf.zeros([batch_size, self.H], tf.float32)\n\n # self.paras_rnn.extend([c_itm, h_itm, c_usr, h_usr]) # these variable should be trainable or not \n return c_itm, h_itm,\n\n def _item_embedding(self, inputs, reuse=False):\n with tf.variable_scope('D_item_embedding', reuse=reuse):\n w = tf.get_variable('w', [self.V_U, self.H], initializer=self.emb_initializer) \n x_flat = tf.reshape(inputs, [-1, self.V_U]) #(N * T, U) \n x = tf.matmul(x_flat, w) #(N * T, H)\n x = tf.reshape(x, [-1, self.T, self.H]) #(N, T, H)\n self.paras_rnn.extend([w])\n return x\n \n def _user_embedding(self, inputs, reuse=False):\n with tf.variable_scope('D_user_embedding', reuse=reuse):\n w = tf.get_variable('w', [self.V_M, self.H], initializer=self.emb_initializer) \n x_flat = tf.reshape(inputs, [-1, self.V_M]) #(N * T, M) \n x = tf.matmul(x_flat, w) #(N * T, H)\n x = tf.reshape(x, [-1, self.T, self.H]) #(N, T, H)\n self.paras_rnn.extend([w])\n return x\n def all_logits(self,u):\n u_embedding = tf.nn.embedding_lookup(self.user_embeddings, u)\n u_bias = tf.gather(self.user_bias, u)\n return tf.reduce_sum(tf.multiply(u_embedding, self.item_embeddings), 1) + self.item_bias +u_bias\n\n\n def get_rnn_output(self, item_sequence,itm_lstm_cell, input_type=\"item\",reuse=False):\n\n batch_size = tf.shape(self.item_sequence)[0]\n \n c_itm, h_itm = self._get_initial_lstm(batch_size)\n if input_type==\"item\":\n x_itm = self._item_embedding(inputs=item_sequence,reuse=reuse) \n else:\n x_itm = self._user_embedding(inputs=item_sequence,reuse=reuse)\n \n for t in range(self.T):\n with tf.variable_scope('D_'+input_type+'_lstm', reuse=(reuse or (t!=0))):\n _, (c_itm, h_itm) = itm_lstm_cell(inputs=x_itm[:,t,:], state=[c_itm, h_itm]) \n \n# MF_Regularizer = self.lamda * (tf.nn.l2_loss(self.u_embedding) + tf.nn.l2_loss(self.i_embedding) + tf.nn.l2_loss(self.u_bias) +tf.nn.l2_loss(self.i_bias))\n# RNN_Regularizer = tf.reduce_sum([tf.nn.l2_loss(para) for para in self.paras_rnn])\n \n# tv = tf.trainable_variables()\n# Regularizer = tf.reduce_sum([ tf.nn.l2_loss(v) for v in tv ]) \n \n return h_itm\n\n def get_mf_logists(self,u,i):\n \n u_embedding = tf.nn.embedding_lookup(self.user_embeddings, u)\n i_embedding = tf.nn.embedding_lookup(self.item_embeddings, i)\n i_bias = tf.gather(self.item_bias, i)\n u_bias = tf.gather(self.user_bias, u)\n pre_logits_MF = tf.reduce_sum(tf.multiply(u_embedding, i_embedding), 1) + i_bias +u_bias \n return pre_logits_MF\n\n\n def build_pretrain(self):\n self._init_MF()\n \n\n itm_lstm_cell = tf.contrib.rnn.LSTMCell(num_units=self.H)\n usr_lstm_cell = tf.contrib.rnn.LSTMCell(num_units=self.H) \n\n h_usr=self.get_rnn_output(self.user_sequence, usr_lstm_cell,input_type=\"user\")\n h_itm=self.get_rnn_output(self.item_sequence, itm_lstm_cell)\n\n self.logits_RNN = self._decode_lstm(h_usr, h_itm, reuse=False) \n self.logits_MF=self.get_mf_logists(self.u,self.i)\n if not self.pairwise:\n\n with tf.name_scope(\"pointwise\"): \n\n self.loss_RNN = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.rating, logits=self.logits_RNN)) #+\n self.loss_MF = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.rating, logits=self.logits_MF)) #+self.lamda * (tf.nn.l2_loss(self.user_embeddings) + tf.nn.l2_loss(self.item_embeddings) + tf.nn.l2_loss(self.user_bias) +tf.nn.l2_loss(self.item_bias))\n \n \n self.pre_joint_logits = self.logits_MF + self.logits_RNN\n # self.pre_joint_loss = 
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.rating, logits=self.pre_joint_logits)) + Regularizer\n \n self.pre_joint_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.rating, logits=self.pre_joint_logits)) #+Regularizer*self.lamda\n self.pre_joint_loss+= self.lamda * (tf.nn.l2_loss(self.user_embeddings) + tf.nn.l2_loss(self.item_embeddings) + tf.nn.l2_loss(self.user_bias) +tf.nn.l2_loss(self.item_bias))\n # self.pre_joint_loss += self.lamda * (tf.nn.l2_loss(self.u_embedding) + tf.nn.l2_loss(self.i_embedding) + tf.nn.l2_loss(self.u_bias) +tf.nn.l2_loss(self.i_bias))\n self.pre_joint_loss += self.lamda * tf.reduce_sum([tf.nn.l2_loss(para) for para in self.paras_rnn])\n else:\n with tf.name_scope(\"pairwise\"):\n self.logits_MF_neg=self.get_mf_logists(self.u,self.j)\n \n h_itm_neg=self.get_rnn_output(self.item_neg_sequence, itm_lstm_cell,reuse=True)\n self.logits_RNN_neg = self._decode_lstm(h_usr, h_itm_neg, reuse=False) \n # self.pos_over_neg = tf.sigmoid( self.logits_MF + self.logits_RNN - self.logits_MF_neg -self.logits_RNN_neg)\n # self.pre_joint_loss= -tf.reduce_mean(tf.log(self.pos_over_neg)) \n \n tv = tf.trainable_variables()\n Regularizer = tf.reduce_sum([ tf.nn.l2_loss(v) for v in tv ])\n self.pre_joint_loss = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.logits_MF + self.logits_RNN, self.logits_MF_neg +self.logits_RNN_neg)))\n self.pre_joint_logits = self.logits_MF + self.logits_RNN\n # self.pre_joint_loss+= self.lamda * (tf.nn.l2_loss(self.user_embeddings) + tf.nn.l2_loss(self.item_embeddings) + tf.nn.l2_loss(self.user_bias) +tf.nn.l2_loss(self.item_bias))\n # self.pre_joint_loss += self.lamda * (tf.nn.l2_loss(self.u_embedding) + tf.nn.l2_loss(self.i_embedding) + tf.nn.l2_loss(self.u_bias) +tf.nn.l2_loss(self.i_bias))\n self.pre_joint_loss += self.lamda * Regularizer\n\n if self.update_rule == 'adam':\n self.optimizer = tf.train.AdamOptimizer\n elif self.update_rule == 'momentum':\n self.optimizer = tf.train.MomentumOptimizer\n elif self.update_rule == 'rmsprop':\n self.optimizer = tf.train.RMSPropOptimizer\n else:\n self.optimizer = tf.train.GradientDescentOptimizer\n\n optimizer = self.optimizer(learning_rate=self.learning_rate)\n if self.model_type == 'joint':\n grads = tf.gradients(self.pre_joint_loss, tf.trainable_variables())\n elif self.model_type == 'rnn':\n grads = tf.gradients(self.loss_RNN, tf.trainable_variables())\n elif self.model_type == 'mf':\n grads = tf.gradients(self.loss_MF, tf.trainable_variables())\n \n grads_and_vars = list(zip(grads, tf.trainable_variables()))\n clipped_gradients = [(tf.clip_by_value(_[0], -self.grad_clip, self.grad_clip), _[1]) for _ in grads_and_vars if _[1] is not None and _[0] is not None]\n self.pretrain_updates = optimizer.apply_gradients(grads_and_vars=clipped_gradients) \n \n self.all_logits = self.all_logits(self.u)\n\n\n self.reward = tf.placeholder(tf.float32)\n self.pg_loss = - tf.reduce_mean(tf.log( tf.sigmoid(self.pre_joint_loss)) * self.reward) #+ MF_Regularizer + RNN_Regularizer \n \n pg_grads = tf.gradients(self.pg_loss, tf.trainable_variables()) \n pg_grads_and_vars = list(zip(pg_grads, tf.trainable_variables()))\n self.pg_updates = optimizer.apply_gradients(grads_and_vars=pg_grads_and_vars) \n \n def pretrain_step(self, sess, rating, u, i,user_sequence=None, item_sequence=None): \n if user_sequence is not None:\n if self.sparse_tensor:\n outputs = sess.run([self.pretrain_updates, self.loss_MF ,self.loss_RNN,self.pre_joint_loss,self.logits_MF,self.logits_RNN ], feed_dict 
= {self.user_sparse_tensor: user_sequence, \n self.item_sparse_tensor: item_sequence, self.rating: rating, self.u: u, self.i: i})\n else:\n outputs = sess.run([self.pretrain_updates, self.loss_MF ,self.loss_RNN,self.pre_joint_loss,self.logits_MF,self.logits_RNN ], feed_dict = {self.user_sequence: user_sequence, \n self.item_sequence: item_sequence, self.rating: rating, self.u: u, self.i: i})\n else:\n outputs = sess.run([self.pretrain_updates, self.pre_joint_loss,self.pre_logits_MF], feed_dict = {self.rating: rating, self.u: u, self.i: i})\n\n return outputs\n def pretrain_step_pair(self, sess, u,user_sequence,i,item_sequence,j,item_neg_sequence): \n if user_sequence is not None:\n if self.sparse_tensor:\n outputs = sess.run([self.pretrain_updates, self.pre_joint_loss ], feed_dict = {self.user_sparse_tensor: user_sequence, \n self.item_sparse_tensor: item_sequence, self.u: u, self.i: i ,self.j : j, self.item_neg_sequence: item_neg_sequence})\n else:\n outputs = sess.run([self.pretrain_updates,self.pre_joint_loss ], feed_dict = {self.user_sequence: user_sequence, \n self.item_sequence: item_sequence, self.u: u, self.i: i,self.j : j, self.item_neg_sequence: item_neg_sequence})\n else:\n outputs = sess.run([self.pretrain_updates, self.pre_joint_loss], feed_dict = {self.rating: rating, self.u: u, self.i: i})\n\n return outputs\n \n def prediction(self, sess, user_sequence, item_sequence, u, i,sparse=True, use_sparse_tensor = None):\n if self.sparse_tensor and (use_sparse_tensor is None or use_sparse_tensor!=False):\n outputs = sess.run(self.pre_joint_logits, feed_dict = {self.user_sparse_tensor: user_sequence, \n self.item_sparse_tensor: item_sequence, self.u: u, self.i: i}) \n return outputs\n if sparse:\n user_sequence,item_sequence=[ii.toarray() for ii in user_sequence],[ii.toarray() for ii in item_sequence]\n outputs = sess.run(self.pre_joint_logits, feed_dict = {self.user_sequence: user_sequence, \n self.item_sequence: item_sequence, self.u: u, self.i: i}) \n return outputs\n \n def predictionItems(self, sess, u):\n outputs = sess.run(self.all_logits, feed_dict = {self.u: u}) \n return outputs\n \n\n def getRewards(self,sess,gen, samples,sparse=False):\n u_seq,i_seq = [[ sample[i].toarray() for sample in samples ] for i in range(2)]\n u,i = [[ sample[i] for sample in samples ] for i in range(2,4)]\n# rating = [ sample[5] for sample in samples ] \n# indices = [j for j,v in enumerate([sample[4] for sample in samples]) if v == 1] \n \n labeled_rewards = np.zeros(len(samples))\n \n# if len(indices) > 0:\n# _,loss_mf,loss_rnn,joint_loss,joint_loss_list = gen.pretrain_step(sess, [rating[ind] for ind in indices], \n# [u[ind] for ind in indices], \n# [i[ind] for ind in indices], \n# [u_seq[ind] for ind in indices], \n# [i_seq[ind] for ind in indices])\n# for ind,v in enumerate(joint_loss_list # labeled_rewards[indices[ind]] = v\n \n unlabeled_rewards = self.prediction(sess,u_seq,i_seq,u,i)\n \n rewards = labeled_rewards + unlabeled_rewards\n \n return 2 * (self.sigmoid(rewards) - 0.5) \n\n\n def saveMFModel(self, sess, filename):\n self.paras_mf = [self.user_embeddings,self.item_embeddings,self.user_bias,self.item_bias]\n param = sess.run(self.paras_mf)\n pickle.dump(param, open(filename, 'wb'))\n\n def sigmoid(self,x):\n exp_x=np.exp(x)\n return exp_x/np.sum(exp_x)\n ","sub_path":"Discrimiator.py","file_name":"Discrimiator.py","file_ext":"py","file_size_in_byte":18192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} 
+{"seq_id":"579660926","text":"import torch.nn as nn\n\n\nclass TransposeConvBNRelu(nn.Module):\n \"\"\"\n Is a sequence of Convolution, Batch Normalization, and ReLU activation\n \"\"\"\n\n def __init__(self, channels_in, channels_out, stride=1):\n super(TransposeConvBNRelu, self).__init__()\n\n self.layers = nn.Sequential(\n nn.ConvTranspose2d(channels_in, channels_out, kernel_size=3, stride=stride, padding=1),\n nn.BatchNorm2d(channels_out),\n # nn.InstanceNorm2d(channels_out),\n nn.ReLU(inplace=True)\n )\n\n # nn.ConvTranspose2d(channels_in, channels_out, kernel_size=3, stride=stride, padding=1)\n\n def forward(self, x):\n return self.layers(x)\n","sub_path":"network/transpose_conv_bn_relu.py","file_name":"transpose_conv_bn_relu.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"586973298","text":"#!/usr/bin/env python\n\n\"\"\" Assignment 1, Exercise 3, INF1340, Fall, 2015. Troubleshooting Car Issues.\n\nThis module contains one function diagnose_car(). It is an expert system to\ninteractive diagnose car issues.\n\n\"\"\"\n\n__author__ = 'Susan Sim'\n__email__ = \"ses@drsusansim.org\"\n__copyright__ = \"2015 Susan Sim\"\n__license__ = \"MIT License\"\n\n\ndef diagnose_car():\n \"\"\"\n Interactively queries the user with yes/no questions to identify a\n possible issue with a car.\n\n Inputs: \"Y\" or \"N\"\n Expected Outputs:\n Clean terminals and try starting again.\n Replace cables and try again.\n Replace the battery.\n Check spark plug connections.\n Engine is not getting enough fuel. Clean fuel pump.\n Check to ensure choke is opening and closing.\n Get it in for service.\n\n Actual Outputs:\n Clean terminals and try starting again.\n Replace cables and try again.\n Replace the battery.\n Check spark plug connections.\n Engine is not getting enough fuel. Clean fuel pump.\n Check to ensure choke is opening and closing.\n Get it in for service.\n\n Errors: If unexpected input, ask user to start over\n\n Test Case:\n 1)raw input \"Y\", raw input \"Y\", output \"Clean terminals and try starting again.\"\n 2)raw input \"Y\", raw input \"N\", output \"Replace cables and try again.\"\n 3)raw input \"N\", raw input \"Y\", output \"Replace the battery.\"\n 4)raw input \"N\", raw input \"N\", raw input \"Y\", output \"Check spark plug connections.\"\n 5)raw input \"N\", raw input \"N\", raw input \"N\", raw input \"N\",\n output \"Engine is not getting enough fuel. 
Clean fuel pump.\"\n 6)raw input \"N\", raw input \"N\", raw input \"N\", raw input \"Y\", raw input \"N\"\n output \"Check to ensure choke is opening and closing.\"\n 7)raw input \"N\", raw input \"N\", raw input \"N\", raw input \"Y\", raw input \"Y\"\n output \"Get it in for service.\"\n \"\"\"\n\n #Program asks user first question and then determines\n #what question to ask next depending on the answer\n\n initial_question = raw_input(\"Is the car silent when you turn it on?\")\n\n if initial_question == 'Y':\n question1 = \"Are the battery terminals corroded?\"\n question2 = raw_input(question1)\n if question2 == 'Y':\n solution = \"Clean terminals and try starting again.\"\n print (solution)\n elif question2 == 'N':\n solution = \"Replace cables and try again.\"\n print(solution)\n else:\n print(\"Incorrect entry, start over\")\n\n #Program asks user second question depending on answer to first question\n\n elif initial_question == 'N':\n question1 = \"Does the car make a clicking noise?\"\n question2 = raw_input(question1)\n if question2 == 'Y':\n print(\"Replace the battery.\")\n\n #Program asks user third question depending on answer to first question\n\n elif question2 == 'N':\n question3 = \"Does the car crank up but fail to start?\"\n question4 = raw_input(question3)\n if question4 == 'Y':\n print(\"Check spark plug connections.\")\n\n #Program asks user fourth question depending on answer to first question\n\n elif question4 == 'N':\n question5 = \"Does the engine start and then die?\"\n question6 = raw_input(question5)\n if question6 == 'Y':\n question7 = \"Does your car have fuel injection?\"\n question8 = raw_input(question7)\n if question8 == 'Y':\n print(\"Get it in for service.\")\n\n #Program asks user fourth question depending on answer to first question\n\n elif question8 == 'N':\n print(\"Check to ensure the choke is opening and closing.\")\n else:\n print(\"Incorrect entry, start over\")\n elif question6 == 'N':\n print(\"Engine is not getting enough fuel. Clean fuel pump.\")\n else:\n print(\"Incorrect entry, start over\")\n else:\n print(\"Incorrect entry, start over\")\n else:\n print(\"Incorrect entry, start over\")\n\n #If user enters anything other than \"y\" or \"n\" it will recognize the error\n #And ask the user to start the program over\n\n else:\n print(\"Incorrect entry, start over\")\n\n\n#diagnose_car()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"303199682","text":"# Copyright (c) 2021, Frappe Technologies and contributors\n# For license information, please see license.txt\n\nfrom json.encoder import JSONEncoder\nimport datetime\nimport requests\nfrom datetime import timedelta\nfrom dateutil import parser\n\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.integrations.utils import create_payment_gateway, get_payment_gateway_controller\nfrom frappe.utils import call_hook_method, validate_email_address\n\n\nclass Pagos360Settings(Document):\n\n supported_currencies = [\"ARS\"]\n\n def validate_transaction_currency(self, currency):\n if currency not in self.supported_currencies:\n frappe.throw(_(\"Please select another payment method. 
Pagos360 does not support transactions in currency '{0}'\").format(currency))\n\n def validate(self):\n self.validate_recipients()\n create_payment_gateway(\"Pagos360\")\n call_hook_method('payment_gateway_enabled', gateway=\"Pagos360\")\n if not self.flags.ignore_mandatory:\n self.validate_pagos360_credentials()\n\n def validate_recipients(self):\n if self.recipients:\n for recipient in self.recipients.split(\",\"):\n validate_email_address(recipient, True)\n\n def validate_pagos360_credentials(self):\n try:\n pagos360_settings = get_payment_gateway_controller(\"Pagos360\")\n pago360 = Pagos360(pagos360_settings.get_password(fieldname=\"api_key\", raise_exception=False) or self.api_key, pagos360_settings.sandbox)\n pago360.get_account()\n except Exception:\n frappe.throw(_(\"Invalid payment gateway credentials\"))\n\n def get_payment_url(self, **kwargs):\n \"\"\"\n Url para solicitudes de pago\n \"\"\"\n from erpnext_argentina.pagos360 import pago360_log_error\n from frappe.utils import validate_email_address\n pagos360_settings = get_payment_gateway_controller(\"Pagos360\")\n pago360 = Pagos360(pagos360_settings.get_password(fieldname=\"api_key\", raise_exception=False) or self.api_key, pagos360_settings.sandbox)\n payment_request = frappe.get_doc(kwargs[\"reference_doctype\"], kwargs[\"reference_docname\"])\n\n sales_invoice = frappe.get_doc(payment_request.reference_doctype, payment_request.reference_name)\n\n if hasattr(sales_invoice, 'subscription') and sales_invoice.subscription:\n return None\n\n def get_due_date(sales_invoice):\n today = datetime.date.today()\n date = getattr(sales_invoice, 'due_date', None) or getattr(sales_invoice, 'delivery_date', None) or today\n if date > today:\n return date.strftime(\"%d-%m-%Y\")\n return (today + timedelta(days=7)).strftime(\"%d-%m-%Y\")\n\n payment_request_data = {\n \"description\": kwargs[\"description\"].decode(\"utf-8\"),\n \"first_due_date\": get_due_date(sales_invoice),\n \"first_total\": '{0:.2f}'.format(kwargs['amount']),\n \"payer_name\": kwargs[\"payer_name\"].decode(\"utf-8\"),\n \"external_reference\": kwargs[\"reference_docname\"],\n \"metadata\": {\"external_reference\": kwargs[\"reference_docname\"]}\n }\n\n if validate_email_address(kwargs.get('payer_email')):\n payment_request_data.update({\"payer_email\": kwargs.get('payer_email')})\n\n result = pago360.create_payment_request({\"payment_request\": payment_request_data})\n\n if result.get(\"status\", 0) == 201:\n response = result.get(\"response\", {})\n frappe.db.set_value(\"Payment Request\", payment_request.name, \"pagos360_barcode\", response.get(\"barcode\"))\n frappe.db.set_value(\"Payment Request\", payment_request.name, \"pagos360_barcode_url\", response.get(\"barcode_url\"))\n frappe.db.set_value(\"Payment Request\", payment_request.name, \"pagos360_checkout_url\", response.get(\"checkout_url\"))\n frappe.db.set_value(\"Payment Request\", payment_request.name, \"pagos360_pdf_url\", response.get(\"pdf_url\"))\n frappe.db.commit()\n return response.get('checkout_url')\n else:\n pago360_log_error(\"Ocurrió un error en la solicitud de pago\", {\"request\": payment_request_data, \"response\": result}, exception=True)\n frappe.throw(\"Ocurrió un error en la pasarela de pago\")\n return None\n\n def get_parts_from_payment_request(self, payment_request):\n if payment_request.reference_doctype != \"Sales Invoice\":\n return None, None, None\n\n sales_invoice = frappe.get_doc(\"Sales Invoice\", payment_request.reference_name)\n\n if not sales_invoice.subscription:\n return 
sales_invoice, None, None\n\n subscription = frappe.get_doc(\"Subscription\", sales_invoice.subscription)\n\n payment_gateway = subscription.get_payment_gateway()\n\n if getattr(payment_gateway, \"gateway\", \"\") != \"Pagos360\" or not subscription.adhesion_pagos360 or not frappe.get_value(\"Adhesion Pagos360\", subscription.adhesion_pagos360, 'id_adhesion'):\n return sales_invoice, subscription, None\n\n adhesion = frappe.get_doc(\"Adhesion Pagos360\", subscription.adhesion_pagos360)\n return sales_invoice, subscription, adhesion\n\n def on_payment_request_submission(self, payment_request):\n from erpnext_argentina.pagos360 import pago360_log_error\n\n sales_invoice, subscription, adhesion = self.get_parts_from_payment_request(payment_request)\n\n if sales_invoice and subscription and adhesion:\n try:\n self.solicitar_debito(subscription, adhesion, sales_invoice, payment_request)\n except Exception:\n pago360_log_error(\"on_payment_request_submission\", payment_request.as_json(), exception=True)\n return False # No debe enviar email\n\n return True # En solicitud de pago comun debe enviar mail\n\n def get_due_date(self, pago360, sales_invoice):\n \"\"\"\n Debe haber como mínimo 72hs hábiles entre hoy y la primera fecha de vencimiento.\n \"\"\"\n data = {\n \"next_business_day\": {\n \"date\": sales_invoice.posting_date.strftime(\"%d-%m-%Y\"),\n \"days\": 4,\n }\n }\n response = pago360.get_next_business_day(data)\n\n if response[\"status\"] == 200:\n return parser.parse(response[\"response\"]).date()\n return sales_invoice.posting_date + timedelta(days=4)\n\n def solicitar_debito(self, subscription, adhesion, sales_invoice, payment_request):\n from dateutil.relativedelta import relativedelta\n from erpnext_argentina.pagos360 import pago360_log_error\n\n pagos360_settings = get_payment_gateway_controller(\"Pagos360\")\n pago360 = Pagos360(pagos360_settings.get_password(\"api_key\"), pagos360_settings.sandbox)\n\n debit_request = {\"metadata\": {\"external_reference\": payment_request.name}}\n\n if adhesion.tipo == \"Débito\":\n nombre_objeto = \"debit_request\"\n method = pago360.create_cbu_debit_request\n\n try:\n due_date = self.get_due_date(pago360, sales_invoice)\n except Exception:\n due_date = sales_invoice.posting_date + timedelta(days=4)\n\n debit_request.update({\n \"adhesion_id\": int(adhesion.id_adhesion),\n \"first_due_date\": due_date.strftime(\"%d-%m-%Y\"),\n \"first_total\": '{0:.2f}'.format(payment_request.grand_total),\n \"description\": f\"Suscripción {subscription.company}\",\n })\n elif adhesion.tipo == \"Crédito\":\n nombre_objeto = \"card_debit_request\"\n method = pago360.create_card_debit_request\n\n posting_date = sales_invoice.posting_date\n\n if posting_date.day > 19:\n posting_date = posting_date + relativedelta(day=1, months=+1)\n\n debit_request.update({\n \"card_adhesion_id\": int(adhesion.id_adhesion),\n \"month\": posting_date.month,\n \"year\": posting_date.year,\n \"amount\": '{0:.2f}'.format(payment_request.grand_total),\n \"description\": f\"Suscripción {subscription.company}\"\n })\n\n result = method({nombre_objeto: debit_request})\n\n if result.get(\"status\", 0) == 201:\n return result.get(\"response\", {})\n\n pago360_log_error(\"El débito no se solicitó\", {\"request\": debit_request, \"response\": result}, exception=True)\n\n def send_notification_email(self, subject, msg):\n if not self.recipients:\n return\n\n frappe.sendmail(\n recipients=self.recipients.split(\",\"),\n subject=subject,\n message=msg,\n )\n frappe.db.commit()\n\n\nclass 
Pagos360:\n\n base_url = 'https://api.pagos360.com/'\n base_sandbox_url = 'https://api.sandbox.pagos360.com/'\n mime_type = 'application/json'\n\n def __init__(self, api_key, sandbox):\n self.api_key = api_key\n self.headers = {'authorization': 'Bearer ' + self.api_key, 'Content-type': self.mime_type}\n self.sandbox = bool(sandbox)\n\n def get_base_url(self):\n if self.sandbox:\n return self.base_sandbox_url\n return self.base_url\n\n def get(self, uri, params=None):\n api_result = requests.get(self.get_base_url() + uri, params=params, headers=self.headers)\n return {\"status\": api_result.status_code, \"response\": api_result.json()}\n\n def post(self, uri, data=None, params=None):\n if data is not None:\n data = JSONEncoder().encode(data)\n\n api_result = requests.post(self.get_base_url() + uri, params=params, data=data, headers=self.headers)\n return {\"status\": api_result.status_code, \"response\": api_result.json()}\n\n def put(self, uri, data=None, params=None):\n if data is not None:\n data = JSONEncoder().encode(data)\n\n api_result = requests.put(self.get_base_url() + uri, params=params, data=data, headers=self.headers)\n return {\"status\": api_result.status_code, \"response\": api_result.json()}\n\n def validate_date(self, date=None):\n if not date:\n return (datetime.datetime.today() - datetime.timedelta(days=1)).strftime(\"%d-%m-%Y\")\n if type(date) != str:\n return date.strftime(\"%d-%m-%Y\")\n return date\n\n def get_account(self):\n return self.get(\"account\")\n\n def get_account_balance(self):\n return self.get(\"account/balances/\")\n\n def get_collection_report(self, date=None):\n return self.get(\"report/collection/\" + self.validate_date(date))\n\n def get_chargeback_report(self, date=None):\n return self.get(\"report/chargeback/\" + self.validate_date(date))\n\n def get_settlement_report(self, date=None):\n return self.get(\"report/settlement/\" + self.validate_date(date))\n\n def get_card_adhesion(self, id):\n return self.get(\"card-adhesion/\" + str(id))\n\n def create_card_adhesion(self, data):\n return self.post(\"card-adhesion/\", data)\n\n def cancel_card_adhesion(self, id):\n return self.put(\"card-adhesion/{}/cancel\".format(id))\n\n def get_cbu_adhesion(self, id):\n return self.get(\"adhesion/\" + str(id))\n\n def create_cbu_adhesion(self, data):\n return self.post(\"adhesion/\", data)\n\n def cancel_cbu_adhesion(self, id):\n return self.put(\"adhesion/{}/cancel\".format(id))\n\n def create_card_debit_request(self, data):\n return self.post(\"card-debit-request/\", data)\n\n def get_card_debit_request(self, id):\n return self.get(\"card-debit-request/\" + str(id))\n\n def cancel_card_debit_request(self, id):\n return self.put(\"card-debit-request/{}/cancel/\".format(id))\n\n def create_cbu_debit_request(self, data):\n return self.post(\"debit-request/\", data)\n\n def get_cbu_debit_request(self, id):\n return self.get(\"debit-request/\" + str(id))\n\n def cancel_cbu_debit_request(self, id):\n return self.put(\"debit-request/{}/cancel/\".format(id))\n\n def create_payment_request(self, data):\n return self.post(\"payment-request/\", data)\n\n def get_payment_request(self, id):\n return self.get(\"payment-request/\" + str(id))\n\n def get_next_business_day(self, data):\n if \"next_business_day\" not in data:\n data = {\"next_business_day\": data}\n return self.post(\"validator/next-business-day/\", 
data)\n","sub_path":"frappe/integrations/doctype/pagos360_settings/pagos360_settings.py","file_name":"pagos360_settings.py","file_ext":"py","file_size_in_byte":12421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"591989480","text":"import pygame\nfrom mock import Mock, mock\n\nfrom app.backgrounds.camera import Camera\nfrom app.common.pygame_surface_wrapper import PygameSurfaceWrapper\nfrom app.inputs.game_play.game_play_controller_model import GamePlayControllerModel\nfrom app.player.player_state import PlayerState\nfrom app.renderers.background_renderer import BackgroundRenderer\nfrom app.states.abc_game_state import ABCGameState\nfrom app.states.game_play.game_play_state import GamePlayState\nfrom app.states.game_state_changer import GameStateChanger\nfrom test.patchable_modules import GAME_PLAY_STATE\nfrom test.pm_test_case import *\n\n\nclass TestGamePlayState(PMTestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n @mock.patch(GAME_PLAY_STATE.GAME_PLAY_UPDATER, autospec=True)\n @mock.patch(GAME_PLAY_STATE.APPLICATION_SETTINGS, autospec=True)\n @mock.patch(GAME_PLAY_STATE.PLAYER_MANAGER, autospec=True)\n @mock.patch(GAME_PLAY_STATE.WORLD_SPACE_MANAGER, autospec=True)\n @mock.patch(GAME_PLAY_STATE.GAME_PLAY_RENDERER_MANAGER, autospec=True)\n @mock.patch(GAME_PLAY_STATE.PYGAME_SURFACE_WRAPPER_FACTORY, autospec=True)\n def testInit(\n self,\n mockSurfaceFactory,\n mockRendererManager,\n mockWorldSpaceManagerModule,\n mockPlayerManagerModule,\n mockAppSettings,\n mockGamePlayUpdaterModule\n ):\n mockMainSurface = Mock(pygame.Surface)\n mainSurfaceWrapper = PygameSurfaceWrapper(mockMainSurface)\n mockSurfaceFactory.buildNonAlpha.return_value = mainSurfaceWrapper\n mockChanger = Mock(GameStateChanger)\n\n mockWorldSpaceManagerModule.return_value.backgroundRenderer = Mock(spec=BackgroundRenderer)\n mockWorldSpaceManagerModule.return_value.camera = Mock(spec=Camera)\n\n mockAppSettings.return_value.windowWidth = 55\n mockAppSettings.return_value.windowHeight = 71\n\n gamePlayControllerModel = Mock(spec=GamePlayControllerModel)\n gamePlay = GamePlayState(mockChanger, gamePlayControllerModel)\n\n self.assertIsInstance(gamePlay, ABCGameState)\n\n self.assertIs(mockChanger, gamePlay.gameStateChanger)\n self.assertIs(gamePlay.mainSurfaceWrapper, mainSurfaceWrapper)\n\n mockGamePlayUpdaterModule.assert_called_once_with(gamePlayControllerModel)\n self.assertIs(mockGamePlayUpdaterModule.return_value, gamePlay.updater)\n\n self.assertIsNone(gamePlay.updatedPlayerState)\n\n self.assertIs(gamePlayControllerModel, gamePlay.controllerModel)\n\n self.assertIs(mockPlayerManagerModule.return_value, gamePlay.playerManager)\n mockPlayerManagerModule.assert_called_once_with(\"Player1\")\n\n self.assertIs(mockWorldSpaceManagerModule.return_value, gamePlay.worldSpaceManager)\n mockWorldSpaceManagerModule.assert_called_once_with(\n mockPlayerManagerModule.return_value.playerState.worldSpaceName)\n\n mockAppSettings.assert_called_once()\n\n mockSurfaceFactory.buildNonAlpha.assert_called_once_with(size=(\n mockAppSettings.return_value.windowWidth,\n mockAppSettings.return_value.windowHeight\n ))\n\n self.assertIs(mockRendererManager.return_value, gamePlay.rendererManager)\n mockRendererManager.assert_called_once_with(\n mockWorldSpaceManagerModule.return_value.camera,\n mockWorldSpaceManagerModule.return_value.backgroundRenderer,\n mockPlayerManagerModule.return_value.playerState.renderer\n )\n\n @mock.patch(GAME_PLAY_STATE.PLAYER_MANAGER)\n 
@mock.patch(GAME_PLAY_STATE.WORLD_SPACE_MANAGER)\n def testProcessInputs(self, _, __):\n\n controller = Mock(spec=GamePlayControllerModel)\n gamePlay = GamePlayState(Mock(spec=GameStateChanger), controller)\n\n events = [Mock(spec=pygame.event.Event), Mock(spec=pygame.event.Event), Mock(spec=pygame.event.Event)]\n gamePlay.processInputs(events)\n\n controller.processInputs.assert_called_once_with(events)\n\n @mock.patch(GAME_PLAY_STATE.GAME_PLAY_UPDATER)\n @mock.patch(GAME_PLAY_STATE.PLAYER_MANAGER)\n @mock.patch(GAME_PLAY_STATE.WORLD_SPACE_MANAGER, autospec=True)\n def testUpdate_CallsUpdaterCorrectly_SetsUpdatedPlayerState(\n self,\n mockWorldSpaceManagerModule,\n mockPlayerManagerModule,\n mockGamePlayUpdaterModule\n ):\n mockWorldSpaceManagerModule.return_value.backgroundRenderer = Mock(spec=BackgroundRenderer)\n\n mockWorldSpaceManagerModule.return_value.camera = Mock(spec=Camera)\n\n mockChanger = Mock(GameStateChanger)\n mockController = Mock(spec=GamePlayControllerModel)\n\n gamePlay = GamePlayState(mockChanger, mockController)\n\n self.assertIsNone(gamePlay.updatedPlayerState)\n\n timeSinceProgramStart = 59202\n gamePlay.update(timeSinceProgramStart)\n\n updatedEntities = mockGamePlayUpdaterModule.return_value.update.return_value\n self.assertIs(updatedEntities, gamePlay.updatedPlayerState)\n\n mockGamePlayUpdaterModule.return_value.update.assert_called_once_with(\n timeSinceProgramStart,\n mockPlayerManagerModule.return_value,\n mockWorldSpaceManagerModule.return_value)\n\n @mock.patch(GAME_PLAY_STATE.PLAYER_MANAGER)\n @mock.patch(GAME_PLAY_STATE.WORLD_SPACE_MANAGER)\n @mock.patch(GAME_PLAY_STATE.GAME_PLAY_RENDERER_MANAGER, autospec=True)\n def testRenderCallsRendererManager(self, mockRendererManager, _, __):\n mockChanger = Mock(spec=GameStateChanger)\n gamePlay = GamePlayState(mockChanger, Mock(spec=GamePlayControllerModel))\n mockScreen = Mock(spec=PygameSurfaceWrapper)\n\n gamePlay.updatedPlayerState = mockUpdatedPlayer = Mock(spec=PlayerState)\n\n gamePlay.render(mockScreen)\n\n mockRendererManager.return_value.render.assert_called_once_with(mockScreen, mockUpdatedPlayer)\n","sub_path":"test/states/game_play/test_game_play_state.py","file_name":"test_game_play_state.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"176113355","text":"from makeT3Cleandir import *\nimport os\nos.system(\"voms-proxy-init cms\")\n#version =\"\"\npath = \"/cms/store/user/jhkim/\"\n\ndir = [\"LQ_LJ_Jan10W2Jets\", \"LQ_LJ_Jan10W3Jets\", \"LQ_LJ_Jan10topLJ\"]\n\nfor d in dir:\n makeCleandir(path, d)\n","sub_path":"doclean.py","file_name":"doclean.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"42462935","text":"my_string= input ('Введите строку ')\nlen_type = str(len)\nstring_long = len(my_string)\nmax_type = str(max)\nstring_max = max(my_string)\nmin_type = str(min)\nstring_min = min(my_string)\nprint('Длина строки', string_long, 'символов, функция len', len_type)\nprint('Максимальный символ:', string_max, ', функция max', max_type)\nprint('Минимальный символ:', string_min, ', функция min', min_type)","sub_path":"Lesson 3/lesson03_hw03.py","file_name":"lesson03_hw03.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"620419163","text":"from django.shortcuts import render\nfrom django.utils import 
timezone\nfrom .models import Post\nfrom django.shortcuts import render, get_object_or_404\nfrom .forms import PostForm\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.models import User\n\n#returns authenticated user\ndef authenticated_user(request):\n # if not, create an anonymous user and log them in\n u = User(username='0000', first_name='Anonymous', last_name='User')\n u.set_unusable_password()\n u.save()\n \n u.username = u.id\n u.save()\n return u\n\ndef post_list(request):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n return render(request, 'blog/post_list.html', {'posts': posts})\n\ndef post_detail(request, pk):\n if request.method == \"POST\":\n u = get_object_or_404(Post, pk=pk).delete()\n return post_list(request)\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.name = authenticated_user(request)\n post.published_date = timezone.now()\n post.save()\n #print(\"before summarize.py\")\n #summarize.main()\n #print(\"after summarize.py\")\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.name = authenticated_user(request)\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_delete(request,pk):\n u = get_object_or_404(Post, pk=pk).delete()\n return post_list(request)","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"308094258","text":"from astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\n\nf = glob.glob('*.fit')\n\ndat= np.empty((15, 1023, 1536))\n\nfor i in range(len(f)):\n hdul = fits.open(f[i])\n #hdul.info()\n dat[i] = hdul[0].data\n print(hdul[0].data.shape)\n hdul.close()\n print(i)\n\ndat.shape\nbias = np.average(dat, axis=0)\nprint(bias)\nnp.save(\"averagebias.npy\", bias)\nplt.imshow(bias)\nfigname = 'averagebias.png'\nplt.savefig(figname,bbox_inches='tight', transparent=True,pad_inches=0)\nplt.show()\n \n","sub_path":"bias/averageBias.py","file_name":"averageBias.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"568513785","text":"#26.10.2018\r\ndef sortDict(givenDict):\r\n returnDict = {}\r\n a = []\r\n for element in givenDict.keys():\r\n a.append(element)\r\n a = sorted(a)\r\n for element in a:\r\n for secondary_element in givenDict.items():\r\n if element == secondary_element[0]:\r\n returnDict[element] = secondary_element[1]\r\n\r\n return returnDict\r\n\r\nprint(sortDict({2:5, 1:13}))\r\n","sub_path":"Dictionary/Problem_14.py","file_name":"Problem_14.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"361113087","text":"\"\"\"\nRetrieve stock data using Yahoo Finance 
API.\nAPI query based on StackOverflow answer https://stackoverflow.com/a/47505102.\n\"\"\"\nimport requests\nfrom typing import NamedTuple\n\n__base_url = 'https://query1.finance.yahoo.com'\n__stock_api = '/v8/finance/chart'\n\n\ndef fetch_historical_stock_price(\n ticker: str,\n start: int = 0,\n end: int = 9999999999,\n interval: str = '3mo'\n) -> dict:\n \"\"\" Fetch historical stock prices for given ticker. \"\"\"\n\n url = f'{__base_url}{__stock_api}/{ticker}'\n params = {\n 'period1': start,\n 'period2': end,\n 'interval': interval\n }\n response = requests.get(url=url, params=params)\n\n result = response.json()['chart']['result'][0]\n timestamp = result['timestamp']\n start_time = timestamp[0]\n end_time = timestamp[-1]\n\n chart = result['indicators']['quote'][0]\n volume = chart['volume']\n close_values = chart['close']\n high_values = chart['high']\n low_values = chart['low']\n open_values = chart['open']\n\n fields = ['timestamp','volume','close','open','high','low']\n data = {\n 'url': response.url,\n 'ticker': ticker,\n 'range_start': start_time,\n 'range_end': end_time,\n 'interval': interval,\n 'data': [ dict(zip(fields, a))\n for a \n in zip(timestamp, volume, close_values, open_values, high_values, low_values)]\n }\n\n return data\n\nif __name__ == '__main__':\n import argparse\n ap = argparse.ArgumentParser()\n\n ap.add_argument('-t', '--ticker', required=True, help='Ticker symbol.')\n ap.add_argument('-s', '--start', required=False, help='Start time.')\n ap.add_argument('-e', '--end', required=False, help='End time.')\n ap.add_argument('-i', '--interval', required=False, default='3mo', help='Granularity of the historical price data.')\n\n args = ap.parse_args()\n\n print(args)\n\n ticker = args.ticker\n start = 946684800\n end = 9999999999\n interval = args.interval\n \n data = fetch_historical_stock_price(ticker, start=start, end=end, interval=interval)\n\n print('----------------------------------')\n print(data)\n print('')\n print(data['url'])","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"532327281","text":"import sys\nimport os\nimport steg\nimport cv2 as cv\nimport PySide2.QtCore\nfrom PySide2.QtCore import Qt, QRectF,SIGNAL, QObject, QTranslator\nfrom PySide2.QtGui import QBrush, QColor, QPainter, QPen, QPalette\nfrom PySide2.QtWidgets import (QApplication, QDoubleSpinBox,\n QFormLayout, QGridLayout, QGroupBox, QPushButton, QWidget, QComboBox, QFileDialog)\nfrom PySide2.QtCharts import QtCharts\nfrom PySide2.QtCore import QDate, QDir, QStandardPaths, Qt, QUrl, QSize\nfrom PySide2.QtGui import QClipboard, QGuiApplication, QDesktopServices, QIcon, QTextLine, QTextLine\nfrom PySide2.QtGui import QImage, QPixmap, QImageReader\nfrom PySide2.QtWidgets import (QAction, qApp, QApplication, QHBoxLayout, QLabel,\n QMainWindow, QPushButton, QTabWidget, QToolBar, QVBoxLayout, QWidget)\nfrom PySide2.QtMultimedia import QCamera, QCameraImageCapture, QCameraInfo\nfrom PySide2.QtMultimediaWidgets import QCameraViewfinder\nfrom PySide2.QtWidgets import QDialog, QLineEdit, QPushButton\n\nclass MainWidget(QMainWindow):\n def __init__(self, parent=None):\n super(MainWidget, self).__init__(parent)\n\n #toolBar = QToolBar()\n #self.addToolBar(toolBar)\n\n self.fileMenu = self.menuBar().addMenu(\"&File\")\n\n openContainerAction = QAction(QIcon.fromTheme(\"application-exit\"), \"Open Container\",\n self, shortcut=\"Ctrl+T\", 
triggered=self.loadcontainerpath)\n self.fileMenu.addAction(openContainerAction)\n openCodeAction = QAction(QIcon.fromTheme(\"application-exit\"), \"Open Code\",\n self, shortcut=\"Ctrl+Y\", triggered=self.loadcodepath)\n self.fileMenu.addAction(openCodeAction)\n\n\n exitAction = QAction(QIcon.fromTheme(\"application-exit\"), \"E&xit\",\n self, shortcut=\"Ctrl+Q\", triggered=self.close)\n self.fileMenu.addAction(exitAction)\n\n self.aboutMenu = self.menuBar().addMenu(\"&About\")\n aboutQtAction = QAction(\"About &Qt\", self, triggered=qApp.aboutQt)\n self.aboutMenu.addAction(aboutQtAction)\n\n self.statusBar().showMessage(\"steganography with metrics measurement\")\n self.tabWidget = QTabWidget()\n self.setCentralWidget(self.tabWidget)\n\n self.image_container_label = QLabel()\n self.image_coded_label = QLabel()\n self.image_container_pix = QPixmap()\n self.image_coded_pix = QPixmap()\n self.image_container = QImage()\n self.image_coded = QImage()\n\n self.container_path = \"./background.jpg\"\n self.coded_path = \"./background2.jpg\"\n ret1 = self.image_container.load(self.container_path)\n ret2 = self.image_coded.load(self.coded_path)\n if ret1 is True and ret2 is True:\n self.image_container = self.image_container.scaled(QSize(700,600),Qt.IgnoreAspectRatio)\n self.image_coded = self.image_coded.scaled(QSize(700,600),Qt.IgnoreAspectRatio)\n self.image_container_pix = self.image_container_pix.fromImage(self.image_container)\n self.image_container_label.setPixmap(self.image_container_pix)\n self.image_coded_pix = self.image_coded_pix.fromImage(self.image_coded)\n self.image_coded_label.setPixmap(self.image_coded_pix)\n\n self.main_layout = QGridLayout()\n self.left_layout = QVBoxLayout()\n self.right_layout = QFormLayout()\n\n self.blue_combobox = QComboBox()\n self.red_combobox = QComboBox()\n self.green_combobox = QComboBox()\n self.blue_combobox.addItems([\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\"])\n self.red_combobox.addItems([\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\"])\n self.green_combobox.addItems([\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\"])\n\n self.blue_combobox.currentIndexChanged.connect(self.selectionchanged_blue)\n self.green_combobox.currentIndexChanged.connect(self.selectionchanged_green)\n self.red_combobox.currentIndexChanged.connect(self.selectionchanged_red)\n self.blue_value = 0\n self.green_value = 0\n self.red_value = 0\n\n self.code_button = QPushButton()\n self.decode_button = QPushButton()\n self.code_button.setText(\"Code\")\n self.decode_button.setText(\"Decode\")\n self.code_button.clicked.connect(self.codeImage)\n self.decode_button.clicked.connect(self.decodeImage)\n self.label_image = QLabel()\n\n #bottom nested\n self.mid_nested = QFormLayout()\n self.blur_label = QLabel()\n self.ssim_label = QLabel()\n self.ssim_text = QLineEdit()\n self.blur_text = QLineEdit()\n self.vqm_label = QLabel()\n self.vqm_Text = QLineEdit()\n\n self.vqm_Text.setVisible(False)\n self.ssim_text.setVisible(False)\n self.blur_text.setVisible(False)\n self.mid_nested.addRow(self.blur_label, self.blur_text)\n self.mid_nested.addRow(self.ssim_label, self.ssim_text)\n self.mid_nested.addRow(self.vqm_label,self.vqm_Text)\n #left layout\n self.left_layout.addWidget(self.image_container_label)\n self.left_layout.addWidget(self.image_coded_label)\n #right nested layout\n self.right_nested_layout = QHBoxLayout()\n self.right_nested_layout.addWidget(self.code_button)\n self.right_nested_layout.addWidget(self.decode_button)\n #right layout\n 
self.right_layout.addRow(QLabel(\"Blue Channel\"), self.blue_combobox)\n self.right_layout.addRow(QLabel(\"Green Channel\"), self.green_combobox)\n self.right_layout.addRow(QLabel(\"Red Channel\"), self.red_combobox)\n self.right_layout.addRow(self.right_nested_layout)\n self.right_layout.addWidget(self.label_image)\n self.right_layout.addRow(self.mid_nested)\n\n self.main_layout.addLayout(self.left_layout,1,1)\n self.main_layout.addLayout(self.right_layout,1,2)\n self.centralWidget().setLayout(self.main_layout)\n \n\n def decodeImage(self):\n channel_bits = [(0,self.blue_value),(1,self.green_value),(2,self.red_value)]\n dialog = QFileDialog(self)\n dialog.setFileMode(QFileDialog.AnyFile)\n dialog.setViewMode(QFileDialog.Detail)\n if dialog.exec_():\n fileNames = dialog.selectedFiles()\n img_container = cv.imread(fileNames[0])\n ret,img_decoded = steg.DecodeImage(img_container, channel_bits)\n if ret == -1:\n print(\"something went wrong\")\n return\n cv.imwrite(\"decoded.png\",img_decoded)\n image = QImage()\n image.load(\"decoded.png\")\n pixmap = QPixmap.fromImage(image)\n pixmap = pixmap.scaled(QSize(700,600),Qt.IgnoreAspectRatio)\n self.label_image.setPixmap(pixmap)\n\n\n def codeImage(self):\n channel_bits = [(0,self.blue_value),(1,self.green_value),(2,self.red_value)]\n print(channel_bits)\n\n img_container, data = steg.Code(self.container_path, self.coded_path, channel_bits)\n\n filename = QFileDialog.getSaveFileName(self,\"Save file\")\n\n cv.imwrite(filename[0],img_container)\n #image = QImage(img_container.data, img_container.shape[1], img_container.shape[0], QImage.Format_RGB888).rgbSwapped()\n image = QImage()\n image.load(filename[0])\n pixmap = QPixmap.fromImage(image)\n pixmap = pixmap.scaled(QSize(700,600),Qt.IgnoreAspectRatio)\n self.label_image.setPixmap(pixmap)\n\n self.chart = QtCharts.QChart()\n self.chart.setAnimationOptions(QtCharts.QChart.SeriesAnimations)\n self.chart.setAnimationOptions(QtCharts.QChart.AllAnimations)\n self.series = QtCharts.QBarSeries()\n set0 = QtCharts.QBarSet(\"PAE\")\n set1 = QtCharts.QBarSet(\"MSE\")\n set2 = QtCharts.QBarSet(\"PSNR\")\n #set3 = QtCharts.QBarSet(\"VQM\")\n set0.append(data[0])\n set1.append(data[1])\n set2.append(data[2])\n #set3.append(data[6])\n #Blur\n self.vqm_label.setText(\"VQM\")\n self.ssim_label.setText(\"SSIM\")\n self.blur_label.setText(\"BlurringMetric\")\n self.ssim_text.setText(str(data[5]))\n self.blur_text.setText(str(data[4]))\n self.vqm_Text.setText(str(data[6]))\n self.blur_text.setVisible(True)\n self.ssim_text.setVisible(True)\n self.vqm_Text.setVisible(True)\n \n self.series.append(set0)\n self.series.append(set1)\n self.series.append(set2)\n #self.series.append(set3)\n\n y_axis = QtCharts.QValueAxis()\n y_axis.setRange(0,255)\n self.chart.addAxis(y_axis, Qt.AlignLeft)\n self.series.attachAxis(y_axis)\n self.chart.addSeries(self.series)\n self.chart.setTitle(\"Quality metrics, origin-container diff\")\n self.chartView = QtCharts.QChartView(self.chart)\n self.chartView.setRenderHints(QPainter.Antialiasing)\n self.main_layout.addWidget(self.chartView,1,3)\n \n\n\n def loadImage(self, path):\n image = QImage()\n ret = image.load(path)\n if ret is True:\n pixmap = QPixmap.fromImage(image)\n pixmap = pixmap.scaled(QSize(800,600),Qt.IgnoreAspectRatio)\n return pixmap\n return None\n\n\n def loadcontainerpath(self):\n dialog = QFileDialog(self)\n dialog.setFileMode(QFileDialog.AnyFile)\n dialog.setViewMode(QFileDialog.Detail)\n if dialog.exec_():\n fileNames = dialog.selectedFiles()\n print(fileNames)\n 
self.container_path = fileNames[0]\n pixmap = self.loadImage(fileNames[0])\n self.image_container_label.setPixmap(pixmap)\n\n\n def loadcodepath(self):\n dialog = QFileDialog(self)\n dialog.setFileMode(QFileDialog.AnyFile)\n dialog.setViewMode(QFileDialog.Detail)\n if dialog.exec_():\n fileNames = dialog.selectedFiles()\n print(fileNames)\n self.coded_path = fileNames[0]\n pixmap = self.loadImage(fileNames[0])\n self.image_coded_label.setPixmap(pixmap)\n\n\n def selectionchanged_blue(self,i):\n print(\"Current index\",i,\"selection changed blue \",self.blue_combobox.currentText())\n self.blue_value = i\n\n def selectionchanged_green(self,i):\n print(\"Current index\",i,\"selection changed green\",self.green_combobox.currentText())\n self.green_value = i\n\n def selectionchanged_red(self,i):\n print(\"Current index\",i,\"selection changed red\",self.red_combobox.currentText())\n self.red_value = i\n\n\ndef Run():\n # Prints PySide2 version\n # e.g. 5.11.1a1\n print(\"Pyside2 version\",PySide2.__version__)\n app = QApplication(sys.argv)\n w = MainWidget()\n pal = app.palette()\n #pal.setColor(QPalette.Base, QColor(15, 15, 15))\n #pal.setColor(QPalette.Window, QColor(3, 18, 0))\n #pal.setColor(QPalette.Highlight, QColor(142, 45, 197).lighter())\n #pal.setColor(QPalette.HighlightedText, Qt.black)\n app.setPalette(pal)\n \n available_geometry = app.desktop().availableGeometry(w)\n size_h = available_geometry.height()\n size_w = available_geometry.width()\n \n w.setMinimumSize(size_w * 0.5, size_h * 0.5)\n w.showMaximized()\n w.show()\n sys.exit(app.exec_())\n\n","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":11174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"292067119","text":"from tkinter import *\nimport time\nimport tkinter.messagebox\n\nclass WellRested:\n def __init__(self, master):\n frame = Frame(master, width=400, height=400)\n frame.pack()\n topMenu = Menu(master)\n master.config(menu=topMenu)\n\n fileMenu = Menu(topMenu)\n topMenu.add_cascade(label=\"File\", menu=fileMenu)\n fileMenu.add_command(label=\"New\", command=print)\n fileMenu.add_command(label=\"Home\", command=print)\n fileMenu.add_separator()\n fileMenu.add_command(label=\"Exit\", command=master.destroy)\n\n windowMenu = Menu(topMenu)\n topMenu.add_cascade(label=\"Window\", menu=windowMenu)\n windowMenu.add_command(label=\"Minimize\", command=print)\n\n helpMenu = Menu(topMenu)\n topMenu.add_cascade(label=\"Help\", menu=helpMenu)\n helpMenu.add_command(label=\"Getting Started\")\n\n timeElapsedLabel = Label(frame, text=\"Time elapsed in current interval: 0:00:00:00\")\n timeElapsedLabel.grid(row=0, column=0)\n timeRemainingLabel = Label(frame, text=\"Time remaining in current interval: 0:00:00:00\")\n timeRemainingLabel.grid(row=1, column=0)\n createIntervalButton = Button(frame, text=\"Create a new rest interval\", command=self.createInterval)\n createIntervalButton.grid(row=0, column=5)\n pauseIntervalButton = Button(frame, text=\"Pause the current interval\", command=self.pauseInterval)\n pauseIntervalButton.grid(row=1, column=5)\n resetIntervalButton = Button(frame, text=\"Reset the current interval\", command=self.resetInterval)\n resetIntervalButton.grid(row=2, column=5)\n\n def createInterval(self):\n createIntervalFrame = Tk()\n promptLabel = Label(createIntervalFrame, text=\"Please select the type of interval you would like to create\")\n promptLabel.grid(row=0)\n defaultButton = Button(createIntervalFrame, 
text=\"Default\", command=lambda: self.callFunctionsForDefault(createIntervalFrame)) # lambda allows a callback function to take a parameter\n defaultButton.grid(row=1, column=0)\n customButton = Button(createIntervalFrame, text=\"Custom\", command=lambda: self.callFunctionsForCustom(createIntervalFrame))\n customButton.grid(row=1, column=1)\n createIntervalFrame.mainloop()\n\n def determineCustom(self):\n\n customFrame = Tk()\n\n customFrame.mainloop()\n\n def callFunctionsForDefault(self, frame): # This is a helper function called in createInterval to make sure that the beginDefault function gets called and then the window is destroyed\n self.beginDefault()\n frame.destroy()\n\n def callFunctionsForCustom(self, frame): # This is a helper function called in createInterval to make sure that the determineCustom function gets called and then the window is destroyed\n self.determineCustom()\n frame.destroy()\n\n def beginDefault(self):\n initialTime = time.time()\n print(initialTime)\n\n def pauseInterval(selfs):\n print(\"Pause Interval\")\n\n def resetInterval(self):\n print(\"Reset Interval\")\n\n\n\n\n","sub_path":"WellRested.py","file_name":"WellRested.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"644862931","text":"import unittest\nfrom os.path import join\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom xgboost import XGBRegressor\nfrom crypr.cryptocompare import retrieve_all_data\nfrom crypr.models import RegressionModel, SavedPickleRegressionModel\nfrom crypr.preprocessors import SimplePreprocessor\nfrom crypr.util import get_project_path, utc_timestamp_ymd\n\n\nclass TestBase(unittest.TestCase):\n def setUp(self):\n np.random.seed(31337)\n\n self.project_path = get_project_path()\n self.data_dir = join(self.project_path, 'crypr', 'tests', 'data')\n\n self.SYM = 'ETH'\n self.LAST_N_HOURS = 14000\n self.FEATURE_WINDOW = 72\n self.MOVING_AVERAGE_LAGS = [6, 12, 24, 48, 72]\n self.TARGET = 'close'\n self.Tx = 72\n self.Ty = 1\n self.TEST_SIZE = 0.05\n self.end_time = utc_timestamp_ymd(2018, 6, 27)\n\n self.data = retrieve_all_data(coin=self.SYM, num_hours=self.LAST_N_HOURS, comparison_symbol='USD',\n end_time=self.end_time)\n\n self.predict_data = retrieve_all_data(coin=self.SYM, num_hours=self.Tx + self.FEATURE_WINDOW - 1,\n comparison_symbol='USD', end_time=self.end_time)\n\n self.X_shape = (13852, 1224)\n self.y_shape = (13852, 1)\n\n self.X_sample = 709.48\n self.y_sample = -1.498064809896027\n\n self.X_train_shape = (13159, 1224)\n self.X_test_shape = (693, 1224)\n self.y_train_shape = (13159, 1)\n self.y_test_shape = (693, 1)\n\n self.X_train_sample = 11.41\n self.y_train_sample = 0.0\n\n self.X_test_sample = 487.58\n self.y_test_sample = 0.9448599618077758\n\n self.parameters = {\n 'objective': 'reg:linear',\n 'learning_rate': .07,\n 'max_depth': 10,\n 'min_child_weight': 4,\n 'silent': 1,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'n_estimators': 20,\n }\n\n self.train_mae = 0.8953377462440475\n self.train_rmse = 1.4144230033451395\n self.prediction = 1.2296733856201172\n\n def test_preprocess(self):\n np.random.seed(31337)\n preprocessor = SimplePreprocessor(production=False, target_col=self.TARGET, Tx=self.Tx, Ty=self.Ty,\n moving_averages=self.MOVING_AVERAGE_LAGS)\n\n X, y = preprocessor.fit(self.data).transform(self.data)\n\n old_shape = X.shape\n new_shape = (old_shape[0], old_shape[1] * old_shape[2])\n X = 
pd.DataFrame(np.reshape(a=X, newshape=new_shape), columns=preprocessor.engineered_columns)\n\n X_sample = X.sample(1, random_state=0).values[0][0]\n y_sample = y.sample(1, random_state=0).values[0][0]\n self.assertEqual((X_sample, y_sample, X.shape, y.shape), (self.X_sample, self.y_sample, self.X_shape, self.y_shape))\n\n def test_split(self):\n np.random.seed(31337)\n preprocessor = SimplePreprocessor(False, self.TARGET, self.Tx, self.Ty, self.MOVING_AVERAGE_LAGS)\n X, y = preprocessor.fit(self.data).transform(self.data)\n\n old_shape = X.shape\n new_shape = (old_shape[0], old_shape[1] * old_shape[2])\n X = pd.DataFrame(np.reshape(a=X, newshape=new_shape), columns=preprocessor.engineered_columns)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.TEST_SIZE, shuffle=False)\n X_train_sample = X_train.sample(1, random_state=0).values[0][0]\n X_test_sample = X_test.sample(1, random_state=0).values[0][0]\n y_train_sample = y_train.sample(1, random_state=0).values[0][0]\n y_test_sample = y_test.sample(1, random_state=0).values[0][0]\n self.assertEqual((X_train_sample, X_test_sample, y_train_sample, y_test_sample,\n X_train.shape, X_test.shape, y_train.shape, y_test.shape),\n (self.X_train_sample, self.X_test_sample, self.y_train_sample, self.y_test_sample,\n self.X_train_shape, self.X_test_shape, self.y_train_shape, self.y_test_shape))\n\n def test_fit(self):\n np.random.seed(31337)\n preprocessor = SimplePreprocessor(production=False, target_col=self.TARGET, Tx=self.Tx, Ty=self.Ty,\n moving_averages=self.MOVING_AVERAGE_LAGS)\n\n X, y = preprocessor.fit(self.data).transform(self.data)\n X_train, _, y_train, _ = train_test_split(X, y, test_size=self.TEST_SIZE, shuffle=False)\n\n old_shape = X_train.shape\n new_shape = (old_shape[0], old_shape[1]*old_shape[2])\n X_train = pd.DataFrame(np.reshape(a=X_train, newshape=new_shape), columns=preprocessor.engineered_columns)\n\n self.ta = RegressionModel(XGBRegressor())\n\n self.ta.estimator.set_params(**self.parameters)\n self.ta.fit(X_train, y_train)\n\n rmse, mae = self.ta.evaluate(X_pred=X_train, y_true=y_train)\n self.assertAlmostEqual(rmse, self.train_rmse, 1)\n self.assertAlmostEqual(mae, self.train_mae, 1)\n\n def test_predict(self):\n np.random.seed(31337)\n preprocessor = SimplePreprocessor(production=True, target_col=self.TARGET, Tx=self.Tx, Ty=self.Ty,\n moving_averages=self.MOVING_AVERAGE_LAGS)\n X = preprocessor.fit(self.predict_data).transform(self.predict_data)\n\n old_shape = X.shape\n new_shape = (old_shape[0], old_shape[1] * old_shape[2])\n\n X = pd.DataFrame(np.reshape(a=X, newshape=new_shape), columns=preprocessor.engineered_columns)\n\n ta_model_filename = 'unit_xgboost_ETH_tx72_ty1_flag72.pkl'\n self.ta = SavedPickleRegressionModel(join(self.data_dir, ta_model_filename))\n self.assertEqual(self.ta.predict(X)[0], self.prediction)\n","sub_path":"crypr/tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"280022043","text":"# leia três numero e mostre qual é o maior e qual é o menor\na = int(input('Primeiro valor: '))\nb = int(input('Segundo valor: '))\nc = int(input('Terceiro valor: '))\n# verificar o menor\nmenor = a\nif b < a and b < c:\n menor = b\nif c < a and c < b:\n menor = c\nprint('O menor valor é {}'.format(menor))\n# verificar o maior\nmaior = a\nif b > a and b > c:\n maior = b\nif c > a and c > b:\n maior = c\nprint('O maior valor é o 
{}'.format(maior))\n","sub_path":"Ex/ex033.py","file_name":"ex033.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"307013109","text":"#!/usr/bin/python\n\nfrom requests import Session\nfrom csv import DictReader, DictWriter, unix_dialect, QUOTE_MINIMAL\nfrom contextlib import suppress\nfrom logging import basicConfig, getLogger, DEBUG, INFO, WARNING, ERROR\nfrom os import access, R_OK\nfrom sys import stdin, stdout\n\nfrom lxml import etree\n\nfrom retr.farm import *\nfrom retr.retriever import retriever, ValidateException\nfrom retr.proxypool import proxypool \n\nd=unix_dialect\nd.delimiter=','\nd.escapechar='\\\\'\nd.quoting=QUOTE_MINIMAL\n\nnum_threads=100\n\npp=proxypool('proxies.lst')\n#pp=proxypool([''])\n#pp=proxypool(['1.80.68.100:8118'])\n\nlg=getLogger(__name__)\n\ngetLogger('requests.packages.urllib3').setLevel(ERROR)\n\nll=WARNING\nll=INFO\n#ll=DEBUG\n\nbasicConfig( format='{asctime} {threadName:11s}: {message}', datefmt=\"%H:%M:%S\",\n style='{', level=ll)\n\nclass domtools(retriever):\n summary_xpath=etree.XPath('//div[contains(@class, \"narrative-summary\")]//text()')\n\n def __init__(self):\n etree.set_default_parser(etree.HTMLParser())\n #with lock: ua=UA.firefox # safari, chrome, random\n #self.ua=ua #'Mozilla/5.0 (X11; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0'\n super().__init__(pp, {'User-Agent': \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.60 Safari/537.17\"},\n #timeout=10\n )\n \n def validate(self, url, r):\n # if r.status_code != 200:\n # q=r.url.split('=')[-1]\n # with open('output/{}_{}.html'.format(r.status_code, q), 'w') as f:\n # f.write(r.text)\n super().validate(url, r)\n\n if 'www.googletagmanager.com/ns.html?id=GTM-5P2JCN' in r.text:\n pass\n else:\n #if 'misuse' in r.text: print(r.text)\n open('noanchor.html','w').write(r.text)\n raise ValidateException ('retry', 'noanchor')\n\n def do(self, q):\n fn='output/{}.html'.format(q['fullname'])\n\n if not access(fn, R_OK):\n try:\n r=self.request('get', 'http://research.domaintools.com/research/whois-history/search/',\n params={'q': q['fullname']})\n except KeyboardInterrupt:\n return\n with open(fn,'w') as f: f.write(r.text)\n\n with open(fn) as f:\n txt=f.read()\n if 'We did not find any results for your lookup' in txt:\n yield q # No results, empty values\n return \n tree=etree.fromstring(txt)\n # ['\\n We have ', '66', ' historical records for ', 'Ebmalls.com', '.', 'The oldest record dates back ', 'more than ', '9', ' years', '.', 'There are at least ', '30', ' ', 'significant changes', '.', 'All of the records publish domain name ownership data. 
']\n summary=self.summary_xpath(tree)\n lg.debug(summary)\n if summary[2].startswith(' historical record'):\n q['histrec']=summary[1]\n else:\n lg.error(summary)\n _exit(1)\n summary[:5]=[]\n if summary[0].startswith('The oldest'):\n q['oldest']=summary[2]\n summary[:5]=[]\n else:\n q['oldest']='N/A'\n lg.debug(summary)\n if summary[3].startswith('significant change'):\n q['changes']=summary[1]\n else:\n lg.error(summary)\n _exit(1)\n summary[:5]=[]\n lg.debug(summary)\n\n yield q\n\nwith open('output.csv', 'w', newline='') as fo:\n #rd=DictReader(fi)\n rd=({'fullname':_.strip()} for _ in stdin)\n wd=DictWriter(fo, fieldnames=('fullname', 'histrec', 'oldest', 'changes'),\n extrasaction='ignore', dialect=d)\n wd.writeheader()\n\n #rd=[{'fullname': 'aanddgiftsandmore.com'}]\n f=farm(num_threads, domtools, rd)\n with suppress(KeyboardInterrupt):\n for it in f.run(): wd.writerow(it)\n\n","sub_path":"scraper/feature/dt_helper.py","file_name":"dt_helper.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"73389456","text":"import numpy as np\nimport rpyc\n\n\nclass DataBean:\n def __init__(self, _id: str, vec: np.array):\n self._id = _id\n self.vec = vec\n if '\\n' in _id:\n raise Exception(\"_id cannot contain \\\\n\")\n\n if len(vec.shape) != 1:\n raise Exception(\"vec must be 1-d vector\")\n\n if vec.dtype != np.float32:\n raise Exception(\"the dtype of vec must be np.float32\")\n\n\nclass SPTAG_RpcSearchClient:\n\n ALGO_BKT = \"BKT\" # SPTAG-BKT is advantageous in search accuracy in very high-dimensional data\n ALGO_KDT = \"KDT\" # SPTAG-KDT is advantageous in index building cost,\n\n DIST_L2 = \"L2\"\n DIST_Cosine = \"Cosine\"\n\n def __init__(self, host, port):\n c = rpyc.connect(host, port)\n c._config['sync_request_timeout'] = None\n self.proxy = c.root\n\n def search(self, beans: [DataBean], p_resultNum):\n _, vecs = self.__get_meta_and_vec_from_beans(beans)\n vecs_ = vecs.tolist()\n return self.proxy.search(vecs_, p_resultNum)\n\n def __get_meta_and_vec_from_beans(self, beans: [DataBean]):\n if len(beans) == 0:\n raise Exception(\"beans length cannot be zero!\")\n\n if len(beans) > 1000:\n raise Exception(\"cannot add more than 1000 beans at once!\")\n\n dim = beans[0].vec.shape[0]\n meta = \"\"\n vecs = np.zeros((len(beans), dim))\n\n for i, bean in enumerate(beans):\n meta += bean._id + '\\n'\n vecs[i] = bean.vec\n\n meta = meta.encode()\n return meta, vecs\n\n\nif __name__ == \"__main__\":\n client = SPTAG_RpcSearchClient(\"127.0.0.1\", \"8888\")\n print(\"Test Search\")\n q = DataBean(_id=f\"s{0}\", vec=0 * np.ones((10,), dtype=np.float32))\n print(client.search([q], 3))\n","sub_path":"index_construction/SPTAG_rpc_search_client.py","file_name":"SPTAG_rpc_search_client.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"45363908","text":"#coding:utf-8\nimport sys\nN,K=map(int,raw_input().split())\n\nif K>sum(range(1,N-2+1)): # 1+...+(n-2)\n print(-1)\n sys.exit(0)\nF=[]\nfor i in range(2,N+1):\n F.append((1,i))\nn=sum(range(1,N-2+1))-K\n\nc=0\nfor i in range(2,N+1):\n for j in range(i+1,N+1):\n if(c= 0 and num <= 27:\n path = \"roof\" + str(num) + \".csv\"\n \n f = open(path,'r',encoding='utf-8')\n lines = f.readlines()\n a = W-1\n b = 0\n for line in lines:\n data = line.split(\",\")\n for cell in data:\n if cell == \"1\" or cell == \"1\\n\":\n mc.setBlock(xo+a,yo+H-1,zo+b,35)\n b = b+1\n a 
= a-1\n b = 0\n\ndef house(x0,y0,z0,L,W,H,f,num):\t#原点(x0,y0,z0),材质:f\n \n ##地板\n for x in range(W):\n for z in range(L):\n mc.setBlock(x0+x,y0,z0+z,f)\n # 平行墙\n for y in range(1,H):\n for a in range(1,W):\n mc.setBlock(x0+a, y0+y, z0, f)\n for a in range(1,L):\n mc.setBlock(x0+W-1, y0+y, z0+a, f)\n for a in range(1,W):\n mc.setBlock(x0+W-1-a, y0+y, z0+L-1, f)\n for a in range(1,L):\n mc.setBlock(x0, y0+y, z0+L-1-a, f)\n ##屋顶\n for x in range(W):\n for z in range(L):\n mc.setBlock(x0+x,y0+H-1,z0+z,f)\n ##门\n for y in range(2):\n mc.setBlock(x0+int(W/2),y0+1+y,z0,0)\n ##窗\n for y in range(2):\n for z in range(2):\n mc.setBlock(x0+W-1,y0+int(H/2)+y,z0+int(L/2)+z,160)\n\t\t \n huawen(x0,y0,z0,L,W,H,num)\n\t\t \n\nl = 10\nw = 10\nh = 10\n##x0 = pos.x\n##y0 = pos.y\n##z0 = pos.z\nx0 = 0\ny0 = 0+10\nz0 = 0\nmc.player.setTilePos([x0+50,y0,z0])\ni = 220\nj = 1\nfor X in range(3):\n for Y in range(3):\n for Z in range(3):\n house(x0+3*l*X,y0+3*h*Y,z0+3*w*Z,l,w,h,i,j)\n i = i +1\n j = j +1\n\t\t\t\n\n\n","sub_path":"students/Xionghuilan/homework0323/myhouse.py","file_name":"myhouse.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"349182119","text":"\ndef make_header(headers):\n header = headers.split(\"\\n\")\n d_headers = dict()\n for h in header:\n if h:\n k,v = h.strip().split(\":\",1)\n d_headers[k] = v.strip()\n return d_headers\n\nif __name__ == '__main__':\n headers = \"\"\"\nAccept:text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\nAccept-Encoding:gzip, deflate\nAccept-Language:zh-CN,zh;q=0.9\nCache-Control:max-age=0\nConnection:keep-alive\nHost:wenshu.court.gov.cn\nUpgrade-Insecure-Requests:1\nUser-Agent:Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36\n\"\"\"\n print(make_header(headers))","sub_path":"cpws_pkg/make_header.py","file_name":"make_header.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"90669466","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/7/17 17:25\n# @Author : Mamamooo\n# @Site :\n# @File : lec_122.py\n# @Software: PyCharm\n\"\"\"\n给定一个数组,它的第 i 个元素是一支给定股票第 i 天的价格。\n设计一个算法来计算你所能获取的最大利润。你可以尽可能地完成更多的交易(多次买卖一支股票)。\n注意:你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。\n示例 1:\n输入: [7,1,5,3,6,4]\n输出: 7\n解释: 在第 2 天(股票价格 = 1)的时候买入,在第 3 天(股票价格 = 5)的时候卖出, 这笔交易所能获得利润 = 5-1 = 4 。\n  随后,在第 4 天(股票价格 = 3)的时候买入,在第 5 天(股票价格 = 6)的时候卖出, 这笔交易所能获得利润 = 6-3 = 3 。\n\"\"\"\nclass Solution:\n def maxProfit(self, prices):\n n = len(prices)\n dp_i_0 = 0\n dp_i_1 = float('-inf')\n for i in range(n):\n tmp = dp_i_0\n dp_i_0 = max(dp_i_0,dp_i_1+prices[i])\n dp_i_1 = max(dp_i_1,tmp-prices[i])\n\n return dp_i_0\n\n\n#prices = [7,1,5,3,6,4]\nprices = [1,9,6,9,1,7,1,1,5,9,9,9]\ns = Solution()\nprint(s.maxProfit(prices))","sub_path":"leecode/lec_122.py","file_name":"lec_122.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"392809794","text":"#! 
/bin/python\nfrom socket import *\nimport time\nimport select\n# default params\nserverAddr = (\"\", 50001)\n\nimport sys, os\ndef usage():\n print(\"usage: %s [--serverPort ]\" % sys.argv[0])\n sys.exit(1)\n\ntry:\n args = sys.argv[1:]\n while args:\n sw = args[0]; del args[0]\n if sw == \"--serverPort\":\n serverAddr = (\"\", int(args[0])); del args[0]\n else:\n print(\"unexpected parameter %s\" % args[0])\n usage();\nexcept:\n usage()\n\ncwd = os.getcwd()\n\n###################################################################\n############# PUT METHOD ######################\n###################################################################\ndef put(fileName, numPacketsi, inputs, outputs):\n # If serverDirectory does not exist, create it as a subdirectory\n if not os.path.exists(cwd + '/serverDirectory/'):\n os.makedirs(cwd + '/serverDirectory')\n\n\n # Open the file in serverDirectory (creating it if it does not exist)\n fileOpen = open(os.path.join(cwd + '/serverDirectory/', fileName), 'wb+')\n fileOpen.close()\n\n \n #serverSocket.sendto(b\"Received request, being sending file\", clientAddrPort)\n #time.sleep(1)\n # While loop for receiving the body of the file and writing it to the one created on the server\n while True:\n readable, writable, exceptional = select.select(inputs, outputs, inputs, 5)\n for s in readable:\n if s is serverSocket:\n sequenceNumber = 0 \n payload, port = serverSocket.recvfrom(2048)\n #select(5)\n #print(\"payload is: %s \" % (payload.decode()))\n if not payload:\n sys.exit(0)\n\n payload = payload.decode()\n sequenceNumber = payload[0]\n payload = payload[1:]\n # Replacing the '~`\" back to '\\n' new line character\n payload = payload.replace('~`', '\\n')\n # Opening the file for appending\n #currFile = cwd + '/serverDirectory/' \n fileOpen = open(cwd + '/serverDirectory/' + fileName, 'a')\n try:\n # If the finish character is found in the payload to show file is done sending\n if '~fInIs' in payload:\n fileOpen.close()\n success = b\"File finished sending\"\n #print(success)\n # Send that the file was received successully\n serverSocket.sendto(success, clientAddrPort)\n sys.exit(0)\n # If it is not finished, write to the file\n else:\n fileOpen.write(payload)\n # serverSocket.sendto(str(sequenceNumber).encode() + b' packet(s) received ', clientAddrPort)\n except FileNotFoundError:\n print(\"Error trying to receive file\")\n else:\n if s in outputs:\n outputs.remove(s)\n inputs.remove(s)\n s.close()\n\n # Remove message queue\n del message_queues[s]\n\n # Handle outputs\n for s in writable:\n try:\n next_msg = message_queues[s].get_nowait()\n except Queue.Empty:\n # No messages waiting so stop checking for writability.\n # print >>sys.stderr, 'output queue for', s.getpeername(), 'is empty'\n outputs.remove(s)\n else:\n #print >>sys.stderr, 'sending \"%s\" to %s' % (next_msg, s.getpeername())\n #s.send(next_msg)\n print(\"else\")\n\n # Handle \"exceptional conditions\"\n for s in exceptional:\n #print >>sys.stderr, 'handling exceptional condition for', s.getpeername()\n # Stop listening for input on the connection\n inputs.remove(s)\n if s in outputs:\n outputs.remove(s)\n s.close()\n\n # Remove message queue\n del message_queues[s]\n\n###################################################################\n############# BEGIN CODE ######################\n###################################################################\nprint(\"binding datagram socket to %s\" % repr(serverAddr))\n\nserverSocket = socket(AF_INET, 
SOCK_DGRAM)\nserverSocket.bind(serverAddr)\n#print(\"ready to receive\")\n\n# Sockets from which we expect to read\ninputs = [serverSocket]\n\n# Sockets to which we expect to write\noutputs = []\n\n# Outgoing message queues (socket:Queue)\nmessage_queues = {}\n\nwhile 1:\n #message, clientAddrPort = serverSocket.recvfrom(2048)\n\n readable, writable, exceptional = select.select(inputs, outputs, inputs, 5)\n # Handle inputs\n for s in readable:\n if s is serverSocket:\n # A \"readable\" server socket is ready to accept a connection\n #connection, client_address = s.accept()\n\n # Recieving first message\n headerPayload, clientAddrPort = s.recvfrom(2048)\n print(\"from %s: rec'd connection\" % (repr(clientAddrPort)))\n\n # Initializing fileName variable\n fileName = ''\n\n # While loop that waits for fileName to get initiated\n while fileName == '':\n # Recieve filename and save it as \"header\"\n #headerPayload = framedReceive(serverSocket)\n # If headerPayload is not NON\n if headerPayload:\n # Split the decoded payload by the spaces\n pl = headerPayload.decode().split()\n\n # If start is in the payload by the spaces\n if b'start' in headerPayload:\n PorG = pl[0]\n fileName = pl[-1]\n\n if PorG == 'put':\n numPackets = pl[2]\n put(fileName, numPackets, inputs, outputs)\n\n elif PorG == 'get':\n get(fileName)\n # Add output channel for response\n if s not in outputs:\n outputs.append(s)\n\n else:\n # serverSocket.sendto(b\"Invalid request, closing connection\", clientAddrPort)\n #clientAddrPort.close()\n # Interpret empty result as closed connection\n # Stop listening for input on the connection\n if s in outputs:\n outputs.remove(s)\n inputs.remove(s)\n s.close()\n\n # Remove message queue\n del message_queues[s]\n\n #connection.setblocking(0)\n #inputs.append(connection)\n\n # Give the connection a queue for data we want to send\n #message_queues[connection] = Queue.Queue()\n\n #else:\n #message_queues[s].put(data)\n #print(\"from %s: rec'd connection\" % (repr(clientAddrPort)))\n\n\n #message = sys.stdin.readline()[:-1] # delete final \\n\n #modifiedMessage = message.upper()\n #serverSocket.sendto(modifiedMessage, clientAddrPort)\n \n \n","sub_path":"proxy/udpServer.py","file_name":"udpServer.py","file_ext":"py","file_size_in_byte":7203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"474769366","text":"from __future__ import print_function\nimport datetime\nimport dateutil.parser # available on PyPi as `python-dateutil`\nimport mysql.connector # available on PyPi as `mysql-connector` import nba_py.league\nimport nba_py.player\nimport os\n\n\n# 201939 steph curry\n# 201565 derrick rose\n\ndef convert_to_inches(height):\n try:\n h = height.split('-')\n feet = int(h[0])\n inches = int(h[1])\n ht = (feet * 12) + inches\n except:\n # sometimes height is Unknown or ''\n ht = None\n return ht\n\ndef search(player, players):\n return next(filter(\n lambda p: p['first'].lower() == player['FIRST_NAME'].lower() and\n p['last'].lower() == player['LAST_NAME'].lower() and\n p['birthdate'] == player['BIRTHDATE'],\n players))\n\n\ndef cleanup_name(name):\n return name.lower().replace(\"'\",\"\")\n\n\ndef age_on(birthdate, date):\n d = dateutil.relativedelta.relativedelta(date, birthdate)\n age = d.years + d.months / 12 + d.days / 365.24\n return float(\"{:.2f}\".format(age))\n\n\ndef determine_censor(from_year, to_year, current_season_year):\n \"\"\"\n Determine a censor for a given player.\n\n Args:\n from_year (int): The year the player started 
playing in the NBA.\n to_year (int): The year the player ended playing in the NBA (or the\n current year if still playing).\n current_season_year (int): The current season year: i.e. for the\n 2015-2016 season this would be 2016.\n\n Returns:\n string: The censor- one of 'BOTH', 'LEFT', 'RIGHT' or 'NONE'.\n \"\"\"\n first_season_year = 2003\n if from_year < first_season_year and to_year >= current_season_year:\n censor = 'BOTH'\n elif from_year < first_season_year and to_year < current_season_year:\n censor = 'LEFT'\n elif from_year >= first_season_year and to_year >= current_season_year:\n censor = 'RIGHT'\n elif from_year >= first_season_year and to_year < current_season_year:\n censor = 'NONE'\n return censor\n\n\nif __name__ == \"__main__\":\n # FIXME: dont need both\n season = '2015-16'\n season_year = datetime.datetime.now().year\n\n positions_map = {\n 'Guard-Forward': 'SG',\n 'Center-Forward': 'C',\n 'Forward-Guard': 'SF',\n 'Forward': 'SF',\n 'Forw': 'SF',\n 'Forward-Center': 'PF',\n 'Guard': 'PG',\n 'Guar': 'PG',\n 'Center': 'C',\n 'Cent': 'C',\n '': None\n }\n\n config_file = os.path.join(os.path.expanduser('~'), '.my.cnf')\n connection = mysql.connector.connect(option_files=config_file)\n\n print(\"Downloading and parsing data...\")\n new_players = []\n for player in nba_py.player.PlayerList(season=season).info():\n info = nba_py.player.PlayerSummary(player['PERSON_ID']).info()[0]\n\n info['BIRTHDATE'] = dateutil.parser.parse(info['BIRTHDATE']).date()\n info['HEIGHT'] = convert_to_inches(info['HEIGHT'])\n try:\n info['WEIGHT'] = int(info['WEIGHT'])\n except:\n info['WEIGHT'] = None\n info['POS'] = positions_map[info['POSITION']]\n\n info['CENSOR'] = determine_censor(info['FROM_YEAR'], info['TO_YEAR'], season_year)\n\n if not info['POS']:\n continue\n\n for game in nba_py.player.PlayerGameLogs(player['PERSON_ID'],\n season=season).info():\n date = dateutil.parser.parse(game['GAME_DATE']).date()\n new_players.append((cleanup_name(info['FIRST_NAME']),\n cleanup_name(info['LAST_NAME']),\n season_year,\n game['MATCHUP'].split()[0], #HACK\n info['HEIGHT'],\n info['WEIGHT'],\n info['BIRTHDATE'],\n info['POS'],\n date,\n game['MIN'],\n age_on(info['BIRTHDATE'], date),\n info['CENSOR']))\n\n print(\"Updating database...\")\n cursor = connection.cursor(prepared=True)\n # FIXME: what is idno??\n stmt = \"\"\"REPLACE INTO test_nbaGameInjuries\n (idno, first, last, season, team, ht, wt, birthdate,\n pos, date, mp, age, censor)\n VALUES (0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n cursor.executemany(stmt, new_players)\n connection.commit()\n cursor.close()\n connection.close()\n","sub_path":"nba_insert_season.py","file_name":"nba_insert_season.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"93333535","text":"\"\"\"\n\nCreated by: Nathan Starkweather\nCreated on: 04/09/2016\nCreated in: PyCharm Community Edition\n\n\n\"\"\"\n__author__ = 'Nathan Starkweather'\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n_h = logging.StreamHandler()\n_f = logging.Formatter(\"%(created)s %(name)s %(levelname)s (%(lineno)s): %(message)s\")\n_h.setFormatter(_f)\nlogger.addHandler(_h)\nlogger.propagate = False\nlogger.setLevel(logging.DEBUG)\ndel _h, _f\n\n\nslot_names = tps = [\n \"tp_name\",\n \"tp_basicsize\",\n \"tp_itemsize\",\n \"tp_dealloc\",\n \"tp_print\",\n \"tp_getattr\",\n \"tp_setattr\",\n \"tp_as_async\",\n \"tp_repr\",\n \"tp_as_number\",\n \"tp_as_sequence\",\n 
\"tp_as_mapping\",\n \"tp_hash\",\n \"tp_call\",\n \"tp_str\",\n \"tp_getattro\",\n \"tp_setattro\",\n \"tp_as_buffer\",\n \"tp_flags\",\n \"tp_doc\",\n \"tp_traverse\",\n \"tp_clear\",\n \"tp_richcompare\",\n \"tp_weaklistoffset\",\n \"tp_iter\",\n \"tp_iternext\",\n \"tp_methods\",\n \"tp_members\",\n \"tp_getset\",\n \"tp_base\",\n \"tp_dict\",\n \"tp_descr_get\",\n \"tp_descr_set\",\n \"tp_dictoffset\",\n \"tp_init\",\n \"tp_alloc\",\n \"tp_new\",\n \"tp_free\",\n \"tp_is_gc\",\n \"tp_bases\",\n \"tp_mro\",\n \"tp_cache\",\n \"tp_subclasses\",\n \"tp_weaklist\",\n \"tp_del\",\n \"tp_version_tag\",\n \"tp_finalize\",\n \"tp_allocs\",\n \"tp_frees\",\n \"tp_maxalloc\",\n \"tp_prev\",\n \"tp_next\"\n]\n\npytype_slots = [\n (\"const char *\", \"tp_name\", \"__name__\"),\n (\"Py_ssize_t\", \"tp_basicsize\", \"\"),\n (\"Py_ssize_t\", \"tp_itemsize\", \"\"),\n (\"destructor\", \"tp_dealloc\"),\n (\"printfunc\", \"tp_print\", \"\"),\n (\"getattrfunc\", \"tp_getattr\", \"\"),\n (\"setattrfunc\", \"tp_setattr\", \"\"),\n (\"PyAsyncMethods *\", \"tp_as_async\", \"\"),\n (\"reprfunc\", \"tp_repr\"),\n (\"PyNumberMethods *\", \"tp_as_number\", \"\"),\n (\"PySequenceMethods *\", \"tp_as_sequence\", \"\"),\n (\"PyMappingMethods *\", \"tp_as_mapping\", \"\"),\n (\"hashfunc\", \"tp_hash\", \"__hash__\"),\n (\"ternaryfunc\", \"tp_call\", \"__call__\"),\n (\"reprfunc\", \"tp_str\", \"__str__\"),\n (\"getattrofunc\", \"tp_getattro\", \"__getattr__\"),\n (\"setattrofunc\", \"tp_setattro\", \"__setattr__\"),\n (\"PyBufferProcs *\", \"tp_as_buffer\"),\n (\"unsigned long\", \"tp_flags\"),\n (\"const char *\", \"tp_doc\"),\n (\"traverseproc\", \"tp_traverse\"),\n (\"inquiry\", \"tp_clear\"),\n (\"richcmpfunc\", \"tp_richcompare\"),\n (\"Py_ssize_t\", \"tp_weaklistoffset\"),\n (\"getiterfunc\", \"tp_iter\", \"__iter__\"),\n (\"iternextfunc\", \"tp_iternext\", \"__next__\"),\n (\"struct PyMethodDef *\", \"tp_methods\"),\n (\"struct PyMemberDef *\", \"tp_members\"),\n (\"struct PyGetSetDef *\", \"tp_getset\"),\n (\"struct _typeobject *\", \"tp_base\"),\n (\"PyObject *\", \"tp_dict\"),\n (\"descrgetfunc\", \"tp_descr_get\"),\n (\"descrsetfunc\", \"tp_descr_set\"),\n (\"Py_ssize_t\", \"tp_dictoffset\"),\n (\"initproc\", \"tp_init\", \"__init__\"),\n (\"allocfunc\", \"tp_alloc\"),\n (\"newfunc\", \"tp_new\", \"__new__\"),\n (\"freefunc\", \"tp_free\"),\n (\"inquiry\", \"tp_is_gc\"),\n (\"PyObject *\", \"tp_bases\"),\n (\"PyObject *\", \"tp_mro\"),\n (\"PyObject *\", \"tp_cache\"),\n (\"PyObject *\", \"tp_subclasses\"),\n (\"PyObject *\", \"tp_weaklist\"),\n (\"destructor\", \"tp_del\", \"__del__\"),\n (\"unsigned int\", \"tp_version_tag\", \"\"),\n (\"destructor\", \"tp_finalize\", \"\"),\n (\"Py_ssize_t\", \"tp_allocs\", \"\")\n]\n\nfor _i in range(len(pytype_slots)):\n if len(pytype_slots[_i]) == 2:\n pytype_slots[_i] += (\"\",)\ndel _i\n\ntrace_alloc_slots = [\n (\"Py_ssize_t\", \"tp_frees\"),\n (\"Py_ssize_t\", \"tp_maxalloc\"),\n (\"struct _typeobject *\", \"tp_prev\"),\n (\"struct _typeobject *\", \"tp_next\"),\n]\n\nall_slots = pytype_slots + trace_alloc_slots\n","sub_path":"cext/type_slots.py","file_name":"type_slots.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"242637732","text":"class Solution:\n def carFleet(self, target: int, position: List[int], speed: List[int]) -> int:\n pair = [[p, s] for p, s in zip(position, speed)]\n stack = []\n\n for p, s in sorted(pair)[::-1]: # Sort in reverse order\n time = 
(target - p) / s\n if len(stack) >= 1 and stack[-1] >= time:\n continue\n stack.append(time)\n\n return len(stack)\n","sub_path":"src/leetcode/stack/853.py","file_name":"853.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"227317677","text":"# -*- coding: utf-8 -*-\n# @Author : Shu\n# @Email : httpservlet@yeah.net\n# @Date : 2017/12/27\n# @Description : 打包python项目为exe\n\n# coding=utf-8\n\n\n# http://www.cnblogs.com/dcb3688/p/4211390.html\nif __name__ == '__main__':\n from PyInstaller.__main__ import run\n\n # -w 纯窗口程序, 不带命令窗口\n # --icon 可执行文件图标\n # --version-file 可执行文件的文件信息\n # --upx-dir upx加壳压缩(需要单独下载upx), 放到此文件同目录下会自动找到upx.exe(都不需要专门注明此参数)\n # -y 打包生成的文件直接覆盖上一次生成\n # --add-data: 添加数据文件. 格式为 源;目标\n params = ['-y', '-n=BitcoinNano', 'deterministic.spec']\n run(params)\n","sub_path":"bitcoinnano_pyinstaller.py","file_name":"bitcoinnano_pyinstaller.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"318462455","text":"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nvidia.dali\nimport nvidia.dali.ops as ops\nimport nvidia.dali.fn as fn\nfrom nvidia.dali.pipeline import pipeline_def\nimport nvidia.dali.types as types\nimport test_utils\nimport numpy as np\nimport librosa\nimport torch\nimport math\nimport os\n\naudio_files = test_utils.get_files('db/audio/wav', 'wav')\naudio_files = [file for file in audio_files if '237-134500' in file] # Filtering librispeech samples\nnpy_files = [os.path.splitext(fpath)[0] + '.npy' for fpath in audio_files]\nnpy_files_sr = 16000\n\n# From DeepLearningExamples\ndef _convert_samples_to_float32(samples):\n \"\"\"Convert sample type to float32.\n Audio sample type is usually integer or float-point.\n Integers will be scaled to [-1, 1] in float32.\n \"\"\"\n float32_samples = samples.astype('float32')\n if samples.dtype in np.sctypes['int']:\n bits = np.iinfo(samples.dtype).bits\n float32_samples *= (1. 
/ 2 ** (bits - 1))\n elif samples.dtype in np.sctypes['float']:\n pass\n else:\n raise TypeError(\"Unsupported sample type: %s.\" % samples.dtype)\n return float32_samples\n\ntorch_windows = {\n 'hann': torch.hann_window,\n 'hamming': torch.hamming_window,\n 'blackman': torch.blackman_window,\n 'bartlett': torch.bartlett_window,\n 'none': None,\n}\n\nclass FilterbankFeatures():\n def __init__(self, sample_rate=16000, window_size=0.02, window_stride=0.01, window=\"hann\", normalize=\"per_feature\",\n n_fft=None, preemph=0.97, nfilt=64, lowfreq=0, highfreq=None, log=True, frame_splicing=1):\n self.win_length = int(sample_rate * window_size)\n self.hop_length = int(sample_rate * window_stride)\n self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))\n\n self.normalize = normalize\n self.log = log\n self.frame_splicing = frame_splicing\n self.nfilt = nfilt\n self.preemph = preemph\n window_fn = torch_windows.get(window, None)\n self.window = window_fn(self.win_length, periodic=False) if window_fn else None\n self.fb = torch.tensor(\n librosa.filters.mel(sample_rate, self.n_fft, n_mels=nfilt, fmin=lowfreq, fmax=highfreq), dtype=torch.float).unsqueeze(0)\n\n @staticmethod\n def splice_frames(x, frame_splicing):\n \"\"\" Stacks frames together across feature dim\n\n input is batch_size, feature_dim, num_frames\n output is batch_size, feature_dim*frame_splicing, num_frames\n\n \"\"\"\n seq = [x]\n for n in range(1, frame_splicing):\n tmp = torch.zeros_like(x)\n tmp[:, :, :-n] = x[:, :, n:]\n seq.append(tmp)\n return torch.cat(seq, dim=1)[:, :, ::frame_splicing]\n\n @staticmethod\n def normalize_batch(x, seq_len, normalize_type):\n constant = 1e-5\n if normalize_type == \"per_feature\":\n x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,\n device=x.device)\n x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,\n device=x.device)\n for i in range(x.shape[0]):\n x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)\n x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)\n # make sure x_std is not zero\n x_std += constant\n return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)\n elif normalize_type == \"all_features\":\n x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)\n x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)\n for i in range(x.shape[0]):\n x_mean[i] = x[i, :, :seq_len[i].item()].mean()\n x_std[i] = x[i, :, :seq_len[i].item()].std()\n # make sure x_std is not zero\n x_std += constant\n return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)\n else:\n return x\n\n def get_seq_len(self, seq_len):\n return torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(\n dtype=torch.int)\n\n def forward(self, inp, seq_len):\n x = inp\n dtype = x.dtype\n\n seq_len = self.get_seq_len(seq_len)\n\n # do preemphasis\n if self.preemph is not None:\n x = torch.cat((x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]),\n dim=1)\n\n # do stft\n x = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,\n win_length=self.win_length, pad_mode='reflect',\n center=True, window=self.window.to(dtype=torch.float))\n\n # get power spectrum\n x = x.pow(2).sum(-1)\n\n # dot with filterbank energies\n x = torch.matmul(self.fb.to(x.dtype), x)\n\n # log features if required\n if self.log:\n x = torch.log(x + 1e-20)\n\n # frame splicing if required\n if self.frame_splicing > 1:\n x = self.splice_frames(x, self.frame_splicing)\n\n # normalize if required\n if self.normalize:\n x = self.normalize_batch(x, seq_len, 
normalize_type=self.normalize)\n\n # mask to zero any values beyond seq_len in batch, pad to multiple of `pad_to` (for efficiency)\n max_len = x.size(-1)\n mask = torch.arange(max_len).to(seq_len.dtype).to(x.device).expand(x.size(0),\n max_len) >= seq_len.unsqueeze(1)\n x = x.masked_fill(mask.unsqueeze(1).to(device=x.device), 0)\n return x.to(dtype)\n\ndef dali_run(pipe, device):\n pipe.build()\n outs = pipe.run()\n return np.array(outs[0][0].as_cpu() if device == 'gpu' else outs[0][0])\n\ndef win_args(sample_rate, window_size_sec, window_stride_sec):\n win_length = int(sample_rate * window_size_sec) # frame size\n hop_length = int(sample_rate * window_stride_sec)\n return win_length, hop_length\n\ndef torch_spectrogram(audio, sample_rate, device='cpu',\n window_size=0.02, window_stride=0.01,\n center=True, pad_mode='reflect',\n window=\"hann\", n_fft=None):\n audio = torch.tensor(audio, dtype=torch.float32)\n if device == 'gpu':\n audio = audio.cuda()\n win_length, hop_length = win_args(sample_rate, window_size, window_stride)\n n_fft = n_fft or 2 ** math.ceil(math.log2(win_length))\n window_fn = torch_windows.get(window, None)\n window_tensor = window_fn(win_length, periodic=False) if window_fn else None\n stft_out = torch.stft(audio, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, pad_mode=pad_mode,\n center=center, window=window_tensor.to(dtype=torch.float))\n # get power spectrum\n spectrogram = stft_out.pow(2).sum(-1)\n spectrogram = spectrogram.cpu().detach().numpy()\n return spectrogram\n\ndef librosa_spectrogram(audio_data, sample_rate, device='cpu',\n window_size=0.02, window_stride=0.01,\n center=True, pad_mode='reflect',\n window=\"hann\", n_fft=None):\n win_length, hop_length = win_args(sample_rate, window_size, window_stride)\n n_fft = n_fft or 2 ** math.ceil(math.log2(win_length))\n window_fn = torch_windows.get(window, None)\n window_tensor = window_fn(win_length, periodic=False).detach().numpy() if window_fn else None\n spectrogram = np.abs(\n librosa.stft(y=audio_data, n_fft=n_fft or win_length,\n win_length=win_length, hop_length=hop_length, window=window_tensor))**2\n return spectrogram\n\ndef dali_spectrogram(audio_data, sample_rate, device='cpu',\n window_size=0.02, window_stride=0.01,\n center=True, pad_mode='reflect',\n window=\"hann\", n_fft=None):\n win_length, hop_length = win_args(sample_rate, window_size, window_stride)\n n_fft = n_fft or 2 ** math.ceil(math.log2(win_length))\n window_fn = torch_windows.get(window, None)\n window_tensor = window_fn(win_length, periodic=False).detach().numpy() if window_fn else None\n reflect_padding = 'reflect' == pad_mode\n @pipeline_def(batch_size=1, device_id=0, num_threads=3)\n def spectrogram_pipe():\n audio = fn.external_source(lambda: audio_data, device=device, batch=False)\n spectrogram = fn.spectrogram(audio, device=device, nfft=n_fft, reflect_padding=reflect_padding,\n center_windows=center, window_fn=window_tensor.tolist(),\n window_length=win_length, window_step=hop_length)\n return spectrogram\n return dali_run(spectrogram_pipe(), device=device)\n\ndef _testimpl_torch_vs_dali_spectrogram(device, pad_mode='reflect', center=True, atol=1e-03):\n for s in range(len(npy_files)):\n arr = _convert_samples_to_float32(np.load(npy_files[s]))\n torch_out = torch_spectrogram(arr, npy_files_sr, pad_mode=pad_mode, center=center, device=device)\n dali_out = dali_spectrogram(arr, npy_files_sr, pad_mode=pad_mode, center=center, device=device)\n rosa_out = librosa_spectrogram(arr, npy_files_sr, pad_mode=pad_mode, 
center=center, device=device)\n np.testing.assert_allclose(rosa_out, dali_out, atol=atol)\n np.testing.assert_allclose(torch_out, rosa_out, atol=atol)\n np.testing.assert_allclose(torch_out, dali_out, atol=atol)\n\ndef test_torch_vs_dali_spectrogram():\n for device in ['cpu', 'gpu']:\n yield _testimpl_torch_vs_dali_spectrogram, device\n\ndef torch_mel_fbank(spectrogram, sample_rate, device='cpu',\n nfilt=64, lowfreq=0, highfreq=None):\n spectrogram = torch.tensor(spectrogram, dtype=torch.float32)\n if device == 'gpu':\n spectrogram = spectrogram.cuda()\n n_fft = 2 * (spectrogram.shape[0] - 1)\n filterbanks = torch.tensor(\n librosa.filters.mel(sample_rate, n_fft, n_mels=nfilt, fmin=lowfreq, fmax=highfreq), dtype=torch.float)\n if device == 'gpu':\n filterbanks = filterbanks.cuda()\n mel_spectrogram = torch.matmul(filterbanks.to(spectrogram.dtype), spectrogram)\n mel_spectrogram = mel_spectrogram.cpu().detach().numpy()\n return mel_spectrogram\n\ndef dali_mel_fbank(spectrogram_data, sample_rate, device='cpu',\n nfilt=64, lowfreq=0, highfreq=None):\n @pipeline_def(batch_size=1, device_id=0, num_threads=3)\n def mel_fbank_pipe():\n spectrogram = fn.external_source(lambda: spectrogram_data, device=device, batch=False)\n mel_spectrogram = fn.mel_filter_bank(spectrogram, sample_rate=sample_rate, nfilter=nfilt,\n freq_low=lowfreq, freq_high=highfreq)\n return mel_spectrogram\n return dali_run(mel_fbank_pipe(), device=device)\n\ndef _testimpl_torch_vs_dali_mel_fbank(device):\n for s in range(len(npy_files)):\n arr = _convert_samples_to_float32(np.load(npy_files[s]))\n spec = torch_spectrogram(arr, npy_files_sr, device=device)\n torch_out = torch_mel_fbank(spec, npy_files_sr, device=device)\n dali_out = dali_mel_fbank(spec, npy_files_sr, device=device)\n np.testing.assert_allclose(torch_out, dali_out, atol=1e-04)\n\ndef test_torch_vs_dali_mel_fbank():\n for device in ['cpu', 'gpu']:\n yield _testimpl_torch_vs_dali_mel_fbank, device\n\ndef torch_log(x, device='cpu'):\n x = torch.tensor(x, dtype=torch.float32)\n if device == 'gpu':\n x = x.cuda()\n log_x = torch.log(x + 1e-20)\n log_x = log_x.cpu().detach().numpy()\n return log_x\n\ndef dali_log(x_data, device='cpu'):\n @pipeline_def(batch_size=1, device_id=0, num_threads=3)\n def log_pipe():\n x = fn.external_source(lambda: x_data, device=device, batch=False)\n log_x = fn.to_decibels(x, multiplier=np.log(10), reference=1.0, cutoff_db=-80)\n return log_x\n return dali_run(log_pipe(), device=device)\n\ndef _testimpl_torch_vs_dali_log(device):\n arr = _convert_samples_to_float32(np.load(npy_files[0]))\n spec = torch_spectrogram(arr, npy_files_sr, device=device)\n mel_spec = torch_mel_fbank(spec, npy_files_sr, device=device)\n torch_out = torch_log(mel_spec, device=device)\n dali_out = dali_log(mel_spec, device=device)\n np.testing.assert_allclose(torch_out, dali_out, atol=1e-5)\n\ndef torch_preemphasis(x, preemph, device='cpu'):\n x = torch.tensor(x, dtype=torch.float32)\n if device == 'gpu':\n x = x.cuda()\n y = torch.cat((x[0].unsqueeze(0), x[1:] - preemph * x[:-1]), dim=0)\n y = y.cpu().detach().numpy()\n return y\n\ndef dali_preemphasis(x_data, preemph, device='cpu'):\n @pipeline_def(batch_size=1, device_id=0, num_threads=3)\n def preemph_pipe():\n x = fn.external_source(lambda: x_data, device=device, batch=False)\n y = fn.preemphasis_filter(x, preemph_coeff=preemph)\n return y\n return dali_run(preemph_pipe(), device=device)\n\ndef _testimpl_torch_vs_dali_preemphasis(device):\n arr = _convert_samples_to_float32(np.load(npy_files[0]))\n torch_out = 
torch_preemphasis(arr, 0.97, device=device)\n dali_out = dali_preemphasis(arr, 0.97, device=device)\n # DALI and torch differ in the first element:\n # DALI: y[0] = x[0] - coeff * x[0]\n # Torch: y[0] = x[0]\n np.testing.assert_allclose(torch_out[1:], dali_out[1:], atol=1e-5)\n\ndef test_torch_vs_dali_preemphasis():\n for device in ['cpu', 'gpu']:\n yield _testimpl_torch_vs_dali_preemphasis, device\n\ndef torch_normalize(mel_spec, normalize_type, seq_len=None, device='cpu'):\n mel_spec = torch.tensor(mel_spec, dtype=torch.float32).unsqueeze(0)\n if seq_len is None:\n seq_len = torch.tensor(mel_spec.shape[2]).unsqueeze(0)\n if device == 'gpu':\n mel_spec = mel_spec.cuda()\n out = FilterbankFeatures().normalize_batch(\n mel_spec, seq_len, normalize_type=normalize_type)\n out = out.cpu().detach().numpy().squeeze(0)\n return out\n\ndef dali_normalize(mel_spec_data, normalize_type, device='cpu'):\n @pipeline_def(batch_size=1, device_id=0, num_threads=3)\n def log_pipe():\n data = fn.external_source(lambda: mel_spec_data, device=device, batch=False)\n if normalize_type == 'per_feature':\n out = fn.normalize(data, axes=[1], device=device, epsilon=4e-5, ddof=1)\n elif normalize_type == 'all_features':\n out = fn.normalize(data, axes=[0, 1], device=device, epsilon=4e-5, ddof=1)\n else:\n assert False\n return out\n return dali_run(log_pipe(), device=device)\n\ndef _testimpl_torch_vs_dali_normalize(normalize_type, device):\n for s in range(len(npy_files)):\n arr = _convert_samples_to_float32(np.load(npy_files[s]))\n spec = torch_spectrogram(arr, npy_files_sr, device=device)\n mel_spec = torch_mel_fbank(spec, npy_files_sr, device=device)\n log_features = torch_log(mel_spec, device=device)\n torch_out = torch_normalize(log_features, normalize_type=normalize_type, device=device)\n dali_out = dali_normalize(log_features, normalize_type=normalize_type, device=device)\n np.testing.assert_allclose(torch_out, dali_out, atol=1e-4)\n\ndef test_torch_vs_dali_normalize():\n for device in ['cpu', 'gpu']:\n for normalize_type in ['per_feature', 'all_features']:\n yield _testimpl_torch_vs_dali_normalize, normalize_type, device\n\n@pipeline_def(batch_size=1, device_id=0, num_threads=3)\ndef rnnt_train_pipe(files, sample_rate=16000, silence_threshold=-80, preemph_coeff=.97,\n window_size=.02, window_stride=.01, window=\"hann\", nfeatures=64, nfft=512, frame_splicing=1,\n lowfreq=0.0, highfreq=None, normalize_type='per_feature'):\n norm_axes = [1] if normalize_type == 'per_feature' else [0, 1]\n win_len, win_hop = win_args(sample_rate, window_size, window_stride)\n window_fn = torch_windows.get(window, None)\n window_fn_arg = window_fn(win_len, periodic=False).detach().numpy().tolist() if window_fn else None\n\n data, _ = fn.readers.file(files=files, device=\"cpu\", random_shuffle=False, shard_id=0, num_shards=1)\n audio, _ = fn.decoders.audio(data, dtype=types.FLOAT, downmix=True)\n preemph_audio = fn.preemphasis_filter(audio, preemph_coeff=preemph_coeff, reflect_padding=False)\n spec = fn.spectrogram(preemph_audio, nfft=nfft, window_fn=window_fn_arg, window_length=win_len, window_step=win_hop,\n center_windows=True, reflect_padding=True)\n\n mel_spec = fn.mel_filter_bank(spec, sample_rate=sample_rate, nfilter=nfeatures, freq_low=lowfreq, freq_high=highfreq)\n log_features = fn.to_decibels(mel_spec + 1e-20, multiplier=np.log(10), reference=1.0, cutoff_db=-80)\n\n if frame_splicing > 1:\n log_features = fn.transpose(log_features, perm=[1, 0])\n log_features = fn.reshape(log_features, rel_shape=[-1, frame_splicing])\n log_features = 
fn.pad(log_features, axes=[0], fill_value=0, align=frame_splicing, shape=[1])\n log_features = fn.transpose(log_features, perm=[1, 0])\n\n norm_log_features = fn.normalize(log_features, axes=norm_axes, epsilon=4e-5, ddof=1)\n return norm_log_features, log_features, mel_spec, spec, preemph_audio, audio\n\n# This test compares the pre-calculated output of the native data pipeline with the output\n# of the DALI data pipeline. There are a few modifications to the native data pipeline\n# compared to the reference: random operations (i.e. dither and presampling,\n# aka \"speed perturbation\") are turned off\ndef test_rnnt_data_pipeline():\n preemph = 0.97\n n_fft = None\n sample_rate = npy_files_sr\n highfreq = None\n window_size = 0.02\n window_stride = 0.01\n normalize_type = 'per_feature'\n ref_pipeline = FilterbankFeatures(sample_rate=sample_rate, n_fft=n_fft, highfreq=highfreq, frame_splicing=1)\n recordings = []\n for fpath in npy_files:\n arr = np.load(fpath)\n arr = _convert_samples_to_float32(arr)\n recordings.append(arr)\n nrecordings = len(recordings)\n pipe = rnnt_train_pipe(audio_files, seed=42)\n pipe.build()\n reference_data = []\n for i in range(nrecordings):\n reference_data.append(\n ref_pipeline.forward(torch.tensor([recordings[i]]), torch.tensor([recordings[i].shape[0]]))\n )\n for i in range(nrecordings):\n dali_out = pipe.run()\n norm_log_features = np.array(dali_out[0][0])\n log_features = np.array(dali_out[1][0])\n mel_spec = np.array(dali_out[2][0])\n spec = np.array(dali_out[3][0])\n preemph_audio = np.array(dali_out[4][0])\n audio = np.array(dali_out[5][0])\n ref = reference_data[i].squeeze(0)\n assert ref.shape == norm_log_features.shape\n nfeatures, seq_len = ref.shape\n size = nfeatures * seq_len\n\n audio_ref = recordings[i]\n audio_len_ref = recordings[i].shape[0]\n np.testing.assert_allclose(audio, audio_ref, atol=1e-4)\n\n preemph_audio_ref = torch_preemphasis(audio_ref, preemph=preemph)\n np.testing.assert_allclose(preemph_audio, preemph_audio_ref, atol=1e-4)\n\n spec_ref = torch_spectrogram(preemph_audio_ref, npy_files_sr,\n window_size=window_size, window_stride=window_stride,\n center=True, pad_mode='reflect',\n window=\"hann\", n_fft=n_fft)\n np.testing.assert_allclose(spec, spec_ref, atol=1e-4)\n\n mel_spec_ref = torch_mel_fbank(spec_ref, npy_files_sr)\n np.testing.assert_allclose(mel_spec, mel_spec_ref, atol=1e-4)\n\n log_features_ref = torch_log(mel_spec_ref)\n np.testing.assert_allclose(log_features, log_features_ref, atol=1e-3)\n log_features_ref2 = torch_log(mel_spec)\n np.testing.assert_allclose(log_features, log_features_ref2, atol=1e-4)\n\n norm_log_features_ref = torch_normalize(log_features_ref, normalize_type)\n np.testing.assert_allclose(norm_log_features, norm_log_features_ref, atol=1e-3)\n\n norm_log_features_ref2 = torch_normalize(log_features, normalize_type)\n np.testing.assert_allclose(norm_log_features, norm_log_features_ref2, atol=1e-4)\n\n # The reference pipeline calculates the number of windows in the wrong way when using centered windows.\n # Here we are trying to recreate that behavior.\n seq_len = ref_pipeline.get_seq_len(torch.tensor([audio_len_ref]))\n norm_log_features_ref3 = torch_normalize(log_features_ref, normalize_type, seq_len=seq_len)\n ref_output = ref_pipeline.forward(torch.tensor([audio_ref]), torch.tensor([audio_len_ref])).squeeze(0)\n np.testing.assert_allclose(norm_log_features_ref3[:, :seq_len[0]], ref_output[:, :seq_len[0]], 
atol=1e-4)\n","sub_path":"dali/test/python/test_torch_pipeline_rnnt.py","file_name":"test_torch_pipeline_rnnt.py","file_ext":"py","file_size_in_byte":21220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"471207113","text":"#\n# Copyright 2016-present Ciena Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom CordContainer import Container, Onos, OnosCord, Quagga, Radius, reinitContainerClients\nfrom nose.tools import nottest\nfrom SimpleXMLRPCServer import SimpleXMLRPCServer\nimport daemon\nimport xmlrpclib\nimport os\nimport json\nimport time\nimport threading\n\n##Server to handle container restart/stop requests from test container.\n##Used now to restart ONOS from vrouter test container\n\nCORD_TEST_HOST = '172.17.0.1'\nCORD_TEST_PORT = 25000\n\nclass QuaggaStopWrapper(Container):\n def __init__(self, name = Quagga.NAME, image = Quagga.IMAGE, tag = 'latest'):\n super(QuaggaStopWrapper, self).__init__(name, image, tag = tag)\n if self.exists():\n self.kill()\n\nclass CordTestServer(object):\n\n onos_cord = None\n\n def __restart_onos(self, config = None):\n if self.onos_cord:\n onos_config = '{}/network-cfg.json'.format(OnosCord.onos_config_dir)\n else:\n onos_config = '{}/network-cfg.json'.format(Onos.host_config_dir)\n if config is None:\n try:\n os.unlink(onos_config)\n except:\n pass\n print('Restarting ONOS')\n if self.onos_cord:\n self.onos_cord.start(restart = True, network_cfg = config)\n else:\n Onos(restart = True, network_cfg = config)\n return 'DONE'\n\n def restart_onos(self, kwargs):\n return self.__restart_onos(**kwargs)\n\n def __restart_quagga(self, config = None, boot_delay = 30 ):\n config_file = Quagga.quagga_config_file\n if config is not None:\n quagga_config = '{}/testrib_gen.conf'.format(Quagga.host_quagga_config)\n config_file = '{}/testrib_gen.conf'.format(Quagga.guest_quagga_config)\n with open(quagga_config, 'w+') as fd:\n fd.write(str(config))\n print('Restarting QUAGGA with config file %s, delay %d' %(config_file, boot_delay))\n Quagga(restart = True, config_file = config_file, boot_delay = boot_delay)\n return 'DONE'\n\n def restart_quagga(self, kwargs):\n return self.__restart_quagga(**kwargs)\n\n def stop_quagga(self):\n quaggaStop = QuaggaStopWrapper()\n time.sleep(5)\n try:\n quagga_config_gen = '{}/testrib_gen.conf'.format(Quagga.host_quagga_config)\n os.unlink(quagga_config_gen)\n except: pass\n return 'DONE'\n\n def __run_shell_quagga(self, cmd = None):\n ret = 0\n if cmd is not None:\n exec_cmd = 'docker exec {} {}'.format(Quagga.NAME, cmd)\n ret = os.system(exec_cmd)\n return ret\n\n def run_shell_quagga(self, kwargs):\n return self.__run_shell_quagga(**kwargs)\n\n def restart_radius(self):\n print('Restarting RADIUS Server')\n Radius(restart = True)\n return 'DONE'\n\n@nottest\ndef cord_test_server_start(daemonize = True, cord_test_host = CORD_TEST_HOST,\n cord_test_port = CORD_TEST_PORT, onos_cord = None):\n server = SimpleXMLRPCServer( (cord_test_host, cord_test_port) )\n 
server.register_instance(CordTestServer())\n CordTestServer.onos_cord = onos_cord\n if daemonize is True:\n d = daemon.DaemonContext(files_preserve = [server],\n detach_process = True)\n with d:\n reinitContainerClients()\n server.serve_forever()\n else:\n task = threading.Thread(target = server.serve_forever)\n ##terminate when main thread exits\n task.daemon = True\n task.start()\n return server\n\n@nottest\ndef cord_test_server_stop(server):\n server.shutdown()\n server.server_close()\n\n@nottest\ndef get_cord_test_loc():\n host = os.getenv('CORD_TEST_HOST', CORD_TEST_HOST)\n port = int(os.getenv('CORD_TEST_PORT', CORD_TEST_PORT))\n return host, port\n\ndef rpc_server_instance():\n '''Stateless'''\n host, port = get_cord_test_loc()\n rpc_server = 'http://{}:{}'.format(host, port)\n return xmlrpclib.Server(rpc_server, allow_none = True)\n\n@nottest\ndef __cord_test_onos_restart(**kwargs):\n return rpc_server_instance().restart_onos(kwargs)\n\n@nottest\ndef cord_test_onos_restart(config = None):\n '''Send ONOS restart to server'''\n data = __cord_test_onos_restart(config = config)\n if data == 'DONE':\n return True\n return False\n\n@nottest\ndef __cord_test_quagga_restart(**kwargs):\n return rpc_server_instance().restart_quagga(kwargs)\n\n@nottest\ndef cord_test_quagga_restart(config = None, boot_delay = 30):\n '''Send QUAGGA restart to server'''\n data = __cord_test_quagga_restart(config = config, boot_delay = boot_delay)\n if data == 'DONE':\n return True\n return False\n\n@nottest\ndef __cord_test_quagga_shell(**kwargs):\n return rpc_server_instance().run_shell_quagga(kwargs)\n\n@nottest\ndef cord_test_quagga_shell(cmd = None):\n '''Send QUAGGA shell cmd to server'''\n return __cord_test_quagga_shell(cmd = cmd)\n\n@nottest\ndef cord_test_quagga_stop():\n data = rpc_server_instance().stop_quagga()\n if data == 'DONE':\n return True\n return False\n\n@nottest\ndef cord_test_radius_restart():\n '''Send Radius server restart to server'''\n data = rpc_server_instance().restart_radius()\n if data == 'DONE':\n return True\n return False\n","sub_path":"src/test/utils/CordTestServer.py","file_name":"CordTestServer.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"221077564","text":"class Solution(object):\n def isRectangleCover(self, rectangles):\n \"\"\"\n :type rectangles: List[List[int]]\n :rtype: bool\n \"\"\"\n # start coding at 10:16\n vec = rectangles[0];\n self.left = vec[0];\n self.right = vec[2];\n self.top = vec[3];\n self.down = vec[1];\n \n def deal(mp):\n ans = 0;\n for vec in rectangles:\n self.left = min(self.left, vec[0]);\n self.right = max(self.right, vec[2]);\n self.down = min(self.down, vec[1]);\n self.top = max(self.top, vec[3]);\n \n ans += (vec[2]-vec[0])*(vec[3]-vec[1]);\n \n temp1 = (vec[0], vec[1]);\n if temp1 in mp: mp[temp1] += 1;\n else: mp[temp1] = 1;\n \n temp2 = (vec[2], vec[3]);\n if temp2 in mp: mp[temp2] += 1;\n else: mp[temp2] = 1;\n \n temp3 = (vec[0], vec[3]);\n if temp3 in mp: mp[temp3] += 1;\n else: mp[temp3] = 1;\n \n temp4 = (vec[2], vec[1]);\n if temp4 in mp: mp[temp4] += 1;\n else: mp[temp4] = 1;\n return ans;\n \n mp = {};\n if deal(mp) != (self.top-self.down)*(self.right-self.left): return False;\n for k in mp:\n if (k[0] == self.left or k[0] == self.right) and (k[1] == self.top or k[1] == self.down):\n if mp[k] != 1: return False;\n elif (k[0] == self.left or k[0] == self.right) != (k[1] == self.top or k[1] == self.down):\n if mp[k] != 2: return 
False;\n else:\n if mp[k] % 2: return False;\n return True;\n # first submit at 10:24\n ","sub_path":"301-400/391-400/py/391_perfect_rectangle.py","file_name":"391_perfect_rectangle.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"36552295","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nManage expense reports\n\"\"\"\n\nimport csv\nimport io\nfrom flask import g, flash, url_for, render_template, request, redirect, Response\nfrom werkzeug.datastructures import MultiDict\nfrom coaster.utils import format_currency as coaster_format_currency\nfrom coaster.views import load_model, load_models\nfrom baseframe import request_is_xhr\nfrom baseframe.forms import render_form, render_redirect, render_delete_sqla, ConfirmDeleteForm\n\nfrom kharcha import app, lastuser\nfrom kharcha.forms import ExpenseReportForm, ExpenseForm\nfrom kharcha.views.workflows import ExpenseReportWorkflow\nfrom kharcha.models import db, Workspace, ExpenseReport, Expense, Budget\n\n\n@app.template_filter('format_currency')\ndef format_currency(value):\n return coaster_format_currency(value, decimals=2)\n\n\ndef available_reports(workspace, user=None, all=False):\n if user is None:\n user = g.user\n query = ExpenseReport.query.filter_by(workspace=workspace).order_by(ExpenseReport.datetime)\n # FIXME+TODO: Replace with per-workspace permissions\n if all and 'review' in g.permissions:\n # Get all reports owned by this user and in states where the user can review them\n query = query.filter(db.or_(\n ExpenseReport.user == user,\n ExpenseReport.status.in_(ExpenseReportWorkflow.reviewable.values)))\n else:\n query = query.filter_by(user=user)\n return query\n\n\n@app.route('//budgets/')\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (Budget, {'name': 'budget', 'workspace': 'workspace'}, 'budget'),\n permission='view'\n )\ndef budget(workspace, budget):\n unsorted_reports = available_reports(workspace).filter_by(budget=budget).all()\n if unsorted_reports:\n noreports = False\n else:\n noreports = True\n reports = ExpenseReportWorkflow.sort_documents(unsorted_reports)\n return render_template('budget.html.jinja2', budget=budget, reports=reports, noreports=noreports)\n\n\n@app.route('//reports/')\n@lastuser.requires_login\n@load_model(Workspace, {'name': 'workspace'}, 'g.workspace', permission='view')\ndef reports(workspace):\n # Sort reports by status\n reports = ExpenseReportWorkflow.sort_documents(available_reports(workspace).all())\n return render_template('reports.html.jinja2', reports=reports)\n\n\n@app.route('//reports/all')\n@lastuser.requires_login\n@load_model(Workspace, {'name': 'workspace'}, 'g.workspace', permission='view')\ndef reports_all(workspace):\n # Sort reports by status\n reports = ExpenseReportWorkflow.sort_documents(available_reports(workspace, all=True).all())\n return render_template('reports.html.jinja2', reports=reports)\n\n\ndef report_edit_internal(workspace, form, report=None, workflow=None):\n if form.validate_on_submit():\n if report is None:\n report = ExpenseReport(workspace=workspace)\n report.user = g.user\n db.session.add(report)\n form.populate_obj(report)\n report.make_name()\n db.session.commit()\n return redirect(url_for('report', workspace=workspace.name, report=report.url_name), code=303)\n # TODO: Ajax handling here (but then again, is it required?)\n return render_template('reportnew.html.jinja2',\n workspace=workspace, form=form, report=report, 
workflow=workflow)\n\n\n@app.route('//reports/new', methods=['GET', 'POST'])\n@lastuser.requires_login\n@load_model(Workspace, {'name': 'workspace'}, 'g.workspace', permission='new-report')\ndef report_new(workspace):\n form = ExpenseReportForm(prefix='report')\n return report_edit_internal(workspace, form)\n\n\n@app.route('//reports/', methods=['GET', 'POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='view'\n )\ndef report(workspace, report):\n workflow = report.workflow()\n expenseform = ExpenseForm()\n expenseform.report = report\n if expenseform.validate_on_submit():\n if expenseform.id.data:\n expense = Expense.query.get(expenseform.id.data)\n else:\n expense = Expense()\n # FIXME: Replace this with SQLAlchemy's sequence ordering extension\n\n # Find the highest sequence number for expenses in this report.\n # If None, assume 0, then add 1 to get the next sequence number\n expense.seq = (db.session.query(\n db.func.max(Expense.seq).label('seq')).filter_by(\n report_id=report.id).first().seq or 0) + 1\n expenseform.populate_obj(expense)\n report.expenses.append(expense)\n db.session.flush()\n report.update_total()\n db.session.commit()\n if request_is_xhr():\n # Return with a blank form\n return render_template(\"expense.html.jinja2\", report=report, expenseform=ExpenseForm(MultiDict()))\n else:\n return redirect(url_for('report', workspace=workspace.name, report=report.url_name), code=303)\n if request_is_xhr():\n return render_template(\"expense.html.jinja2\", report=report, expenseform=expenseform)\n return render_template('report.html.jinja2',\n report=report,\n workflow=workflow,\n transitions=workflow.transitions(),\n expenseform=expenseform)\n\n\n@app.route('//reports//expensetable')\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='view'\n )\ndef report_expensetable(workspace, report):\n workflow = report.workflow()\n return render_template('expensetable.html.jinja2',\n report=report, workflow=workflow)\n\n\n@app.route('//reports//csv')\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='view'\n )\ndef report_csv(workspace, report):\n outfile = io.StringIO()\n out = csv.writer(outfile)\n out.writerow(['Date', 'Category', 'Description', 'Amount'])\n for expense in report.expenses:\n out.writerow([expense.date.strftime('%Y-%m-%d'),\n expense.category.title.encode('utf-8'),\n expense.description.encode('utf-8'),\n '%.2f' % expense.amount])\n response = Response(outfile.getvalue(),\n content_type='text/csv; charset=utf-8',\n headers={'Content-Disposition': 'attachment; filename=\"%s.csv\"' % report.url_name,\n 'Cache-Control': 'no-store',\n 'Pragma': 'no-cache'})\n return response\n\n\n@app.route('//reports//edit', methods=['GET', 'POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='edit'\n )\ndef report_edit(workspace, report):\n workflow = report.workflow()\n form = ExpenseReportForm(obj=report)\n return report_edit_internal(workspace, form, report, workflow)\n\n # All okay. 
Allow editing\n if form.validate_on_submit():\n form.populate_obj(report)\n db.session.commit()\n flash(\"Edited report '%s'.\" % report.title, 'success')\n return render_redirect(url_for('report', workspace=workspace.name, report=report.url_name), code=303)\n return render_form(form=form, title=\"Edit expense report\",\n formid=\"report_edit\", submit=\"Save\",\n cancel_url=url_for('report', workspace=workspace.name, report=report.url_name))\n\n\n@app.route('//reports//delete', methods=['GET', 'POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='delete'\n )\ndef report_delete(workspace, report):\n # Confirm delete\n return render_delete_sqla(report, db, title=\"Confirm delete\",\n message=\"Delete expense report '%s'?\" % report.title,\n success=\"You have deleted report '%s'.\" % report.title,\n next=url_for('reports', workspace=workspace.name))\n\n\n@app.route('//reports///delete', methods=['GET', 'POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n (Expense, {'report': 'report', 'id': 'expense'}, 'expense'),\n permission='delete'\n )\ndef expense_delete(workspace, report, expense):\n form = ConfirmDeleteForm()\n if form.validate_on_submit():\n if 'delete' in request.form:\n db.session.delete(expense)\n db.session.commit()\n report.update_total()\n report.update_sequence_numbers()\n db.session.commit()\n return redirect(url_for('report', workspace=workspace.name, report=report.url_name), code=303)\n return render_template('baseframe/delete.html.jinja2', form=form, title=\"Confirm delete\",\n message=\"Delete expense item '%s' for %s %s?\" % (\n expense.description, report.currency, format_currency(expense.amount)))\n\n\n@app.route('//reports//submit', methods=['POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='edit'\n )\ndef report_submit(workspace, report):\n wf = report.workflow()\n if wf.document.expenses == []:\n flash(\"This expense report does not list any expenses.\", 'error')\n return redirect(url_for('report', workspace=workspace.name, report=report.url_name), code=303)\n wf.submit()\n db.session.commit()\n flash(\"Your expense report has been submitted.\", 'success')\n return redirect(url_for('report', workspace=workspace.name, report=report.url_name), code=303)\n\n\n@app.route('//reports//resubmit', methods=['POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='edit'\n )\ndef report_resubmit(workspace, report):\n wf = report.workflow()\n if wf.document.expenses == []:\n flash(\"This expense report does not list any expenses.\", 'error')\n return redirect(url_for('report', workspace=workspace.name, report=report.url_name), code=303)\n wf.resubmit()\n db.session.commit()\n flash(\"Your expense report has been submitted.\", 'success')\n return redirect(url_for('report', workspace=workspace.name, report=report.url_name), code=303)\n\n\n@app.route('//reports//accept', methods=['POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 
'workspace'}, 'report'),\n permission='review'\n )\ndef report_accept(workspace, report):\n wf = report.workflow()\n wf.accept(reviewer=g.user)\n db.session.commit()\n flash(\"Expense report '%s' has been accepted.\" % report.title, 'success')\n return redirect(url_for('reports_all', workspace=workspace.name), code=303)\n\n\n@app.route('//reports//return_for_review', methods=['POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='review'\n )\ndef report_return(workspace, report):\n wf = report.workflow()\n wf.return_for_review(reviewer=g.user, notes='') # TODO: Form for notes\n db.session.commit()\n flash(\"Expense report '%s' has been returned for review.\" % report.title,\n 'success')\n return redirect(url_for('reports_all', workspace=workspace.name), code=303)\n\n\n@app.route('//reports//reject', methods=['POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='review'\n )\ndef report_reject(workspace, report):\n wf = report.workflow()\n wf.reject(reviewer=g.user, notes='') # TODO: Form for notes\n db.session.commit()\n flash(\"Expense report '%s' has been rejected.\" % report.title, 'success')\n return redirect(url_for('reports_all', workspace=workspace.name), code=303)\n\n\n@app.route('//reports//withdraw', methods=['POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='withdraw'\n )\ndef report_withdraw(workspace, report):\n wf = report.workflow()\n wf.withdraw()\n db.session.commit()\n flash(\"Expense report '%s' has been withdrawn.\" % report.title, 'success')\n return redirect(url_for('reports', workspace=workspace.name), code=303)\n\n\n@app.route('//reports//close', methods=['POST'])\n@lastuser.requires_login\n@load_models(\n (Workspace, {'name': 'workspace'}, 'g.workspace'),\n (ExpenseReport, {'url_name': 'report', 'workspace': 'workspace'}, 'report'),\n permission='review'\n )\ndef report_close(workspace, report):\n wf = report.workflow()\n wf.close()\n db.session.commit()\n flash(\"Expense report '%s' has been closed.\" % report.title, 'success')\n return redirect(url_for('reports_all', workspace=workspace.name), code=303)\n","sub_path":"kharcha/views/expenses.py","file_name":"expenses.py","file_ext":"py","file_size_in_byte":13744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"251709846","text":"# CONFIG\n# ---------\ntoken = \"NTcyMTIzNTU2NzI1NTIyNDcz.XN8zjA.LEyio-Nc4XOaBAQFk_FQXNaDIsU\" # This is what the bot uses to log into Discord.\nprefix = \"emb/\" # This will be used at the start of commands.\nembed_role = \"Embed Creator\" # The role in your server used for embedding.\ngame = \"with embeds!\" # This will display as the game on Discord.\n# ----------\n\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nimport discord\n\nbot = commands.Bot(command_prefix=prefix)\nbot.remove_command(\"help\")\n\n\n@bot.event\nasync def on_ready():\n print(\"Ready when you are. 
;)\") \n print(\"Name: {}\".format(bot.user.name))\n print(\"ID: {}\".format(bot.user.id))\n await bot.change_presence(activity=discord.Game(name=game))\n\n@bot.command(pass_context=True)\nasync def help(ctx):\n channeldd = ctx.message.channel\n embed = discord.Embed(title=\"Help!\", description=\"Basically, this is how I'm used.\", color=0x00a0ea)\n embed.add_field(name=\"{}embed\".format(prefix), value=\"Creates a quick embed with the users input after the command is called.\")\n embed.add_field(name=\"{}rembed\".format(prefix), value=\"Let's you embed with more user input. After entering your message the bot will ask questions about the color and thumbnail.\")\n embed.set_footer(text=\"Embed Creator\")\n await channeldd.send(embed=embed)\n\n@bot.command(pass_context=True)\n@commands.has_role(embed_role)\nasync def rembed(ctx, *, a_sMessage):\n channeldd = ctx.message.channel\n color = None\n thumb = None\n embed_color = discord.Embed(title=\"🕑 Tick-Tock\", description=\"Would you like to use a **custom color**? If **yes**, state it. If **no** simply say *no*.\", color=0xffff00)\n embed_color.set_footer(text=\"Simply type a color name such as green in plaintext.\")\n embed_thumb = discord.Embed(title=\"🕑 Tick-Tock\", description=\"Would you like to use a **custom thumbnail**? If **yes**, state it. If **no** simply say *no*.\", color=0xffff00)\n embed_thumb.set_footer(text=\"Simply type an image URL in plaintext.\")\n await bot.delete_message(ctx.message)\n ques1 = await bot.send_message(ctx.message.channel, embed=embed_color)\n ques1\n msg = await bot.wait_for_message(author=ctx.message.author, timeout=60)\n if msg.content.lower() == \"green\":\n await bot.delete_message(ques1)\n await bot.delete_message(msg)\n color = 0x00ff00\n ques2 = await channeldd.send(embed=embed_thumb)\n ques2\n elif msg.content.lower() == \"yellow\":\n await bot.delete_message(ques1)\n await bot.delete_message(msg)\n color = 0xFFFF00\n ques2 = await channeldd.send(embed=embed_thumb)\n ques2\n elif msg.content.lower() == \"blue\":\n await bot.delete_message(ques1)\n await bot.delete_message(msg)\n color = 0x0000ff\n ques2 = await channeldd.send(embed=embed_thumb)\n ques2\n elif msg.content.lower() == \"red\":\n await bot.delete_message(ques1)\n await bot.delete_message(msg)\n color = 0xff0000\n ques2 = await channeldd.send(embed=embed_thumb)\n ques2\n else:\n await bot.delete_message(ques1)\n await bot.delete_message(msg)\n color = 0x00a0ea\n ques2 = await channeldd.send(embed=embed_thumb)\n ques2\n msg = await bot.wait_for_message(author=ctx.message.author, timeout=60)\n if msg.content.lower() == \"no\":\n await bot.delete_message(ques2)\n await bot.delete_message(msg)\n thumb = ctx.message.author.avatar_url\n else:\n await bot.delete_message(ques2)\n await bot.delete_message(msg)\n thumb = msg.content\n embed = discord.Embed(description=a_sMessage, color=color)\n embed.set_thumbnail(url=thumb)\n embed.set_author(name=ctx.message.author.name + \" says..\")\n embed.set_footer(text=\"Embed-This!\")\n await channeldd.send(embed=embed)\n print(ctx.message.author.name + \" has embedded a message in \" + ctx.message.server.name)\n\n@bot.command(pass_context=True)\n@commands.has_role(embed_role)\nasync def embed(ctx, *, a_sMessage):\n channeldd = ctx.message.channel\n embed = discord.Embed(description=a_sMessage, color=0x00a0ea)\n embed.set_thumbnail(url=ctx.message.author.avatar_url)\n embed.set_author(name=ctx.message.author.name + \" says..\")\n embed.set_footer(text=\"Embed Creator\")\n await 
bot.delete_message(ctx.message)\n await channeldd.send(embed=embed)\n print(ctx.message.author.name + \" has embedded a message in \" + ctx.message.server.name)\n\nbot.run(token)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"484180712","text":"# coding=utf-8\nfrom datetime import datetime, date, timedelta\nfrom functools import wraps, reduce\nimport json\n\n\ndef running_time(fn):\n @wraps(fn)\n def process_fn(*args, **kwargs):\n t1 = datetime.now().timestamp()\n res = fn(*args, **kwargs)\n used_time = datetime.now().timestamp() - t1\n # process_fn(used_time)\n print(fn.__name__, \"used(s):\", used_time)\n return res\n return process_fn\n\n\ndef print_file_lineno():\n import inspect\n callerframerecord = inspect.stack()[1]\n frame = callerframerecord[0]\n info = inspect.getframeinfo(frame)\n return info.filename, info.lineno # , info.function, info.code_context\n\n\n@running_time\ndef all_prem(num_list):\n \"\"\"\n reduce to ger perm\n func_perm = lambda x, code=',': reduce(lambda x, y: ['%s%s' % (i, j) for i in x for j in y], x)\n :param num_list:\n :return:\n \"\"\"\n lists = [num_list for i in range(len(num_list))]\n func_perm = lambda x: reduce(lambda x, y: ['%s%s' % (i, j) for i in x for j in y], x)\n lists = func_perm(lists)\n print(len(lists))\n return lists\n\n\n@running_time\ndef recursion_perm(lists, k):\n \"\"\"\n recursion to get perm\n lists: [1,2,3,4,5,]\n k: length\n \"\"\"\n if k == 1:\n return lists\n all_perm = []\n result = recursion_perm(lists, k - 1)\n for item in result:\n for j in lists:\n all_perm += [str(j) + str(item)]\n return all_perm\n\n\nclass Singleton(type):\n \"\"\"\n class SingleClass(metaclass=Singleton) # python3\n __metaclass__ = Singleton # python2\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.__instance = None\n super().__init__(*args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n if self.__instance is None:\n self.__instance = super().__call__(*args, **kwargs)\n return self.__instance\n else:\n return self.__instance\n\n\nclass JsonSerialize(object):\n\n @staticmethod\n def str2json(str_msg):\n assert isinstance(str_msg, str)\n return json.loads(str_msg)\n\n @staticmethod\n def byte2json(byte_msg):\n assert isinstance(byte_msg, bytes)\n byte_msg = byte_msg.decode('utf-8')\n return json.loads(byte_msg)\n\n @staticmethod\n def json2str(json_msg):\n assert isinstance(json_msg, dict)\n return json.dumps(json_msg)\n\n @staticmethod\n def json2byte(json_msg):\n assert isinstance(json_msg, dict)\n return json.dumps(json_msg).encode('utf-8')\n\n @staticmethod\n def json2file(json_msg, filename):\n with open(filename, 'w+') as f:\n f.write(json.dumps(json_msg, indent=4))\n\n @staticmethod\n def file2json(filename):\n with open(filename, 'r') as f:\n json_data = json.load(f)\n return json_data\n\n @staticmethod\n def json_sort(json_msg, key=0):\n json_msg = sorted(json_msg, key=lambda d: d[key])\n return json_msg\n\n\nclass DatetimeManage(object):\n FMTDEFAULT = \"%Y-%m-%d %H:%M:%S\"\n FMTES = \"%Y-%m-%dT%H:%M:%S\"\n FDATE = \"%Y-%m-%d\"\n NDATEF = \"%Y%m%d\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def datetime2str(dt, fmt=FMTDEFAULT):\n \"\"\"\n :param dt:\n :param fmt: default=\"%Y-%m-%d %H:%M:%S\"\n :return:\n \"\"\"\n if isinstance(dt, datetime):\n return dt.strftime(fmt)\n else:\n return None\n\n @staticmethod\n @running_time\n def str2datetime(st, fmt=FMTDEFAULT):\n try:\n return 
datetime.strptime(st, fmt)\n except Exception as e:\n return None\n\n @staticmethod\n def iso2datetime(tt, fmt=\"%Y-%m-%dT%H:%M:%S.%f\"):\n try:\n return datetime.strptime(tt, fmt)\n except Exception as e:\n return None\n\n @staticmethod\n def get_current_week(weeks_ago=0, str_result=False):\n \"\"\"\n get monday and sunday of current date\n :param weeks_ago: int\n :return:\n \"\"\"\n monday, sunday = date.today() - timedelta(days=7*weeks_ago), date.today() - timedelta(days=7*weeks_ago)\n one_day = timedelta(days=1)\n while monday.weekday() != 0:\n monday -= one_day\n while sunday.weekday() != 6:\n sunday += one_day\n if str_result:\n return monday.strftime(\"%Y-%m-%d\"), sunday.strftime(\"%Y-%m-%d\")\n return monday, sunday\n\n @staticmethod\n def get_days_ago(days):\n days_ago = date.today() - timedelta(days=days)\n return days_ago.strftime(DatetimeManage.NDATEF)\n\n@running_time\ndef test():\n sum = 0\n for i in range(123000):\n sum += i**2 + 1\n return sum\n\n\nif __name__ == \"__main__\":\n test()\n data = [i for i in range(1, 9)]\n data1 = all_prem(data)\n data2 = recursion_perm(data, 8)\n print(len(data2))\n","sub_path":"FSTornado/common/common_base.py","file_name":"common_base.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"501759291","text":"\nimport matplotlib.pyplot as plt\nfrom scripts import ColorDeconvolution\n\n\nif __name__==\"__main__\":\n\n # reading the image\n #path=\"Tumor_CD31_LoRes.png\"\n path=\"figure9.jpg\"\n img = plt.imread(path)\n img = img[:, :, 0:3]\n\n satin = ColorDeconvolution(img)\n satin.separateStain()\n satin.showStains()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"601102728","text":"\nfrom ll.org.Export.Scripts.SpecsBuilder import linksetSpecsData, linksetSpecsDataItr\nfrom ll.org.Export.Tests.TestData import DATA_DIR as DIR\nfrom os.path import join\nfrom csv import reader as csv_reader\n\ntest, chiara, leon = True, False, False\nsave_in, csv, leon = \"/Users/al/Downloads/\", \"Accepted.csv\", \"Leon-38.txt\"\n\n\n# CHIARA JOBS RESULT OVERVIEW ####################################################################################\n# 01. 306ff87eccb2ef66dfe9d4521f90fcc4 : PROBLEM WITH NON EXISTING LONG URI FOR rdaGr2:dateOfBirth\n# 02. 
fb1cb1d40b5af63fe92fac088e21738f : NO PROBLEM OBSERVED\n# ################################################################################################################\n\njobs = [\"306ff87eccb2ef66dfe9d4521f90fcc4\", \"fb1cb1d40b5af63fe92fac088e21738f\", \"\"]\nUSED_JOB = 1\nLIST_1 = [0, 1, 2, 5, 6, 7, 17, 18]\nLIST_2 = [0, 1, 2, 4, 5, 10, 12, 14, 15, 18, 20, 21, 22, 24, 25, 24, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41]\nfor i in [38]:\n with open(join(DIR, leon)) as file_data:\n data = csv_reader(file_data)\n linksetSpecsDataItr(linksetId=i, job=jobs[USED_JOB], lst_result=data,\n starReification=True, save_in=save_in, printSpec=False)\n","sub_path":"src/ll/org/Export/Tests/Runs/Leon.py","file_name":"Leon.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"473059088","text":"# coding: utf-8\nfrom sqlalchemy import create_engine\n\n\ndef checkdb(val1, val2,inkey):\n #key1= '1710240037'\n key1= inkey\n strsql='select u_id,repair_order_type from repair_order where repair_order_code = \\''\n strsql += key1\n strsql += '\\';'\n #print (strsql)\n rs = connection.execute(strsql)\n #print (rs.rowcount)\n data = rs.fetchone()\n #print(data)\n #print ('1 ok')\n if rs.rowcount > 0:\n str1=data[0]\n str2=data[1]\n #print (str1)\n strsql = 'select trouble_name,trouble_way from repair_trouble where repair_order_uid = \\''\n strsql += str1\n strsql += '\\' and repair_trouble_type = '\n strsql += '\\'回单\\''\n #print (strsql)\n #print ('2 ok')\n rs = connection.execute(strsql)\n if rs.rowcount > 0:\n data2 = rs.fetchone()\n #print(key1+'||'+data2[0]+'||'+data2[1])\n h2.write(key1+'||'+data2[0]+'||'+data2[1]+'\\n')\n\n\n\nh1=open('e:\\list3.txt','r')\nwq=\"mysql+pymysql://root@localhost:3306/track_db?charset=utf8\"\nh2 = open('e:\\list416.txt', 'a+')\nengine = create_engine(wq, max_overflow=3)\nconnection = engine.connect()\nconnection = connection.execution_options( isolation_level=\"READ COMMITTED\")\nfor r1 in h1:\n if r1.startswith('16'):\n s1=r1.replace('\\n','')\n #print (s1)\n checkdb(wq,\"五汽票务\",s1)\n\nconnection.close\nh2.close()","sub_path":"corp_sr/analize_zh/stage3.py","file_name":"stage3.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"610990141","text":"\nimport os\n#do I need to use glob?\n\n\n#currentDirectory = os.getcwd() #not just on my desktop\n#nevermind the above I can just use \".\"\n\n\nimage_list = open(\"output.csv\",\"w+\")\n\nfor path, subdirs, files in os.walk(\".\"):\n for x in files:\n if x.endswith(\".jpg\"):\n image_list.write(os.path.join(path, x) + \",\") \n path_sans_point = str(path).replace(\".\",\"\") #get rid of point\n new_point = path_sans_point.replace(\"\\\\\",\"\") #get rid of slash in pathname\n image_list.write(new_point +\"\\n\")\n\n if x.endswith(\".png\"):\n image_list.write(os.path.join(path, x) + \",\")\n path_sans_point = str(path).replace(\".\",\"\")\n new_point = path_sans_point.replace(\"\\\\\",\"\")\n image_list.write(new_point +\"\\n\")\n\n\nimage_list.close()\n\nwith open(\"output.csv\",\"r\") as print_list: #so I can look at it in console\n\tprint (print_list.read())\n\n","sub_path":"pathfile.py","file_name":"pathfile.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} 
+{"seq_id":"303010192","text":"'''\nhttps://leetcode.com/problems/construct-binary-search-tree-from-preorder-traversal/\n'''\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def bstFromPreorder(self, preorder: List[int]) -> TreeNode:\n root = None\n \n def f(root, item):\n if root is None:\n root = TreeNode(item)\n else:\n if item < root.val:\n root.left = f(root.left, item)\n else:\n root.right = f(root.right, item)\n return root\n for item in preorder:\n root = f(root, item)\n return root","sub_path":"BST/build-bst-from-preOrder.py","file_name":"build-bst-from-preOrder.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"401582455","text":"# Copyright (c) 2012-2022 by the GalSim developers team on GitHub\n# https://github.com/GalSim-developers\n#\n# This file is part of GalSim: The modular galaxy image simulation toolkit.\n# https://github.com/GalSim-developers/GalSim\n#\n# GalSim is free software: redistribution and use in source and binary forms,\n# with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions, and the disclaimer given in the accompanying LICENSE\n# file.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions, and the disclaimer given in the documentation\n# and/or other materials provided with the distribution.\n#\n\nfrom __future__ import print_function\n\nimport sys\nimport time\nimport numpy as np\nimport galsim\n\n# Put the salient numbers up here so they are easy to adjust.\ncounts_per_iter = 4.e3 # a few thousand is probably fine. (bigger is faster of course.)\ncounts_total = 80.e3 # 80K flats\nnx = 509\nny = 2000\nnborder = 2\nnflats = 10\ntreering_amplitude = 0.0#0.26\ntreering_period = 47.\ntreering_center = galsim.PositionD(0,0)\nseed = 31415\n\nfor counts in range(1,6):\n counts_total = 20000.0 * counts # 80.e3 # 80K flats\n\n # This is very similar to treering_skybg2.py, which builds a sky image.\n # But here we build up to a much higher flux level where B/F is important.\n\n t0 = time.time()\n\n rng = galsim.UniformDeviate(seed)\n\n treering_func = galsim.SiliconSensor.simple_treerings(treering_amplitude, treering_period)\n\n niter = int(counts_total / counts_per_iter + 0.5)\n counts_per_iter = counts_total / niter # Recalculate in case not even multiple.\n print('Total counts = {} = {} * {}'.format(counts_total,niter,counts_per_iter))\n\n # Not an LSST wcs, but just make sure this works properly with a non-trivial wcs.\n wcs = galsim.FitsWCS('../../tests/fits_files/tnx.fits')\n\n base_image = galsim.ImageF(nx+2*nborder, ny+2*nborder, wcs=wcs)\n print('image bounds = ',base_image.bounds)\n\n # nrecalc is actually irrelevant here, since a recalculation will be forced on each iteration.\n # Which is really the point. We need to set coundsPerIter appropriately so that the B/F effect\n # doesn't change *too* much between iterations.\n sensor = galsim.SiliconSensor(rng=rng,\n treering_func=treering_func, treering_center=treering_center)\n\n # We also need to account for the distortion of the wcs across the image. 
\n # This expects sky_level in ADU/arcsec^2, not ADU/pixel.\n base_image.wcs.makeSkyImage(base_image, sky_level=1.)\n base_image.write('wcs_area.fits')\n\n # Rescale so that the mean sky level per pixel is skyCounts\n mean_pixel_area = base_image.array.mean()\n\n sky_level_per_iter = counts_per_iter / mean_pixel_area # in ADU/arcsec^2 now.\n base_image *= sky_level_per_iter\n\n # The base_image has the right level to account for the WCS distortion, but not any sensor effects.\n # This is the noise-free level that we want to add each iteration modulated by the sensor.\n\n noise = galsim.PoissonNoise(rng)\n\n t1 = time.time()\n print('Initial setup time = ',t1-t0)\n\n # Make flats\n for n in range(nflats):\n t1 = time.time()\n # image is the image that we will build up in steps.\n # We add on a border of 2 pixels, since the outer row/col get a little messed up by photons\n # falling off the edge, but not coming on from the other direction.\n # We do 2 rows/cols rather than just 1 to be safe, since I think diffusion can probably go\n # 2 pixels, even though the deficit is only really evident on the outer pixel.\n image = galsim.ImageF(nx+2*nborder, ny+2*nborder, wcs=wcs)\n\n for i in range(niter):\n t2 = time.time()\n # temp is the additional flux we will add to the image in this iteration.\n # Start with the right area due to the sensor effects.\n temp = sensor.calculate_pixel_areas(image)\n temp.write('sensor_area.fits')\n\n # Multiply by the base image to get the right mean level and wcs effects\n temp *= base_image \n temp.write('nonoise.fits')\n\n # Finally, add noise. What we have here so far is the expectation value in each pixel.\n # We need to realize this according to Poisson statistics with these means.\n temp.addNoise(noise)\n temp.write('withnoise.fits')\n\n # Add this to the image we are building up.\n image += temp\n t3 = time.time()\n print('Iter {}: time = {}'.format(i,t3-t2))\n\n # Cut off the outer border where things don't work quite right.\n print('bounds = ',image.bounds)\n image = image.subImage(galsim.BoundsI(1+nborder,nx+nborder,1+nborder,ny+nborder))\n print('bounds => ',image.bounds)\n image.setOrigin(1,1)\n print('bounds => ',image.bounds)\n\n t4 = time.time()\n print('Total time to make flat image with level {} = {}'.format(counts_total, t4-t1))\n\n image.write('csl_flats/flat_%d_%02d.fits'%(counts,n))\n\n t5 = time.time()\n print('Total time to make {} flat images = {}'.format(nflats, t5-t0))\n","sub_path":"devel/lsst/treering_flat_csl_18apr18.py","file_name":"treering_flat_csl_18apr18.py","file_ext":"py","file_size_in_byte":5334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"445865372","text":"from mie2c.e2c import Encoder, Decoder, Transition, LinearTransition, PWATransition\n\nimport torch\nfrom torch import nn\n\ndef get_bounce_encoder(dim_in, dim_z): \n channels_enc = [6, 32, 32, 16, 16]\n ff_shape = [128, 128, 128]\n\n conv_activation = torch.nn.ReLU()\n ff_activation = torch.nn.ReLU()\n\n n_channels = len(channels_enc) - 1\n kernel_enc = [5, 3, 5, 3, 5] \n stride= [2, 1, 2, 1, 2]\n padding= [2, 1, 2, 1, 2]\n pool = [None, 2, None, 2, 2]\n\n return Encoder(dim_in, dim_z, channels_enc, ff_shape, kernel_enc, stride, padding, pool, conv_activation=conv_activation, ff_activation=ff_activation)\n\n\ndef get_bounce_decoder(dim_in, dim_out): \n channels_dec = [6, 32, 32, 16, dim_out[0]]\n ff_shape = [128, 128, 128]\n\n conv_activation = torch.nn.ReLU()\n ff_activation = torch.nn.ReLU()\n\n n_channels = 
len(channels_dec) - 1\n kernel_dec = [5, 3, 5, 3, 5] \n stride = [1, 1, 1, 1, 2]\n padding = [2, 1, 2, 1, 2]\n\n return Decoder(dim_in, dim_out, channels_dec, ff_shape, kernel_dec, stride, padding, ff_activation=ff_activation, conv_activation=conv_activation)\n\n\ndef get_bounce_transition(dim_z, dim_u):\n nn_width = 32\n trans = nn.Sequential(\n nn.Linear(dim_z, nn_width),\n nn.BatchNorm1d(nn_width),\n nn.ReLU(),\n nn.Linear(nn_width, nn_width),\n nn.BatchNorm1d(nn_width),\n nn.ReLU(),\n nn.Linear(nn_width, dim_z*2)\n )\n\n return Transition(trans, dim_z, dim_u)\n\n\ndef get_bounce_linear_transition(dim_z, dim_u, low_rank=True):\n A = torch.nn.Parameter(2. * (torch.randn(dim_z, dim_z) - .5))\n r = torch.nn.Parameter(2. * (torch.randn(dim_z) - .5))\n v = torch.nn.Parameter(2. * (torch.randn(dim_z) - .5))\n B = torch.nn.Parameter(2. * (torch.randn(dim_z, dim_u) - .5))\n o = torch.nn.Parameter(2. * (torch.randn(dim_z, 1) - .5))\n\n return LinearTransition(dim_z, dim_u, r, v, A, B, o, low_rank=low_rank)\n\n\ndef get_bounce_pwa_transition(num_modes, dim_z, dim_u, low_rank=True):\n mode_classifier = nn.Linear(dim_z, num_modes)\n As = torch.nn.ParameterList()\n rs = torch.nn.ParameterList()\n vs = torch.nn.ParameterList()\n Bs = torch.nn.ParameterList()\n os = torch.nn.ParameterList()\n for mode in range(num_modes):\n As.append(torch.nn.Parameter(2. * (torch.randn(dim_z, dim_z) - .5)))\n rs.append(torch.nn.Parameter(2. * (torch.randn(dim_z) - .5)))\n vs.append(torch.nn.Parameter(2. * (torch.randn(dim_z) - .5)))\n Bs.append(torch.nn.Parameter(2. * (torch.randn(dim_z, dim_u) - .5)))\n os.append(torch.nn.Parameter(2. * (torch.randn(dim_z, 1) - .5)))\n\n return PWATransition(dim_z, dim_u, mode_classifier, rs, vs, As, Bs, os, low_rank=low_rank)\n","sub_path":"mie2c/bounce_model.py","file_name":"bounce_model.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"246132839","text":"# Marcelo Campos de Medeiros\n# ADS UNIFIP\n# REVISÃO DE PYTHON\n# AULA 14 LAÇO DE REPETIÇÃO WHILE ---> GUSTAVO GUANABARA\n\n'''\nFaça um Programa que leia vários números inteiros pelo teclado. O programa só vai parar \nquando o usuário digitar o valor 999, que é a condição de parada. 
print('='*30)\nprint('{:*^30}'.format(' SUM UNTIL 999 IS TYPED '))\nprint('='*30)\nprint()\n\n# counters n / cont / soma\n# simplified form: n = cont = soma = 0\nn = 0\ncont = 0\nsoma = 0 \n# loop reading n until 999 is typed\nwhile n != 999:\n n = int(input('Type an integer [999 to stop]: '))\n if n != 999:\n cont += 1\n soma += n\n \nprint(f'{cont} numbers were entered and their sum is {soma}')\n","sub_path":"desafio064.py","file_name":"desafio064.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"245174473","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 4 20:56:35 2021\n\n@author: Pano\n\"\"\"\n# This is a comment\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\n#####################################\n##################################### FINDING OPTIMAL FUNCTION FOR RELU\n#####################################\n##################### Two different implementations of the RELU function\n\n# More efficient - faster to execute\ndef relu1(x):\n return x if(x>0) else 0.0\n\n# Less efficient - slower to execute\ndef relu2(x):\n return max(0.0,x)\n#####################################\n#######################\n###############\n\ndef relu(x):\n return x if(x>0) else 0.0 \n \n###############\n#######################\n#####################################\n\n# The code below will run only if this file is run directly. If we import this \n# script into another module, __name__ will change to the name of this file\n# i.e. __name__ will then be equal to \"activation-functions\"\n# (1) We compare times by testing the relu functions relu1 and relu2 \n# (2) Plots are made of both functions \n\n\nif __name__==\"__main__\":\n def timeComparison(func,rangeOfValues):\n startTime=time.time()\n for i in range(-rangeOfValues,rangeOfValues):\n a=func(i)\n stopTime=time.time()\n time_relu=stopTime-startTime\n return time_relu \n\n # from minus rangeOfVal to positive rangeOfVal\n rangeOfVal=10000000\n time_relu1=timeComparison(relu1,rangeOfVal)\n time_relu2=timeComparison(relu2,rangeOfVal)\n \n #####################################\n print(\"____________________________________\")\n print(\"Time comparison of Relu 1 vs Relu 2\")\n print(\"Relu 1\",time_relu1)\n print(\"Relu 2\",time_relu2)\n print(\"____________________________________\")\n #####################################\n if (time_relu1>time_relu2):\n relu_final=relu2\n print(\"Relu 2 (using max) is the faster implementation\\n\")\n else:\n relu_final=relu1\n print(\"Relu 1 (using conditional logic) is the faster implementation\\n\")\n ### Test that the function works\n startTime=time.time()\n for i in range(-rangeOfVal,rangeOfVal):\n a=relu_final(i)\n stopTime=time.time()\n time_relu=stopTime-startTime\n #####################################\n print(\"Time comparison of optimal relu\")\n print(\"Relu time\",time_relu) \n \n #####################################\n ##################################### PLOTTING SOME GRAPHS\n #####################################\n \n sig_Graph=[]\n relu_Graph=[]\n x_val=[] \n StartStop_X_Index=20\n\n for i,val in enumerate(range(-StartStop_X_Index,StartStop_X_Index)):\n relu_Graph.append(relu(val))\n sig_Graph.append(sigmoid(val))\n x_val.append(val)\n \n plt.subplot(2,1,1) \n plt.title(\"Sigmoid Function\")\n plt.xlabel(\"x axis 
(inputs)\")\n plt.ylabel(\"y axis (output range)\")\n plt.plot(x_val,sig_Graph)\n plt.subplot(2,1,2) \n plt.title(\"Relu Funtion\") \n plt.xlabel(\"x axis (inputs)\")\n plt.ylabel(\"y axis (output range)\") \n plt.plot(x_val,relu_Graph) \n plt.show()\n \n\n\n\n\n","sub_path":"activation-functions.py","file_name":"activation-functions.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"300721938","text":"#encoding: UTF-8\n\nfrom typing import List, Sequence, Union\nfrom functools import lru_cache\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Kline, Line, Bar, Grid, Tab\nfrom pyecharts.globals import ThemeType\nfrom flask import render_template\n\n\nfrom options_monitor.data_manager import SIVManager\nfrom options_monitor.data_ref import \\\n PRODUCT_GROUP_NAME, FUTURE_HV_NAMES_REVERSE, \\\n IV_NAME, IV_PER, OPEN_INTEREST_NAME, HV_20_NAME, HV_250_NAME, \\\n CLOSE_PRICE_NAME, VOLUME_NAME\n\n\nimport pandas as pd\n\nTHEME_ME = ThemeType.DARK\n\n\n#----------------------------------------------------------------------\n@lru_cache\ndef get_siv_info(now_date_str: str):\n \"\"\"analyze\"\"\"\n siv_mgr = SIVManager()\n all_dfs = siv_mgr.prepare(None, now_date_str)\n return all_dfs\n\n\n#----------------------------------------------------------------------\ndef get_iv_data(product: str, date_str: str):\n \"\"\"get the iv data by contract and date\"\"\"\n analyze_dfs = get_siv_info(date_str)\n product_rev = FUTURE_HV_NAMES_REVERSE.get(product)\n for df in analyze_dfs:\n if df[PRODUCT_GROUP_NAME][0] == product_rev:\n return df[df.index <= date_str]\n\n\n#----------------------------------------------------------------------\ndef kline_chart(data: pd.DataFrame, product: str):\n # 最后的 Grid\n grid_chart = Grid(init_opts = opts.InitOpts(theme = THEME_ME))\n dates = data.index.to_list()\n\n colors = ['ivory', 'crimson', 'gold', 'cyan', 'teal', 'tan', 'cyan', 'red']\n\n # two lines to show ivp, normal ivp with cyan, warn vip for red.\n ivp = data[IV_PER]\n ivp_shift_left = ivp.shift(-1)\n data['ivp_warn'] = data[IV_PER][(ivp >= 91) | (ivp <= 15) |\n (ivp_shift_left >= 91) | (ivp_shift_left <= 15)]\n\n hv_show = True\n if not data[IV_NAME].isnull().all():\n hv_show = False\n\n kline = (\n Line(init_opts = opts.InitOpts())\n .add_xaxis(xaxis_data = dates)\n .add_yaxis(\n series_name = \"kline\",\n y_axis = data[CLOSE_PRICE_NAME],\n color = colors[7],\n linestyle_opts = opts.LineStyleOpts(opacity = 1, width = 2.),\n markline_opts = opts.MarkLineOpts(\n data = [\n opts.MarkLineItem(type_ = \"min\", name = \"最低价\", symbol = 'none'),\n opts.MarkLineItem(type_ = \"max\", name = \"最高价\", symbol = 'none'),\n ]\n ),\n )\n .add_yaxis(\n series_name = \"siv\",\n y_axis = data[IV_NAME] * 100,\n yaxis_index = 1,\n is_symbol_show = False,\n # is_smooth=True,\n color = colors[6],\n markline_opts = opts.MarkLineOpts(\n data = [\n opts.MarkLineItem(type_ = \"min\", name = \"ivl\"),\n opts.MarkLineItem(type_ = \"max\", name = \"ivh\"),\n ]\n ),\n linestyle_opts = opts.LineStyleOpts(opacity = 1, width = 1.5),\n label_opts = opts.LabelOpts(is_show = False),\n )\n .add_yaxis(\n series_name = \"siv5\",\n y_axis = data[IV_NAME].rolling(5).mean() * 100,\n yaxis_index = 1,\n is_symbol_show = False,\n # is_smooth=True,\n color = colors[5],\n linestyle_opts = opts.LineStyleOpts(opacity = 0.9, width = 1.2),\n label_opts = opts.LabelOpts(is_show = False),\n )\n .add_yaxis(\n series_name = \"siv10\",\n y_axis = 
data[IV_NAME].rolling(10).mean() * 100,\n yaxis_index = 1,\n is_symbol_show = False,\n # is_smooth=True,\n color = colors[4],\n linestyle_opts = opts.LineStyleOpts(opacity = 0.9, width = 1.2),\n label_opts = opts.LabelOpts(is_show = False),\n )\n .add_yaxis(\n series_name = \"hv20\",\n y_axis = data[HV_20_NAME] * 100,\n yaxis_index = 1,\n is_symbol_show = False,\n is_selected = hv_show,\n # is_smooth=True,\n color = colors[3],\n linestyle_opts = opts.LineStyleOpts(opacity = 0.9, width = 1.2),\n label_opts = opts.LabelOpts(is_show = False),\n )\n .add_yaxis(\n series_name = \"hv250\",\n y_axis = data[HV_250_NAME] * 100,\n yaxis_index = 1,\n is_symbol_show = False,\n is_selected = hv_show,\n # is_smooth=True,\n color = colors[2],\n linestyle_opts = opts.LineStyleOpts(opacity = 0.8, width = 1),\n label_opts = opts.LabelOpts(is_show = False),\n )\n .add_yaxis(\n series_name = \"ivp\",\n y_axis = ivp,\n yaxis_index = 2,\n is_symbol_show = False,\n # is_smooth=True,\n color = colors[1],\n linestyle_opts = opts.LineStyleOpts(\n opacity = 1,\n width = 1.2),\n label_opts = opts.LabelOpts(is_show = False),\n )\n .add_yaxis(\n series_name = \"ivp_warn\",\n y_axis = data['ivp_warn'],\n yaxis_index = 2,\n is_symbol_show = False,\n color = colors[0],\n linestyle_opts = opts.LineStyleOpts(\n opacity = 1,\n width = 1.2),\n # is_smooth=True,\n label_opts = opts.LabelOpts(is_show = False),\n )\n .extend_axis(\n yaxis = opts.AxisOpts(\n name = 'vix',\n is_scale = True,\n axislabel_opts = opts.LabelOpts(formatter = \"{value} %\"), interval = 5\n )\n )\n .extend_axis(\n yaxis = opts.AxisOpts(\n is_show = False,\n is_scale = False,\n min_ = 0,\n max_ = 500,\n ),\n )\n .set_series_opts(\n label_opts = opts.LabelOpts(is_show=False),\n # 'circle', 'rect', 'roundRect', 'triangle', 'diamond', 'pin', 'arrow', 'none'\n )\n .set_global_opts(\n title_opts = opts.TitleOpts(title = f\"{product} siv\", pos_left=\"0\"),\n xaxis_opts = opts.AxisOpts(\n type_=\"category\",\n is_scale=True,\n boundary_gap=False,\n axisline_opts=opts.AxisLineOpts(is_on_zero=False),\n splitline_opts=opts.SplitLineOpts(is_show=False),\n split_number=20,\n min_=\"dataMin\",\n max_=\"dataMax\",\n ),\n yaxis_opts = opts.AxisOpts(\n name = \"指数价格\",\n position = \"left\",\n is_scale = True,\n splitline_opts = opts.SplitLineOpts(is_show = True),\n ),\n tooltip_opts = opts.TooltipOpts(trigger = \"axis\", axis_pointer_type = \"line\"),\n legend_opts = opts.LegendOpts(is_show = True),\n datazoom_opts = opts.DataZoomOpts(type_ = \"slider\", range_start = 0, range_end = 100),\n )\n )\n\n return kline\n\n\n#----------------------------------------------------------------------\ndef get_template(product: str, date_str: str):\n kwargs = {\"product\" : product,\n \"date\" : date_str,\n \"tabs\": FUTURE_HV_NAMES_REVERSE.keys()}\n return render_template('options.html', **kwargs)\n\n\n#----------------------------------------------------------------------\ndef get_data(product: str, date_str: str):\n data = get_iv_data(product, date_str)\n kline = kline_chart(data, product)\n return kline.dump_options_with_quotes()\n","sub_path":"pyecharts_flask_kline/options_handlers.py","file_name":"options_handlers.py","file_ext":"py","file_size_in_byte":7594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"354662291","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 4 00:56:19 2018\n\n@author: erlisuo\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as 
pd\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import KMeans\n\n\ndef run_pca(n_c, X_train, X_test):\n #apply ICA\n from sklearn.decomposition import PCA\n pca = PCA(n_components = n_c)\n X_train = pca.fit_transform(X_train)\n X_test = pca.transform(X_test)\n# explained_variance = pca.explained_variance_ratio_\n# print( explained_variance)\n return [X_train, X_test]\n\n\ndef draw_kmeans(X_train):\n wcss = []\n n_cluster = 30\n for i in range(1,n_cluster):\n kmeans = KMeans(n_clusters = i, \n init = 'k-means++',\n max_iter = 300,\n n_init = 10,\n random_state = 0)\n kmeans.fit(X_train)\n wcss.append(kmeans.inertia_)\n plt.plot(range(1,n_cluster), wcss)\n plt.title('kmean K selection')\n plt.xlabel('Number of clusters')\n plt.ylabel('WCSS')\n plt.show()\n return wcss\n\ndef run_kmeans(n_c, X_train, X_test): \n kmeans = KMeans(n_clusters=n_c, init='k-means++', n_init=10, max_iter = 500, random_state = 0)\n y_pred_train = kmeans.fit_predict(X_train)\n y_pred_test = kmeans.predict(X_test)\n return [y_pred_train, y_pred_test]\n\ndataset = pd.read_csv('seeds.csv')\nX = dataset.iloc[:,0:7]\nX_values = X.values\ny = dataset.iloc[:,7]\ny_values = y.values\n\n\nX_train, X_test, y_train, y_test = train_test_split(X_values, y_values, test_size = 0.2, random_state = 0)\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\nn_classes = len(np.unique(y_values))\n\nr = run_pca(3, X_train, X_test)\nX_train = r[0]\nX_test = r[1]\n\n#wcss = draw_kmeans(X_train)\ny_preds = run_kmeans(n_c = 3, X_train = X_train, X_test = X_test)\ny_pred_train = y_preds[0]\ny_pred_test = y_preds[1]\n\ncolors = ['navy', 'red', 'darkorange']\nfor n, color in enumerate(colors):\n data = X_train[y_pred_train == n]\n plt.scatter(data[:,0], data[:,1], s=0.8, color=color)\nfor n, color in enumerate(colors):\n data = X_test[y_pred_test == n]\n plt.scatter(data[:,0], data[:,1], marker='x', color=color)\nplt.title('cluster attribute area and perimeter')\nplt.xlabel('area')\nplt.ylabel('perimeter') \n#plt.xticks(())\n#plt.yticks(())\n\nplt.show()\n\n\n#for n, color in enumerate(colors):\n# data = X_train[y_pred_train == n]\n# plt.scatter(data[:,2], data[:,3], s=0.8, color=color)\n#for n, color in enumerate(colors):\n# data = X_test[y_pred_test == n]\n# plt.scatter(data[:,2], data[:,3], marker='x', color=color)\n#plt.title('cluster attribute length of kernel and width of kernel')\n#plt.xlabel('length of kernel')\n#plt.ylabel('width of kernel') \n##plt.xticks(())\n##plt.yticks(())\n#\n#plt.show()\n#\n#\n#for n, color in enumerate(colors):\n# data = X_train[y_pred_train == n]\n# plt.scatter(data[:,4], data[:,5], s=0.8, color=color)\n#for n, color in enumerate(colors):\n# data = X_test[y_pred_test == n]\n# plt.scatter(data[:,4], data[:,5], marker='x', color=color)\n#plt.title('cluster attribute asymmetry coefficient and length of kernel groove')\n#plt.xlabel('asymmetry coefficient')\n#plt.ylabel('length of kernel groove') \n##plt.xticks(())\n##plt.yticks(())\n#\n#plt.show()","sub_path":"kmeans_dr_seends.py","file_name":"kmeans_dr_seends.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"397587897","text":"n = int(input())\n\ncards = [[0 for i2 in range(14)] for i1 in range(4)]\n\nfor i in range(n):\n a = [i for i in input().split()]\n a1 = int(a[1])\n\n if(a[0] == 
'S'):\n cards[0][a1] = 1\n if(a[0] == 'H'):\n cards[1][a1] = 1\n if(a[0] == 'C'):\n cards[2][a1] = 1\n if(a[0] == 'D'):\n cards[3][a1] = 1\n\nfor s in range(4):\n for r in range(1, 14):\n if(cards[s][r] == 1):\n continue\n if (s == 0):\n print(\"S \"+ str(r))\n if (s == 1):\n print(\"H \"+ str(r))\n if (s == 2):\n print(\"C \"+ str(r))\n if (s == 3):\n print(\"D \"+ str(r))\n","sub_path":"Python3/ITP1_06_B.py","file_name":"ITP1_06_B.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"496708016","text":"from random import randint\n\nimport threading\nfrom time import sleep\n\n\ndef f(id):\n sleep(randint(0, 9))\n print('function: %s' % id)\n\ntmax = 10\nthreads = []\nfor i in range(100):\n t = threading.Thread(target=f, args=(i,))\n\n while len(threads) > tmax:\n threads = [t for t in threads if t.is_alive()]\n sleep(.1)\n\n threads.append(t)\n\n t.start()","sub_path":"libs/python3.6/threading.py","file_name":"threading.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"329815307","text":"\ndef stats_text_en(text):\n # replace punctuation with spaces and split the string into words\n if not isinstance(text,str): # raise an exception if the input is not a string\n raise ValueError(\"The input is not a string!\")\n text0=[]\n txt= text.replace('\\n', ' ').replace('.', ' ').replace(':', ' ').replace('--', ' ').replace('.',' ').replace('*',' ').replace('「',' ').replace('」',' ')\n txt1=txt.split()\n for i in txt1:\n if i >'\\u9fff' or i<'\\u4e00':\n text0.append(i)\n counts = {}\n # if the word is already in the dict, increment its count, otherwise set it to one\n for text in text0:\n if text in counts.keys():\n counts[text] = counts[text] + 1\n else:\n counts[text] = 1\n # sort from high to low\n count_text=sorted(counts.items(),key=lambda x:x[1],reverse=True)\n return count_text\n\n\ndef stats_text_cn(text):\n # collect the Chinese characters of text into a list\n if not isinstance(text,str): # raise an exception if the input is not a string\n raise ValueError(\"The input is not a string!\")\n list=[]\n for i in text:\n if '\\u4e00'<=i<='\\u9fff':\n list.append(i)\n counts = {}\n # if the character is already in the dict, increment its count, otherwise set it to one\n for i in list:\n if i in counts.keys():\n counts[i] = counts[i] + 1\n else:\n counts[i] = 1\n # sort from high to low\n count_text=sorted(counts.items(),key=lambda x:x[1],reverse=True)\n return count_text\n\ndef stats_text(text): # call stats_text_en and stats_text_cn and return the merged word-frequency results\n if not isinstance(text,str): # raise an exception if the input is not a string\n raise ValueError(\"The input is not a string!\")\n result = stats_text_en(text) + stats_text_cn(text)\n return result \n\n\ntext='''\nThe Zen of Python, by Tim Peters.\n\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.
\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do\n it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\n美胜于丑。\n显式优于隐式。\n简单胜于复杂。\n复杂总比复杂好。\n稀疏胜于稠密。\n可读性计数。\n特殊情况不足以打破规则。\n尽管实用性胜过纯洁性。\n错误永远不会悄悄地过去。\n除非明确沉默。\n面对悠闲,拒绝猜测的诱惑。\n应该有一种--最好只有一种--显而易见的方法\n它。\n不过,如果不是荷兰语的话,这种方式一开始可能并不明显。\n现在总比没有好。\n虽然从来没有比现在更好。\n如果实现很难解释,那是个坏主意。\n如果实现很容易解释,这可能是一个好主意。\n名称空间是一个非常好的主意——让我们做更多的事情吧!\n'''\n#if __name__ == \"__main__\":\n #result=stats_text(text)\n #print('stats ==>\\n',result)","sub_path":"exercises/1901090066/d08/mymodule/state_word.py","file_name":"state_word.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"438372053","text":"import strawberry\nfrom django.db import models\nfrom strawberry import auto\nfrom strawberry.type import StrawberryList, StrawberryOptional\n\nimport strawberry_django\nfrom strawberry_django import fields\n\n\nclass TypeModel(models.Model):\n boolean = models.BooleanField()\n string = models.CharField(max_length=50)\n foreign_key = models.ForeignKey(\n \"TypeModel\",\n blank=True,\n related_name=\"related_foreign_key\",\n on_delete=models.CASCADE,\n )\n one_to_one = models.OneToOneField(\n \"TypeModel\",\n blank=True,\n related_name=\"related_one_to_one\",\n on_delete=models.CASCADE,\n )\n many_to_many = models.ManyToManyField(\n \"TypeModel\", related_name=\"related_many_to_many\"\n )\n\n\ndef test_type():\n @strawberry_django.type(TypeModel)\n class Type:\n id: auto\n boolean: auto\n string: auto\n\n assert [(f.name, f.type) for f in fields(Type)] == [\n (\"id\", strawberry.ID),\n (\"boolean\", bool),\n (\"string\", str),\n ]\n\n\ndef test_inherit(testtype):\n @testtype(TypeModel)\n class Base:\n id: auto\n boolean: auto\n\n @strawberry_django.type(TypeModel)\n class Type(Base):\n string: auto\n\n assert [(f.name, f.type) for f in fields(Type)] == [\n (\"id\", strawberry.ID),\n (\"boolean\", bool),\n (\"string\", str),\n ]\n\n\ndef test_default_value():\n @strawberry_django.type(TypeModel)\n class Type:\n string: auto = \"data\"\n string2: str = strawberry.field(default=\"data2\")\n string3: str = strawberry_django.field(default=\"data3\")\n\n assert [(f.name, f.type) for f in fields(Type)] == [\n (\"string\", str),\n (\"string2\", str),\n (\"string3\", str),\n ]\n assert Type().string == \"data\"\n assert Type().string2 == \"data2\"\n assert Type().string3 == \"data3\"\n\n\ndef test_relationship_inherit(testtype):\n @testtype(TypeModel)\n class Base:\n foreign_key: auto\n related_foreign_key: auto\n one_to_one: auto\n related_one_to_one: auto\n many_to_many: auto\n related_many_to_many: auto\n another_name: auto = strawberry_django.field(field_name=\"foreign_key\")\n\n @strawberry_django.type(TypeModel)\n class Type(Base):\n pass\n\n assert [(f.name, f.type or f.child.type, f.is_list) for f in fields(Type)] == [\n (\"foreign_key\", strawberry_django.DjangoModelType, False),\n (\n 
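# (comment added for clarity) reverse FK and M2M relations resolve to lists of DjangoModelType\n 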
\"related_foreign_key\",\n StrawberryList(strawberry_django.DjangoModelType),\n True,\n ),\n (\"one_to_one\", strawberry_django.DjangoModelType, False),\n (\n \"related_one_to_one\",\n StrawberryOptional(strawberry_django.DjangoModelType),\n False,\n ),\n (\n \"many_to_many\",\n StrawberryList(strawberry_django.DjangoModelType),\n True,\n ),\n (\n \"related_many_to_many\",\n StrawberryList(strawberry_django.DjangoModelType),\n True,\n ),\n (\"another_name\", strawberry_django.DjangoModelType, False),\n ]\n","sub_path":"tests/types2/test_type.py","file_name":"test_type.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"500443487","text":"from __future__ import print_function\n\nfrom argparse import ArgumentParser\n\nimport pika\n\nfrom data_processing import DefaultConsumer, EntityManager\nfrom schemas import CustomerSchema\n\n\ndef main():\n arg_parser = ArgumentParser()\n arg_parser.add_argument(\"-rabbit_host\", action=\"store\", dest=\"rabbit_host\",\n help=\"Broker hostname\", default=\"localhost\")\n arg_parser.add_argument(\"-rabbit_port\", action=\"store\", dest=\"rabbit_port\",\n help=\"Broker port\", default=5672)\n arg_parser.add_argument(\"-db_host\", action=\"store\", dest=\"db_host\",\n help=\"Database host\", default=\"localhost\")\n arg_parser.add_argument(\"-db_port\", action=\"store\", dest=\"db_port\",\n help=\"Database port\", default=27017)\n arg_parser.add_argument(\"-db_collection\", action=\"store\", dest=\"db_collection\",\n help=\"Database collection\", default=\"customers\")\n args = arg_parser.parse_args()\n\n customer_schema = CustomerSchema()\n manager = EntityManager(args.db_host, args.db_port, \"customers\", customer_schema)\n consumer = DefaultConsumer(manager)\n\n exchange_name = \"oms\"\n queue_name = \"customer\"\n routing_key = \"customer.*\"\n\n connection = pika.BlockingConnection(pika.ConnectionParameters(args.rabbit_host, args.rabbit_port))\n channel = connection.channel()\n channel.exchange_declare(exchange=exchange_name, type=\"topic\", auto_delete=False, durable=True)\n channel.queue_declare(queue=queue_name, auto_delete=False, exclusive=False, durable=True)\n channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=routing_key)\n\n channel.basic_consume(consumer.consume, queue=queue_name, no_ack=False)\n channel.start_consuming()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"customer_service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"523504864","text":"import pickle\nimport os.path\nfrom nltk.util import bigrams\nfrom nltk.probability import ConditionalFreqDist\nimport re\n\ndata = open('food', 'r')\ntext = data.read()\n#stopWords = set(nltk.corpus.stopwords.words(\"french\")) #List of french stopwords\ntempBigrams = []\nfor sentence in re.split('\\.|\\?|!', text):\n #relevantWords = [w for w in re.sub(r',|;|:|\"', '', sentence).lower().split() not in stopWords]\n relevantWords = [w for w in re.sub(r',|;|:|\"', '', sentence).lower().split()]\n tempBigrams += list(bigrams(relevantWords))\n\n\nif os.path.isfile('feed.p'):\n predictions = pickle.load(open('feed.p', 'rb'))\n predictions.update(tempBigrams)\nelse:\n predictions = ConditionalFreqDist(tempBigrams)\n\ndata.close()\npickle.dump(predictions, open('feed.p', 
'wb'))\n","sub_path":"word-prediction.py","file_name":"word-prediction.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"398661443","text":"import mysql.connector\nimport sys\nimport os\nsys.path.insert(0,os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))\nfrom config import config\n\nhostName = None\nuserName = None\nuserPwd = None\nuserDb=None\n# initialize the handles so the finally block cannot raise a NameError\n# when mysql.connector.connect() itself fails\nconn = None\ncur = None\n\nif sys.platform==\"darwin\":\n os.system(\"clear\")\n hostName=config.macMysql[\"host\"]\n userName=config.macMysql[\"user\"]\n userPwd=config.macMysql[\"pwd\"]\n userDb=config.macMysql[\"database\"]\nelse:\n os.system(\"cls\")\n print(\"This script currently works with Mac OS only\")\n\ntry:\n conn=mysql.connector.connect(host=hostName,user=userName,passwd=userPwd,database=userDb)\n cur=conn.cursor()\n cur.execute(\"select * from hr.countries\")\n res = cur.fetchall()\n print(cur.description)\n print(res)\nexcept Exception as e:\n print(e)\nfinally:\n if cur:\n cur.close()\n if conn:\n conn.close()","sub_path":"src_python/dbmysql.py","file_name":"dbmysql.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"652733332","text":"class Solution:\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums_len = len(nums)\n sum_nums = sum(nums)\n a_sum = int((1 + nums_len) * nums_len / 2)\n return a_sum - sum_nums\n\n\nif __name__ == '__main__':\n a = Solution()\n print(a.missingNumber([3, 0, 1]))\n print(a.missingNumber([9, 6, 4, 2, 3, 5, 7, 0, 1]))\n","sub_path":"leetcode/268.MissingNumber.py","file_name":"268.MissingNumber.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"453505367","text":"from bot_handler.line_formater import LineFormater\nfrom bot_handler.message import Message\nfrom utils.amazon.tools import AmazonTools\nfrom utils.singleton import Singleton\n\n\nclass MessageCustomizer(metaclass=Singleton):\n\n @classmethod\n def __has_at_least_minimal_information(cls, product):\n has_title = product.title is not None and len(product.title) > 0\n has_price = product.price is not None\n has_img_url = product.image_url is not None\n return has_img_url and has_price and has_title\n\n @classmethod\n def build_message(cls, product, user=None, coupon=None):\n \"\"\"\n Coupon is a dictionary with the following structure:\n coupon = {\n 'code': args[0],\n 'final_price': args[1],\n 'urls': args[2]\n }\n \"\"\"\n\n if user is not None:\n style = user.get_telegram_name()\n tag = user.amazon_tag\n else:\n style = 'default'\n tag = None\n\n title = product.title\n description = product.description\n features = product.features\n price = product.price\n size = product.size\n old_price = product.standard_price\n img_url = product.image_url\n\n if tag is None:\n url = product.url\n else:\n url = AmazonTools.modify_url(url=product.url, tag=tag)\n\n end_date = product.end_date\n\n if cls.__has_at_least_minimal_information(product):\n\n msg = Message(\n image_url_line=LineFormater.get_image_url_line(img_url=img_url),\n title_line=LineFormater.get_title_line(title=title,\n style=style),\n description_line=LineFormater.get_description_line(description=description,\n features=features,\n coupon=coupon),\n size_line=LineFormater.get_size_line(size=size, style=style),\n 
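# (comment added for clarity) the remaining fields are likewise rendered by LineFormater helpers\n 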
temporal_line=LineFormater.get_temporal_line(end_date=end_date,\n style=style),\n old_price_line=LineFormater.get_old_price_line(old_price=old_price,\n style=style),\n price_line=LineFormater.get_price_line(price=price, style=style),\n coupon_line=LineFormater.get_coupon_line(coupon=coupon, style=style),\n shop_line=LineFormater.get_shop_line(url=url, style=style),\n link_line=LineFormater.get_link_line(url=url, style=style),\n watched_in_line=LineFormater.watched_in(style=style)\n )\n print(f\"Final message\\n{msg}\")\n return msg\n\n else:\n return None\n\n","sub_path":"bot_handler/message_customizer.py","file_name":"message_customizer.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"608475949","text":"import sys\nimport csv\n\ndef conver(self):\n fname_in = self\n fname_out = 'c{}.csv'.format(''.join(fname_in))\n\n with open(fname_in, newline='') as fin, \\\n open(fname_out, mode='w', newline='') as fout:\n\n reader = csv.reader(fin, delimiter=' ', skipinitialspace=True)\n writer = csv.writer(fout)\n\n writer.writerows(reader)\n \n","sub_path":"model/mkcsv.py","file_name":"mkcsv.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"617422807","text":"\nclient_doc = {\n '_id': '123ABC',\n 'name': 'some client in Germany',\n 'c': 'DE',\n 'static': [\n {'k': 1, 'v': 1}\n ],\n 'attributes': [\n {'id': '123RD_GE', 'k': 'DE_TICKER', 'v': 'SCG'},\n {'id': '124RD_GE', 'k': 'DE_LOCATION', 'v': 'somewhere in germany'}\n ]\n\n}\n\n\nref_data = {\n 'c': 'DE', 'id': '123RD_GE', 'k': 'DE_TICKER_NEW', 'values': [1,2,3,4,5]\n}","sub_path":"loader/kv_example/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"88258014","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\nimport math\nimport curvefunctions\n\n# constant\nNUM_OF_FUNCTIONS = 10\n# variable\nNUM_OF_SIMULATION_TIME = 5 #20000\nNUM_OF_INSTANCE = 2 #500\nSTEP_SIZE = 2\nTARGET_POS = 20\nTHRESHOLD = 0.05\n\nclass mcmc_sampling(object):\n def __init__(self, curve_history, best_performance = 0, target_pos = TARGET_POS):\n # TODO(Shufan): make sure number of curve_history > 0\n self.curve_history = curve_history\n self.history_num = len(curve_history)\n print (\"history_lenth = \", self.history_num)\n init_xi = np.ones((NUM_OF_FUNCTIONS), dtype=np.float) / NUM_OF_FUNCTIONS\n for model in curve_combination_models:\n init_xi = np.concatenate((init_xi, model_para[model]))\n self.xi_dim = len(init_xi)\n self.samples = np.broadcast_to(init_xi, (NUM_OF_INSTANCE, self.xi_dim))\n print (self.samples)\n self.target_pos = target_pos\n self.best_performance = best_performance\n \n def f_comb(self, x, Xi):\n # TODO(Shufan):calculation f_comb once a time (when update Xi)\n print (\"x = \", x)\n ret = np.zeros(NUM_OF_INSTANCE)\n for i in range (NUM_OF_INSTANCE):\n idx = NUM_OF_FUNCTIONS\n for j in range (NUM_OF_FUNCTIONS):\n if dimention_of_para[j] == 2:\n ret += Xi[i][j] * all_models[curve_combination_models[j]](x, Xi[i][idx], Xi[i][idx + 1])\n idx += 2\n elif dimention_of_para[j] == 3:\n print (\"Xi = \", Xi)\n ret += Xi[i][j] * all_models[curve_combination_models[j]](x, Xi[i][idx], Xi[i][idx + 1], Xi[i][idx + 2])\n idx += 3\n elif dimention_of_para[j] == 4:\n ret += Xi[i][j] * 
all_models[curve_combination_models[j]](x, Xi[i][idx], Xi[i][idx + 1], Xi[i][idx + 2], Xi[i][idx + 3])\n idx += 4\n print (\"idx = \", idx)\n return ret\n\n def sigma_sq(self, Xi):\n # TODO(Shufan): calculate sigma only once (when Xi is updated)\n ret = 0\n for i in range(self.history_num):\n temp = self.curve_history[i] - self.f_comb(i, Xi)\n ret += temp * temp\n return 1.0 * ret / self.history_num\n\n def prior(self, Xi):\n if not self.f_comb(1, Xi) < self.f_comb(self.target_pos, Xi):\n return 0\n for i in range(10):\n if not self.samples[i] > 0:\n return 0\n return 1.0 / np.sqrt(self.sigma_sq(Xi))\n\n def gaussian_distribution(self, x, Xi):\n # normal pdf: exp(-(y - f)^2 / (2 * sigma^2)) / sqrt(2 * pi * sigma^2)\n return np.exp(np.square(self.curve_history[x] - self.f_comb(x, Xi)) / (-2.0 * self.sigma_sq(Xi))) / np.sqrt(2 * np.pi * self.sigma_sq(Xi))\n\n def likelihood(self, Xi):\n ret = 1\n for i in range(self.history_num):\n ret *= self.gaussian_distribution(i, Xi)\n return ret\n\n def target_distribution(self, Xi):\n return self.likelihood(Xi) * self.prior(Xi)\n\n def HM_sampling(self):\n for i in range(NUM_OF_SIMULATION_TIME):\n # get new Instance\n new_values = np.random.randn(NUM_OF_INSTANCE, self.xi_dim) * STEP_SIZE + self.samples\n\n alpha = np.minimum(1, self.target_distribution(new_values) / self.target_distribution(self.samples))\n\n u = np.random.rand(NUM_OF_INSTANCE, self.xi_dim)\n\n change_value_flag = (u < alpha).astype(int)\n\n self.samples = self.samples * (1 - change_value_flag) + new_values * change_value_flag\n\n # print (self.samples)\n\n def generate_expect_y(self):\n return np.sum(self.f_comb(TARGET_POS, self.samples)) / NUM_OF_INSTANCE\n\n def assess(self):\n greater_num = 0\n y = self.f_comb(TARGET_POS, self.samples)\n for i in range(NUM_OF_INSTANCE):\n if y[i] > self.best_performance:\n greater_num += 1\n if greater_num / NUM_OF_INSTANCE < THRESHOLD:\n return False\n else:\n return True\n\nif __name__ == \"__main__\":\n ret = mcmc_sampling([1,2,3,4,5], 0.95)\n ret.HM_sampling()\n print (\"Predict y = \", ret.generate_expect_y())\n","sub_path":"src/sdk/pynni/nni/curvefitting_assessor_rewrite/curvefitting_assessor.py","file_name":"curvefitting_assessor.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"315423264","text":"#!/usr/bin/python\n#\n# spec_rw.py\n#\n# Read and write routines used when reducing spectra. 
Specifically\n# designed for data taken with the Adams Observatory Lhires III\n# long-slit spectrograph.\n#\n# List of Functions:\n# RDFITSNGL: Reads a single 2-D FITS image file\n# RDFITSMULT: Read a list of 2-D FITS image files\n# RDFITSHEAD: Read in the FITS headers of a list of FITS files\n# WRTFITSNGL: Write a single 2-D FITS image to file\n# READSPEC: Read an ASCII file of between 2 and 10 columns\n# WRTSPEC: Write an ASCII file of a reduced spectrum, 6 columns (see below)\n# PLOTSNGLSPEC: Write a single spectrum to an EPS plot\n#\n# Dependencies:\n# pyfits\n# numpy\n# asciitable\n# matplotlib (for PLOTSNGLSPEC)\n#\n# History:\n# DGW, 2016-05-10: re-written from the original, spec_redux.py\n# DGW, 2016-05-13: edited WRTSPEC to include a sixth column (wvcal)\n# DGW, 2016-06-14: edited READSPEC to read as many as 15 columns\n# DGW, 2016-10-24: Included PLOTSNGLSPEC from old spec_redux.py module\n# DGW, 2017-10-17: Wrote RDFITSHEAD\n#\n\nimport pyfits\nimport numpy\nimport asciitable\nimport matplotlib.pyplot as plt\n\n# Function RDFITSNGL: Read a single 2-D FITS image file using pyfits.\n# DIR: string directory path\n# IM: string FITS image name\n# Returns: a 2-D array with the image data\ndef rdfitsngl(dir,im):\n hdulist = pyfits.open(dir+im)\n data = hdulist[0].data\n return data\n\n# Function RDFITSMULT: Read in 2-D FITS images from a list.\n# DIR: string directory path\n# LIST: string list of 2-D FITS image names\n# Returns: a 3-D array of 2-D images; Z-axis length is length of list\ndef rdfitsmult(dir,list):\n numims = len(list)\n # Open a test image to determine image dimensions\n hdulist = pyfits.open(dir+list[0], ignore_missing_end=True)\n data = hdulist[0].data\n xsz = data.shape[0]\n ysz = data.shape[1]\n # Create the data array\n imarr = numpy.zeros((xsz,ysz,numims)).copy()\n loop_over = range(numims)\n for i in loop_over:\n hdulist = pyfits.open(dir+list[i], ignore_missing_end=True)\n data = hdulist[0].data\n imarr[:,:,i] = data\n return imarr\n\n# Function RDFITSHEAD: Read in FITS image headers from a list.\n# DIR: string directory path\n# LIST: string list of 2-D FITS image names\n# Returns: headers for each FITS image\ndef rdfitshead(dir,list):\n numims = len(list)\n # Open a test image to determine image dimensions\n hdulist = pyfits.open(dir+list[0], ignore_missing_end = True)\n header = hdulist[0].header\n headlength = len(header)\n # Create the list of headers\n headlst = [\"\" for x in range(len(list))]\n loop_over = range(numims)\n for i in loop_over:\n hdulist = pyfits.open(dir+list[i], ignore_missing_end = True)\n header = hdulist[0].header\n #headarr[:,i] = header\n headlst [i] = header\n return headlst\n\n# Function WRTFITSNGL: Writes out single FITS image, dimensions unspecified\n# IMAGE: the image array\n# NAME: string name for output -- can include directory path\n# Returns: a flag valued 1\ndef wrtfitsngl(image,name):\n hdu = pyfits.PrimaryHDU(image)\n hdulist = pyfits.HDUList([hdu])\n hdulist.writeto(name)\n flag = 1\n return flag\n\n# Function READSPEC: reads in an ASCII txt format spectrum using numpy\n# FILENAME: the .txt file to read in\n# NSKIP: the number of rows to skip at beginning of file (header length)\n# Returns: the data, separated into columns (2 MIN, 15 MAX)\ndef readspec(filename,nskip):\n data = numpy.loadtxt(filename,skiprows=nskip)\n ncols = data.shape[1]\n # Assign column variables\n if ncols >= 2:\n d1 = data[:,0]\n d2 = data[:,1]\n if ncols >= 3:\n d3 = data[:,2]\n if ncols >= 4:\n d4 = data[:,3]\n if ncols >= 5:\n d5 = data[:,4]\n if 
ncols >= 6:\n d6 = data[:,5]\n if ncols >= 7:\n d7 = data[:,6]\n if ncols >= 8:\n d8 = data[:,7]\n if ncols >= 9:\n d9 = data[:,8]\n if ncols >=10:\n d10= data[:,9]\n if ncols >=11:\n d11= data[:,10]\n if ncols >=12:\n d12= data[:,11]\n if ncols >=13:\n d13 = data[:,12]\n if ncols >=14:\n d14 = data[:,13]\n if ncols >=15:\n d15 = data[:,14]\n # Return the results\n if ncols == 2:\n return d1,d2\n if ncols == 3:\n return d1,d2,d3\n if ncols == 4:\n return d1,d2,d3,d4\n if ncols == 5:\n return d1,d2,d3,d4,d5\n if ncols == 6:\n return d1,d2,d3,d4,d5,d6\n if ncols == 7:\n return d1,d2,d3,d4,d5,d6,d7\n if ncols == 8:\n return d1,d2,d3,d4,d5,d6,d7,d8\n if ncols == 9:\n return d1,d2,d3,d4,d5,d6,d7,d8,d9\n if ncols == 10:\n return d1,d2,d3,d4,d5,d6,d7,d8,d9,d10\n if ncols == 11:\n return d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11\n if ncols == 12:\n return d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12\n if ncols == 13:\n return d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13\n if ncols == 14:\n return d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14\n if ncols == 15:\n return d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15\n\n# Function WRTSPEC: writes out a reduced spec in ASCII format\n# N.B.: WANT TO INCLUDE A HEADER\n# SPEX: Output spectra from extract/wvcal/contrect\n# NAMES: column names\n# FILENAMES: what to call them\n# Returns: a flag of value 1\ndef wrtspec(spex,names,filenames):\n numcols = spex.shape[1]\n numsp = spex.shape[2]\n n1 = names[0]\n n2 = names[1]\n n3 = names[2]\n n4 = names[3]\n n5 = names[4]\n n6 = names[5]\n loop_over = range(numsp)\n for i in loop_over:\n asciitable.write({n1:spex[:,0,i],n2:spex[:,1,i],n3:spex[:,2,i],\n n4:spex[:,3,i],n5:spex[:,4,i],n6:spex[:,5,i]},\n filenames[i],names=names)\n flag = 1\n return flag\n\n# Function plotsnglspec: plot a single spectrum in .eps format\n# wv: the wavelength array\n# sp: the extracted spectrum\n# figxsz: the int x-size of the figure\n# figysz: the int y-size of the figure\n# xlabel: the string label on the x-axis\n# ylabel: the string label on the y-axis\n# filename: the string filename (must include .eps)\n# yscale: optional input, axis scale defaults to linear, but could be 'log'\ndef plotsnglspec(wv,sp,wvmin,wvmax,spmin,spmax,\n figxsz,figysz,xlabel,ylabel,filename,yscale='linear'):\n \n fig = plt.figure(figsize=(figxsz,figysz))\n \n ax1 = fig.add_subplot(111,autoscale_on=False,xlim=(wvmin,wvmax),\n ylim=(spmin,spmax))\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(ylabel)\n ax1.set_yscale(yscale)\n ax1.plot(wv,sp,'k-')\n plt.savefig(filename,dpi=200,facecolor='w',edgecolor='w',\n orientation='portrait',transparent='False',format='eps')\n plt.close(fig)\n flag = 1\n return flag\n","sub_path":"Honors Thesis Research/Code/Old Versions/OG Research Program/spec_rw.py","file_name":"spec_rw.py","file_ext":"py","file_size_in_byte":6768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"467804910","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n3.5.1\n\"\"\"\n\n# 基本形なモジュールのインポート\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom util import get_iris_data\nfrom util import plot_decision_regions\n\n\"\"\"XORの分離\"\"\"\n\n# データセットの準備\nnp.random.seed(0)\n\nX_xor = np.random.randn(200, 2)\ny_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] >0)\n\ny_xor = np.where(y_xor, 1, -1)\n\n# RBFカーネルによるSVM\nfrom sklearn.svm import SVC\n\n# RBFカーネルによるSVM\nsvm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)\nsvm.fit(X_xor, y_xor)\nplot_decision_regions(X_xor, y_xor,\n classifier=svm)\n\n# 
Plot the decision boundary\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()\n\n\n\n\n\"\"\"Classify iris with an SVM\"\"\"\n\n# Prepare the dataset\nX_train_std, X_test_std, y_train, y_test = get_iris_data()\n\nX_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))\n\n# SVM with an RBF kernel (fit on the training data, plot on the combined data)\nsvm = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0)\nsvm.fit(X_train_std, y_train)\nplot_decision_regions(X_combined_std, y_combined,\n classifier=svm)\n\n# Plot the decision boundary\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()\n\n","sub_path":"03/3-5-1_kernel_tric_SVM_for_XOR.py","file_name":"3-5-1_kernel_tric_SVM_for_XOR.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"554605151","text":"import io\nimport os\nimport time\nimport sqlite3\nimport pandas as pd\nimport numpy as np\nfrom multiprocessing import Pool\n\n\ndef query_NPDB(cur_mass_row, npdb):\n results = []\n\n for row in npdb.to_dict('records'):\n if (row['monoisotopic_mass'] >= cur_mass_row[3]) and (row['monoisotopic_mass'] <= cur_mass_row[4]):\n temp = [cur_mass_row[0], cur_mass_row[1], cur_mass_row[2], cur_mass_row[5], row['structure_id'], row['monoisotopic_mass'], row['source_id'], row['source_name'], row['inchi'], row['inchi_key2'], row['smile']]\n results.append(temp)\n else:\n pass\n labels = ['ms_name', 'mz', 'observed_mm', 'adduct_name', 'NPDB_ID', 'NPDB_mm', 'source_id', 'source_name', 'InChI', 'InChI_key', 'SMILES']\n results = pd.DataFrame(results, columns=labels)\n #print(results.columns)\n return results\n\n\ndef NPDB_to_pd(npdb):\n conn = sqlite3.connect(npdb)\n c = conn.cursor()\n query = c.execute(\"SELECT structure.structure_id,structure.monoisotopic_mass, structure_has_data_source.source_id, structure_has_data_source.source_name, structure.inchi,structure.inchi_key2,structure.smile FROM structure left join structure_has_data_source on structure_has_data_source.structure_id = structure.structure_id\")\n cols = [column[0] for column in query.description]\n results = pd.DataFrame.from_records(data=query.fetchall(), columns=cols)\n c.close()\n temp = results.head(n=100)\n temp.to_csv(\"table4.csv\")\n return results\n\n\ndef get_mass_range(cur_mass_row, tolerance):\n\n mz = cur_mass_row[1]\n tol = mz * (tolerance / 1000000)\n top = mz + tol\n btm = mz - tol\n temp = np.append(cur_mass_row, [btm, top], axis=0)\n return temp\n\n\ndef get_adduct_data(cur_mass_row, adducts_df, tolerance):\n \"\"\"\n Gets possible mm's for the mz of cur_mass\n \"\"\"\n def get_cur_mm_adducts(cur_adduct_row):\n mm_low = (cur_mass_row[2] - cur_adduct_row[5]) / cur_adduct_row[4]\n mm_high = (cur_mass_row[3] - cur_adduct_row[5]) / cur_adduct_row[4]\n mm = (cur_mass_row[1] - cur_adduct_row[5]) / cur_adduct_row[4]\n adduct_name = cur_adduct_row[0]\n if mm <= 0:\n return pd.DataFrame()\n else:\n df = pd.DataFrame([[cur_mass_row[0], cur_mass_row[1], mm, mm_low, mm_high, adduct_name]],\n columns=['ms_name', 'mz', 'observed_mm', 'mm_low', 'mm_high', 'adduct_name'])\n return df\n\n cur_mass_row = get_mass_range(cur_mass_row, tolerance)\n\n results_df = []\n for row in adducts_df.values:\n temp = get_cur_mm_adducts(row)\n results_df.append(temp)\n\n results_df = pd.concat(results_df).reset_index(drop=True)\n\n return results_df\n\n\ndef main():\n\n adducts_df = pd.read_csv(\"./docs/ESI-MS-adducts.csv\")\n print(\"Step 1: Adducts file is loaded!\")\n df = pd.read_csv(\"./Wisecaver/test.txt\", index_col=None, header=0)\n print(\"Step 2: Metabolome file is loaded!\")\n 
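# Worked example (illustrative numbers, added for clarity): for mz = 300.0 at\n # tolerance = 30 ppm, get_mass_range computes tol = 300.0 * (30 / 1000000) = 0.009,\n # giving the search window [299.991, 300.009] used downstream by query_NPDB.\n 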
df.columns = ['ms_name', 'mz']\n df = df.astype({'mz': float})\n\n results = []\n tolerance = 30\n\n with Pool(processes=5) as pool:\n for cur_mass_row in df.values:\n # for i, cur_mass_row in df.iterrows():\n pool.apply_async(get_adduct_data, args=(cur_mass_row, adducts_df, tolerance), callback=results.append)\n pool.close()\n pool.join()\n df = pd.concat(results).reset_index(drop=True)\n print(\"Step 3: Adducts are added to the metabolome file!\")\n temp2 = df.head(n=100)\n temp2.to_csv(\"table3.csv\")\n\n npdb = NPDB_to_pd(\"NPDB.sqlite\")\n print(\"Step 4: NPDB is converted to a dataframe!\")\n results_df = []\n with Pool(processes=4) as pool:\n for cur_mass_row in df.values:\n pool.apply_async(query_NPDB, args=(cur_mass_row, npdb), callback=results_df.append)\n pool.close()\n pool.join()\n\n results_df = pd.concat(results_df).reset_index(drop=True)\n temp3 = results_df.head(n=100)\n temp3.to_csv(\"table5.csv\")\n print(\"Step 5: NPDB is queried with the curated adducts dataframe!\")\n\n # write first set of files\n if not results_df.empty:\n print(\"Step 6: Writing 1st set of files!\")\n res_file = os.path.join(\"./MassNPDB\", 'full.results.csv')\n results_df.to_csv(res_file, sep=',', index=False)\n # ^^ ms_name, mz, observed_mm, NPDB_ID, NPDB_mm, source_id, source_name, InChI, InChI_key, SMILES\n\n res2_file = os.path.join(\"./MassNPDB\", 'NPDB_entries.txt')\n results_df[['NPDB_ID', 'NPDB_mm', 'SMILES']].drop_duplicates('NPDB_ID').to_csv(res2_file, index=False)\n # ^^ NPDB_ID, NPDB_mm, SMILES\n\n\n # Write second set of files\n main_cols = ['ms_name', 'NPDB_ID', 'observed_mm', 'SMILES', 'adduct_name', 'InChI']\n if not results_df.empty:\n print(\"Step 7: Writing 2nd set of files!\")\n # for main results, we only want one entry per ms_name_npdb_id\n res_file = os.path.join(\"./massNPDB\", 'predicted_structures.csv')\n results_df[main_cols].to_csv(res_file, sep=',', index=False)\n # ^^ ms_name, NPDB_ID, NPDB_mm, SMILES, InChI\n\n res_file = os.path.join(\"./massNPDB\", 'observed_mm.csv')\n results_df[['ms_name', 'mz', 'observed_mm']].drop_duplicates().to_csv(res_file, sep=',', index=False)\n\nif __name__ == \"__main__\":\n start_time = time.time()\n main()\n end_time = time.time()\n print(end_time - start_time)","sub_path":"integrative_omics/queryMassNPDB2.py","file_name":"queryMassNPDB2.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"189258595","text":"\n\n#calss header\nclass _THREADBARE():\n\tdef __init__(self,): \n\t\tself.name = \"THREADBARE\"\n\t\tself.definitions = [u'Threadbare material or clothes have become thin or damaged because they have been used a lot: ', u'A threadbare excuse, argument, or idea is not strong and no longer persuades people because it is old or has been used too much: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_threadbare.py","file_name":"_threadbare.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"183388462","text":"\"\"\"\nQuestion: https://leetcode.com/problems/best-time-to-buy-and-sell-stock/\nSay you have an array for which the ith element is the price of a given stock 
on day i.\n\nIf you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the stock),\n design an algorithm to find the maximum profit.\n\nNote that you cannot sell a stock before you buy one.\n\nExample 1:\n\nInput: [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.\n Not 7-1 = 6, as selling price needs to be larger than buying price.\nExample 2:\n\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case, no transaction is done, i.e. max profit = 0.\n\"\"\"\nimport unittest\n\n\ndef get_max_profit_single_purchase(stock_prices, can_lose=True):\n # Calculate the max profit\n if len(stock_prices) < 2:\n # Initial clause removes the possibility of an empty set of array prices.\n raise Exception(\"Not enough data to evaluate the profit.\")\n lowest_buy = stock_prices[0]\n max_profit = -float('inf') if can_lose else 0\n\n # Time complexity = O(n) must iterate over the full array.\n for cur_idx in range(1, len(stock_prices)):\n cur_price = stock_prices[cur_idx]\n\n sell_profit = cur_price - lowest_buy\n max_profit = max(max_profit, sell_profit)\n\n # If a lower buy in price is met then try to get a better buy\n # in as any price after this point must be better than the previous low.\n lowest_buy = min(lowest_buy, cur_price)\n\n # If no profit is able to be made then return 0 as no profit could have been made that day.\n # Should it return a - value in this case?\n return max_profit\n\n\n# Tests\nclass Test(unittest.TestCase):\n\n def test_error_with_empty_prices(self):\n with self.assertRaises(Exception):\n get_max_profit_single_purchase([])\n\n def test_error_with_one_price(self):\n with self.assertRaises(Exception):\n get_max_profit_single_purchase([1])\n\n def test_price_goes_down_all_day(self):\n actual = get_max_profit_single_purchase([9, 7, 4, 1], can_lose=True)\n expected = -2\n self.assertEqual(actual, expected)\n\n def test_price_goes_down_all_day_no_losing_days(self):\n # Don't buy if there was no uptick for that day.\n actual = get_max_profit_single_purchase([9, 7, 4, 1], can_lose=False)\n expected = 0\n self.assertEqual(actual, expected)\n\n def test_price_goes_up_then_down(self):\n actual = get_max_profit_single_purchase([1, 5, 3, 2])\n expected = 4\n self.assertEqual(actual, expected)\n\n def test_price_goes_down_then_up(self):\n actual = get_max_profit_single_purchase([7, 2, 8, 9])\n expected = 7\n self.assertEqual(actual, expected)\n\n def test_price_goes_up_all_day(self):\n actual = get_max_profit_single_purchase([1, 6, 7, 9])\n expected = 8\n self.assertEqual(actual, expected)\n\n def test_price_stays_the_same_all_day(self):\n actual = get_max_profit_single_purchase([1, 1, 1, 1])\n expected = 0\n self.assertEqual(actual, expected)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"python/coding_challenges/leet_code/best_time_to_buy_and_sell_stock.py","file_name":"best_time_to_buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"626093863","text":"import sys\nsys.path.insert(0, '/home/emy24/data/cobre_mcic_oasis/utilities')\nfrom matplotlib import cm\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm_notebook as tqdm\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nfrom preprocessing import crop_empty, get_loader\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import 
StratifiedShuffleSplit\n\nimport re\nimport scipy\nimport torch\nimport numpy as np\nimport pandas as pd\nimport scipy.ndimage as sn\nimport matplotlib.pyplot as plt\nimport torch.utils.data as data_utils\n\ndef get_oasis_info(IDs, study):\n if study == 1:\n study = 'MR1'\n elif study == 2:\n study = 'MR2'\n \n file_path = \"/home/emy24/data/cobre_mcic_oasis/oasis_phenotype.csv\"\n phen = pd.read_csv(file_path, low_memory=False, encoding = \"ISO-8859-1\")\n phen = phen.rename(index=str, columns={\"M/F\": \"Gender\", \"CDR\": \"SubjectType\"})\n\n '''Remove the other MR'''\n keep = []\n for n in range(phen.shape[0]):\n keep += [study in phen.iloc[n,0]]\n keep = np.asarray(keep)\n keep = np.argwhere(keep*1).flatten()\n phen = phen.iloc[keep].reset_index(drop=True)\n\n '''Clean up data'''\n phen = phen.fillna(0) # Fill NaN with 0\n phen[\"Hand\"] = (phen[\"Hand\"] == \"R\")*1 # L(0) R(1) \n phen[\"Gender\"] = (phen[\"Gender\"] == \"F\")*1 # Male(0) Female(1)\n\n #Get rid of string in ID\n ID = []\n for i in phen[\"ID\"]:\n ID += [int(re.findall(\"\\d+\", i)[1])]\n ID = np.asarray(ID).astype(int)\n ID = pd.DataFrame(ID)\n phen[\"ID\"] = ID\n\n '''Match existing subject to spreadsheet'''\n subjects_indeces = []\n for subject in IDs:\n subjects_indeces += [np.argwhere(subject == phen[\"ID\"]).flatten()]\n subjects_indeces = np.asarray(subjects_indeces).flatten()\n subjects_phen = phen.iloc[subjects_indeces]\n\n return subjects_phen.reset_index(drop=True)\n\ndef pandas_to_array(data):\n \"\"\"\n Transform target values that are in pandas object to array. \n It also eliminate all the target values that we are not interested.\n data: tuple with image and phen (in pandas obeject)\n one_hot: whether to one-hot encode the sites\n \"\"\"\n phen = data\n phen = phen[['ID', 'Age', 'Gender', 'SubjectType']]\n phen = phen.reset_index(drop=True)\n phen = phen.values\n\n return phen\n\n\ndef get_seg(study):\n '''study 1 for MR1 and study 2 for MR2'''\n \n aseg = np.load('/home/emy24/data/cobre_mcic_oasis/oasis'+str(study)+'_taseg.npy')\n image_id = np.load('/home/emy24/data/cobre_mcic_oasis/oasis'+str(study)+'_id.npy')\n image_phen = get_oasis_info(image_id, study) \n \n seq = iaa.Sequential([iaa.Flipud(1)], random_order=False)\n\n '''Caudate'''\n right_caudate = (aseg == 50).astype('uint8') \n left_caudate = (aseg == 11).astype('uint8') \n flipped_caudate = seq.augment_images(left_caudate)\n caudate = np.concatenate((right_caudate, flipped_caudate),0)\n caudate, _ = crop_empty((caudate, image_phen),(80,80,80))\n caudate = np.expand_dims(caudate,1)\n right_caudate = []\n left_caudate = []\n flipped_caudate = []\n\n '''Putamen'''\n right_putamen = (aseg == 51).astype('uint8') \n left_putamen = (aseg == 12).astype('uint8') \n flipped_putamen = seq.augment_images(left_putamen)\n putamen = np.concatenate((right_putamen, flipped_putamen),0)\n putamen, _ = crop_empty((putamen, image_phen),(80,80,80))\n putamen = np.expand_dims(putamen,1)\n right_putamen = []\n left_putamen = []\n flipped_putamen = []\n\n '''Pallidum'''\n right_pallidum = (aseg == 52).astype('uint8') \n left_pallidum = (aseg == 13).astype('uint8') \n flipped_pallidum = seq.augment_images(left_pallidum)\n pallidum = np.concatenate((right_pallidum, flipped_pallidum),0)\n pallidum, _ = crop_empty((pallidum, image_phen),(80,80,80))\n pallidum = np.expand_dims(pallidum,1)\n right_pallidum = []\n left_pallidum = []\n flipped_pallidum = []\n\n '''Thalamus(proper)'''\n right_thalamus = (aseg == 49).astype('uint8') \n left_thalamus = (aseg == 
10).astype('uint8') \n flipped_thalamus = seq.augment_images(left_thalamus)\n thalamus = np.concatenate((right_thalamus, flipped_thalamus),0)\n thalamus, _ = crop_empty((thalamus, image_phen),(80,80,80))\n thalamus = np.expand_dims(thalamus,1)\n right_thalamus = []\n left_thalamus = []\n flipped_thalamus = []\n\n '''Hippocampus'''\n right_hippocampus = (aseg == 53).astype('uint8') \n left_hippocampus = (aseg == 17).astype('uint8') \n flipped_hippocampus = seq.augment_images(left_hippocampus)\n hippocampus = np.concatenate((right_hippocampus, flipped_hippocampus),0)\n hippocampus, _ = crop_empty((hippocampus, image_phen),(80,80,80))\n hippocampus = np.expand_dims(hippocampus,1)\n right_hippocampus = []\n left_hippocampus = []\n flipped_hippocampus = []\n\n segs = np.concatenate((caudate, putamen, pallidum, thalamus, hippocampus), 1)\n phen = pd.concat([image_phen, image_phen],ignore_index=True, axis=0)\n \n return segs, phen\n\ndef thrshld_finder(rescaled_sample, avg_vols):\n T = np.linspace(0.1,0.9,41)\n vol = []\n for t in T:\n vol += [(rescaled_sample>t).sum()]\n vol = np.asarray(vol)\n dif = np.abs(vol-avg_vols)\n arg_min = np.argmin(dif)\n return T[arg_min]\n\n\ndef volume_normalizer(segs, avg_vols=5660):\n norm_segs = np.zeros_like(segs)\n N = segs.shape[0]\n C = segs.shape[1]\n\n '''Normalization'''\n for c in tqdm(range(C)):\n for n in tqdm(range(N)):\n sample = segs[n,c,:,:,:].astype('float32')\n vol = sample.sum()\n scale = avg_vols/vol\n \n '''Matrix'''\n _s = 1/((scale)**(1/3))\n M = np.array([[_s,0,0],[0,_s,0],[0,0,_s]]) \n \n '''Offset'''\n #As in goo.gl/AzNjwU\n c_in = 0.5*np.array(sample.shape)\n c_out = c_in\n offset=c_in-c_out.dot(M) \n \n '''Scaling'''\n rescaled_sample = scipy.ndimage.affine_transform(sample, M, offset=offset)\n thrshld = thrshld_finder(rescaled_sample, avg_vols)\n rescaled_sample = (rescaled_sample>thrshld).astype('uint8')\n \n '''Save''' \n norm_segs[n,c,:,:,:] = rescaled_sample\n \n return norm_segs\n\n\ndef data(batch_size, vol_norm, seed=23, train_drop_last=True, train_shuffle=True):\n if vol_norm:\n _norm_vol1 = np.load('/home/emy24/shape_analysis/checkpoint/oasis1_norm_segs.npy')\n norm_vol2 = np.load('/home/emy24/shape_analysis/checkpoint/oasis2_norm_segs.npy')\n else:\n _norm_vol1 = np.load('/home/emy24/shape_analysis/checkpoint/oasis1_segs.npy')\n norm_vol2 = np.load('/home/emy24/shape_analysis/checkpoint/oasis2_segs.npy')\n \n _vol1_side1 = _norm_vol1[:_norm_vol1.shape[0]//2]\n _vol1_side2 = _norm_vol1[_norm_vol1.shape[0]//2:]\n\n vol2_side1 = norm_vol2[:norm_vol2.shape[0]//2]\n vol2_side2 = norm_vol2[norm_vol2.shape[0]//2:]\n\n image_id1 = np.load('/home/emy24/data/cobre_mcic_oasis/oasis1_id.npy')\n image_id2 = np.load('/home/emy24/data/cobre_mcic_oasis/oasis2_id.npy')\n\n phen1 = get_oasis_info(image_id1, 1)\n phen2 = get_oasis_info(image_id2, 2) \n \n healthy_idx1 = []\n healthy_idx1 += [np.argwhere(phen1['SubjectType'] == 0).flatten()]\n healthy_idx1 = np.sort(np.hstack(healthy_idx1))\n\n _vol1_side1 = _vol1_side1[healthy_idx1]\n _vol1_side2 = _vol1_side2[healthy_idx1]\n _image_phen1 = phen1.iloc[healthy_idx1].reset_index(drop=True)\n\n healthy_idx2 = []\n healthy_idx2 += [np.argwhere(phen2['SubjectType'] == 0).flatten()]\n healthy_idx2 = np.sort(np.hstack(healthy_idx2))\n\n vol2_side1 = vol2_side1[healthy_idx2]\n vol2_side2 = vol2_side2[healthy_idx2]\n image_phen2 = phen2.iloc[healthy_idx2].reset_index(drop=True)\n\n '''Remove MR2 subject from MR1'''\n test_idx1 = []\n mr2_id = image_phen2['ID']\n for i in mr2_id:\n test_idx1 += 
[np.argwhere(_image_phen1['ID'] == i).flatten()]\n test_idx1 = np.asarray(test_idx1).flatten()\n\n phen1_idx = np.arange(_image_phen1.shape[0])\n train_val_test_idx = np.setdiff1d(phen1_idx, test_idx1)\n\n # Removing logitudinal study\n vol1_side1 = _vol1_side1[train_val_test_idx] \n vol1_side2 = _vol1_side2[train_val_test_idx] \n image_phen1 = _image_phen1.iloc[train_val_test_idx].reset_index(drop=True)\n\n '''Convert the Pandas object to numpy array'''\n image_phen1 = pandas_to_array(image_phen1) # Removed Logitudinal Study\n _image_phen1 = pandas_to_array(_image_phen1) #Subjects from MR1 without removing longitudinal study\n image_phen2 = pandas_to_array(image_phen2)\n\n '''Obtain train, val and test index'''\n xTrain = []\n yTrain = []\n xVal = []\n yVal = []\n xTest = []\n yTest = []\n\n idx = np.arange(image_phen1.shape[0])\n age = np.min(image_phen1[:,1])\n group = np.zeros(image_phen1.shape[0])\n for i in range(8):\n idx0 = np.argwhere(image_phen1[:,1]>age).flatten()\n idx1 = np.argwhere(image_phen1[:,1]<(age+9.5)).flatten()\n age = age + 9.5\n idx = np.intersect1d(idx0, idx1)\n np.put(group, idx, i)\n\n sss = StratifiedShuffleSplit(n_splits=1, test_size=0.27, random_state=seed)\n trn_val_idx, test_idx2 = sss.split(group, group).__next__()\n\n sss2 = StratifiedShuffleSplit(n_splits=1, test_size=0.23, random_state=seed)\n _group = group[trn_val_idx]\n trn_idx, val_idx = sss2.split(_group, _group).__next__()\n\n '''Concatenate'''\n trn_vol1_side1 = vol1_side1[trn_val_idx][trn_idx]\n trn_vol1_side2 = vol1_side2[trn_val_idx][trn_idx]\n xTrain += [trn_vol1_side1] + [trn_vol1_side2]\n yTrain += [image_phen1[trn_val_idx][trn_idx]] + [image_phen1[trn_val_idx][trn_idx]]\n\n val_vol1_side1 = vol1_side1[trn_val_idx][val_idx]\n val_vol1_side2 = vol1_side2[trn_val_idx][val_idx]\n xVal += [val_vol1_side1] + [val_vol1_side2]\n yVal += [image_phen1[trn_val_idx][val_idx]] + [image_phen1[trn_val_idx][val_idx]]\n\n test2_vol1_side1 = vol1_side1[test_idx2]\n test2_vol1_side2 = vol1_side2[test_idx2]\n xTest += [test2_vol1_side1] + [test2_vol1_side2]\n yTest += [image_phen1[test_idx2]] + [image_phen1[test_idx2]]\n\n test1_vol1_side1 = _vol1_side1[test_idx1]\n test1_vol1_side2 = _vol1_side2[test_idx1]\n xTest += [test1_vol1_side1] + [test1_vol1_side2]\n yTest += [_image_phen1[test_idx1]] + [_image_phen1[test_idx1]]\n\n image_phen2[:,0] = image_phen2[:,0]*-1\n xTest += [vol2_side1] + [vol2_side2]\n yTest += [image_phen2] + [image_phen2]\n\n xTrain = np.concatenate(xTrain,0)\n yTrain = np.concatenate(yTrain,0)\n\n xVal = np.concatenate(xVal,0)\n yVal = np.concatenate(yVal,0)\n\n xTest = np.concatenate(xTest,0)\n yTest = np.concatenate(yTest,0)\n \n train = (xTrain, yTrain)\n val = (xVal, yVal)\n test = (xTest,yTest)\n \n '''Dataloader'''\n\n xTrain = torch.from_numpy(xTrain)\n xVal = torch.from_numpy(xVal)\n xTest = torch.from_numpy(xTest)\n\n yTrain = torch.from_numpy(yTrain)\n yVal = torch.from_numpy(yVal)\n yTest = torch.from_numpy(yTest)\n\n\n trainset = data_utils.TensorDataset(xTrain, yTrain)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size= batch_size, \n shuffle=train_shuffle, drop_last=train_drop_last, num_workers=0)\n\n valset = data_utils.TensorDataset(xVal, yVal)\n valloader = torch.utils.data.DataLoader(valset, batch_size = batch_size, \n shuffle=True, drop_last=True, num_workers=0)\n\n testset = data_utils.TensorDataset(xTest, yTest)\n testloader = torch.utils.data.DataLoader(testset, batch_size = batch_size, \n shuffle=False, drop_last=False, num_workers=0)\n \n return train, 
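# --- Illustration (not part of the dataset record above) --------------------
# The split above stratifies on *binned* ages rather than raw ages, so each
# of train/val/test keeps a similar age distribution. A self-contained
# sketch of the same idiom (toy ages; the 9.5-year bin width mirrors the
# original code):
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

ages = np.random.uniform(18, 94, size=200)
groups = np.digitize(ages, np.arange(18, 94, 9.5))  # coarse age groups
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.27, random_state=23)
train_idx, test_idx = next(sss.split(groups, groups))
print(len(train_idx), len(test_idx))  # ~73% / ~27%, age-balanced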
val, test, trainloader, valloader, testloader\n\n\ndef reference(batch_size, train, region, smooth=False):\n x = []\n for r in region:\n template_idx = np.argwhere(train[1][:,1] == 43).flatten()[0]\n template_subj = train[1][template_idx]\n if r==1 and smooth: #Smooth out the caudate a bit, looks kinda rough\n _template = train[0][template_idx]\n _template = _template[r].astype('float32')\n struc = np.ones((3,3,3))\n _template = sn.binary_opening(_template, structure=struc).astype('float32')\n _template = torch.from_numpy(_template).unsqueeze(0).unsqueeze(0)\n template = _template.repeat(batch_size,1,1,1,1)\n template = template.float().cuda()\n x += [template]\n else:\n _template = torch.from_numpy(train[0][template_idx]).unsqueeze(0)\n template = _template.repeat(batch_size,1,1,1,1)\n template = template[:,r,:,:,:].unsqueeze(1)\n template = template.float().cuda()\n x += [template]\n return torch.cat(x,0), template_subj","sub_path":"functions/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":12900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"648572224","text":"import pandas as pd\nimport numpy as np\n\nfrom src.database.manager import DatabaseManager\nfrom src.movie.movie_utils import estimate_img_url_exist\n\n\nclass MovieRecommence:\n\n def __init__(self):\n\n self.db_manager = DatabaseManager()\n self.rating_df, rating_matrix_df = self.db_manager.get_movie_matrix()\n self.movie_index = rating_matrix_df.columns\n self.corr_matrix = np.corrcoef(rating_matrix_df.T)\n\n def get_similar_movies(self, movie_title):\n \"\"\"\n Returns correlation vector for a movie\n \"\"\"\n\n movie_idx = list(self.movie_index).index(movie_title)\n\n return self.corr_matrix[movie_idx]\n\n def get_movie_recommendations(self, user_movies):\n \"\"\"given a set of movies, it returns all the movies sorted by their correlation with the user\"\"\"\n\n similar_movies_df = pd.DataFrame()\n movie_similarities = np.zeros(self.corr_matrix.shape[0])\n for movie_id in user_movies:\n movie_similarities = movie_similarities + self.get_similar_movies(movie_id)\n\n similar_movies_df = pd.DataFrame({\n 'title': self.movie_index,\n 'sum_similarity': movie_similarities\n })\n\n similar_movies_df = similar_movies_df.sort_values(by=['sum_similarity'], ascending=False)\n # print(similar_movies_df)\n return similar_movies_df\n\n def get_user_rec(self, sample_user):\n \"\"\"\n This extracts the similar movies according to the movie user rated\n :param sample_user: user id\n :return: 24 similar movies\n \"\"\"\n self.rating_df[self.rating_df.user_id == sample_user].sort_values(by=['ratings'], ascending=False)\n\n sample_user_movies = self.rating_df[self.rating_df.user_id == sample_user].title.tolist()\n recommendations = self.get_movie_recommendations(sample_user_movies)\n l_ = 20\n\n # We get the top 20 recommended movies\n inner_l = l_ + 24\n rec = recommendations.title.head(inner_l)[l_:]\n #\n reviews = []\n\n for item in rec:\n\n img, im_db, rating_info = self.db_manager.get_movie_info_title(item=item)\n img_url = img[0]\n rates = []\n rate_dates = []\n for rate_info in rating_info:\n rates.append(rate_info[0])\n rate_dates.append(rate_info[1])\n max_rate = max(rates)\n rate_date = rate_dates[rates.index(max_rate)].strftime(\"%Y/%m/%d\")\n img_url = estimate_img_url_exist(img_url=img_url)\n # x = plot(int(im_db[0]))\n reviews.append([int(im_db[0]), item, img_url, str(\"\"), max_rate / 100, rate_date])\n\n return reviews\n\n\nif __name__ == 
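# --- Illustration (not part of the dataset record above) --------------------
# The MovieRecommence class above precomputes an item-item matrix with
# np.corrcoef on the transposed user x movie ratings matrix, then sums the
# rows of the movies a user rated to rank candidates. A toy version of that
# core step:
import numpy as np

ratings = np.array([[5, 4, 0, 1],   # users x movies (made-up data)
                    [4, 5, 1, 0],
                    [0, 1, 5, 4]], dtype=float)
corr = np.corrcoef(ratings.T)            # movies x movies correlations
user_movies = [0, 1]                     # movies the user already rated
scores = corr[user_movies].sum(axis=0)   # summed similarity per movie
print(np.argsort(scores)[::-1])          # most similar candidates first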
'__main__':\n\n MovieRecommence()\n","sub_path":"src/movie/user_recommender.py","file_name":"user_recommender.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"495281384","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Deleting model 'Caption'\n db.delete_table(u'frontpage_caption')\n\n # Deleting model 'RobotGroupCarousel'\n db.delete_table(u'frontpage_robotgroupcarousel')\n\n # Adding model 'indexPage'\n db.create_table(u'frontpage_indexpage', (\n (u'page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['frontpage.Page'], unique=True, primary_key=True)),\n ))\n db.send_create_signal(u'frontpage', ['indexPage'])\n\n # Deleting field 'HeaderPicture.image'\n db.delete_column(u'frontpage_headerpicture', 'image')\n\n # Adding M2M table for field images on 'HeaderPicture'\n db.create_table(u'frontpage_headerpicture_images', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('headerpicture', models.ForeignKey(orm[u'frontpage.headerpicture'], null=False)),\n ('picture', models.ForeignKey(orm[u'frontpage.picture'], null=False))\n ))\n db.create_unique(u'frontpage_headerpicture_images', ['headerpicture_id', 'picture_id'])\n\n # Adding M2M table for field carousel on 'Page'\n db.create_table(u'frontpage_page_carousel', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('page', models.ForeignKey(orm[u'frontpage.page'], null=False)),\n ('carousel', models.ForeignKey(orm[u'frontpage.carousel'], null=False))\n ))\n db.create_unique(u'frontpage_page_carousel', ['page_id', 'carousel_id'])\n\n # Adding field 'Carousel.name'\n db.add_column(u'frontpage_carousel', 'name',\n self.gf('django.db.models.fields.CharField')(default=datetime.datetime(2014, 2, 9, 0, 0), max_length=200),\n keep_default=False)\n\n # Adding field 'Carousel.template'\n db.add_column(u'frontpage_carousel', 'template',\n self.gf('django.db.models.fields.CharField')(default='ROBOTCAROUSEL', max_length=200),\n keep_default=False)\n\n\n def backwards(self, orm):\n # Adding model 'Caption'\n db.create_table(u'frontpage_caption', (\n ('text', self.gf('django.db.models.fields.CharField')(max_length=200)),\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ))\n db.send_create_signal(u'frontpage', ['Caption'])\n\n # Adding model 'RobotGroupCarousel'\n db.create_table(u'frontpage_robotgroupcarousel', (\n (u'carousel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['frontpage.Carousel'], unique=True, primary_key=True)),\n ('leagueType', self.gf('django.db.models.fields.CharField')(max_length=200)),\n ))\n db.send_create_signal(u'frontpage', ['RobotGroupCarousel'])\n\n # Deleting model 'indexPage'\n db.delete_table(u'frontpage_indexpage')\n\n\n # User chose to not deal with backwards NULL issues for 'HeaderPicture.image'\n raise RuntimeError(\"Cannot reverse this migration. 
'HeaderPicture.image' and its values cannot be restored.\")\n # Removing M2M table for field images on 'HeaderPicture'\n db.delete_table('frontpage_headerpicture_images')\n\n # Removing M2M table for field carousel on 'Page'\n db.delete_table('frontpage_page_carousel')\n\n # Deleting field 'Carousel.name'\n db.delete_column(u'frontpage_carousel', 'name')\n\n # Deleting field 'Carousel.template'\n db.delete_column(u'frontpage_carousel', 'template')\n\n\n models = {\n u'frontpage.carousel': {\n 'Meta': {'object_name': 'Carousel'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['frontpage.Picture']\", 'null': 'True', 'blank': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),\n 'template': ('django.db.models.fields.CharField', [], {'default': \"'ROBOTCAROUSEL'\", 'max_length': '200'})\n },\n u'frontpage.headerpicture': {\n 'Meta': {'object_name': 'HeaderPicture'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['frontpage.Picture']\", 'null': 'True', 'blank': 'True'})\n },\n u'frontpage.indexpage': {\n 'Meta': {'object_name': 'indexPage', '_ormbases': [u'frontpage.Page']},\n u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u\"orm['frontpage.Page']\", 'unique': 'True', 'primary_key': 'True'})\n },\n u'frontpage.page': {\n 'Meta': {'object_name': 'Page'},\n 'carousel': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['frontpage.Carousel']\", 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['frontpage.Picture']\", 'null': 'True', 'blank': 'True'}),\n 'text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})\n },\n u'frontpage.pagegroup': {\n 'Meta': {'object_name': 'PageGroup'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'pages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['frontpage.Page']\", 'null': 'True', 'blank': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})\n },\n u'frontpage.picture': {\n 'Meta': {'object_name': 'Picture'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})\n },\n u'frontpage.robotpage': {\n 'Meta': {'object_name': 'RobotPage', '_ormbases': [u'frontpage.Page']},\n 'challenge': ('django.db.models.fields.CharField', [], {'max_length': '200'}),\n 'leagueType': ('django.db.models.fields.CharField', [], {'max_length': '200'}),\n u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u\"orm['frontpage.Page']\", 'unique': 'True', 'primary_key': 'True'}),\n 'robotName': ('django.db.models.fields.CharField', [], {'max_length': '200'})\n }\n }\n\n complete_apps = 
['frontpage']","sub_path":"robotics/frontpage/migrations/0005_auto__del_caption__del_robotgroupcarousel__add_indexpage__del_field_he.py","file_name":"0005_auto__del_caption__del_robotgroupcarousel__add_indexpage__del_field_he.py","file_ext":"py","file_size_in_byte":7019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"443120276","text":"import pygame\nimport Constants\nfrom ScoreHandler import ScoreHandler\n\nclass ScoreState:\n def __init__(self, window):\n self.window = window\n self.score_handler = ScoreHandler()\n self.name = \"score\"\n self.curr_st_str = \"score\"\n self.curser_pos = 0\n pygame.font.init()\n self.flappy_font = pygame.font.Font('res/04B_19__.TTF', 20)\n self.mono_font = pygame.font.SysFont('Courier', 30)\n self.background_surface = pygame.transform.scale((pygame.image.load('res/background.png')), (Constants.WINDOW_WIDTH, Constants.WINDOW_HEIGHT))\n self.scoreboard_surface = pygame.Surface((715,500))\n self.scoreboard_surface.set_alpha(225)\n self.scoreboard_surface.fill(0)\n\n def input_handler(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_DOWN:\n self.curser_pos = (self.curser_pos + 1) % 2\n if event.key == pygame.K_UP:\n self.curser_pos = (self.curser_pos - 1) % 2\n if event.key == pygame.K_RETURN:\n if self.curser_pos == 0:\n self.curr_st_str = \"menu\"\n if self.curser_pos == 1:\n pygame.quit()\n\n def draw(self):\n self.window.blit(self.background_surface, (0, 0))\n self.window.blit(self.scoreboard_surface, (Constants.WINDOW_WIDTH/2-320,20))\n #pygame.draw.rect(self.window,(0,0,0),(Constants.WINDOW_WIDTH/2-320,20,715,500))\n self.score_handler.blit_highscores(self.window,Constants.WINDOW_WIDTH/2-300,20,self.mono_font)\n x_pos = 50\n y_pos = Constants.WINDOW_HEIGHT - 120\n textsurface = self.flappy_font.render('MENU', False, Constants.WHITE)\n self.window.blit(textsurface,(x_pos, y_pos))\n textsurface = self.flappy_font.render('EXIT', False, Constants.WHITE)\n self.window.blit(textsurface,(x_pos,y_pos+50))\n textsurface = self.flappy_font.render('*', False, Constants.WHITE)\n self.window.blit(textsurface,(x_pos-20,y_pos+self.curser_pos*50))\n\n def update(self):\n self.draw()\n self.input_handler()\n","sub_path":"src/ScoreState.py","file_name":"ScoreState.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"530502197","text":"import random\r\ndef get_h_cost(board):\r\n h = 0\r\n for i in range(len(board)):\r\n #Check every column we haven't already checked\r\n for j in range(i + 1,len(board)): # i=0 1-3,2-3,3-3\r\n #Queens are in the same row\r\n if board[i] == board[j]:\r\n h += 1\r\n #Get the difference between the current column and the check column\r\n offset = j - i\r\n #To be a diagonal, the check column value has to be \r\n #equal to the current column value +/- the offset\r\n if board[i] == board[j] - offset or board[i] == board[j] + offset:\r\n h += 1\r\n return h\r\n\r\n#main\r\nn=4 # 4 queen problem\r\ni=0\r\nboard=[random.randint(0,n-1) for i in range(n)]\r\nprint(board)\r\nfor i in range(n):\r\n for j in range(n):\r\n print(get_h_cost(board),end=' ')\r\n board[i]=j\r\n print()","sub_path":"nqueen.py","file_name":"nqueen.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"208394340","text":"\"\"\"This 
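# --- Illustration (not part of the dataset record above) --------------------
# get_h_cost() in the n-queens snippet counts attacking pairs: same row, or
# a column offset equal to the row distance (a shared diagonal). A compact
# equivalent with two worked checks:
def h_cost(board):
    # count attacking pairs (same row or shared diagonal)
    return sum(1 for i in range(len(board)) for j in range(i + 1, len(board))
               if board[i] == board[j] or abs(board[i] - board[j]) == j - i)

assert h_cost([0, 1, 2, 3]) == 6   # all C(4,2) pairs share one diagonal
assert h_cost([1, 3, 0, 2]) == 0   # a valid 4-queens solution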
module contains the available structural response functions and their base class\"\"\"\nfrom __future__ import print_function, absolute_import, division\n\n# importing the Kratos Library\nfrom KratosMultiphysics import *\nimport structural_mechanics_analysis\n\nimport time as timer\n\nclass ResponseFunctionBase(object):\n \"\"\"The base class for structural response functions. Each response function\n is able to calculate its response value and gradient.\n All the necessary steps have to be implemented, like e.g. initializing,\n solving of primal (and adjoint) analysis ...\n \"\"\"\n\n def Initialize(self):\n pass\n\n def CalculateValue(self):\n raise NotImplementedError(\"CalculateValue needs to be implemented by the base class\")\n\n def CalculateGradient(self):\n raise NotImplementedError(\"CalculateGradient needs to be implemented by the base class\")\n\n def GetShapeGradient(self):\n raise NotImplementedError(\"GetShapeGradient needs to be implemented by the base class\")\n\n def Finalize(self):\n pass\n\nclass StrainEnergyResponseFunction(ResponseFunctionBase):\n \"\"\"Linear strain energy response function. It triggers the primal analysis and\n uses the primal analysis results to evaluate response value and gradient.\n\n Attributes\n ----------\n primal_analysis : Primal analysis object of the response function\n response_function_utility: Cpp utilities object doing the actual computation of response value and gradient.\n \"\"\"\n\n def __init__(self, identifier, response_settings, model_part = None):\n self.identifier = identifier\n self.response_settings = response_settings\n\n self.response_function_utility = StructuralMechanicsApplication.StrainEnergyResponseFunctionUtility(model_part, response_settings)\n\n with open(response_settings[\"primal_settings\"].GetString()) as parameters_file:\n ProjectParametersPrimal = Parameters(parameters_file.read())\n\n self.primal_analysis = structural_mechanics_analysis.StructuralMechanicsAnalysis(ProjectParametersPrimal, model_part)\n self.primal_analysis.GetModelPart().AddNodalSolutionStepVariable(SHAPE_SENSITIVITY)\n\n def Initialize(self):\n self.primal_analysis.Initialize()\n self.response_function_utility.Initialize()\n\n def CalculateValue(self):\n Logger.PrintInfo(\"\\n> Starting primal analysis for response:\", self.identifier)\n\n startTime = timer.time()\n self.primal_analysis.InitializeTimeStep()\n self.primal_analysis.SolveTimeStep()\n self.primal_analysis.FinalizeTimeStep()\n Logger.PrintInfo(\"> Time needed for solving the primal analysis = \",round(timer.time() - startTime,2),\"s\")\n\n startTime = timer.time()\n value = self.response_function_utility.CalculateValue()\n Logger.PrintInfo(\"> Time needed for calculating the response value = \",round(timer.time() - startTime,2),\"s\")\n\n return value\n\n def CalculateGradient(self):\n self.response_function_utility.CalculateGradient()\n\n def GetShapeGradient(self):\n gradient = {}\n for node in self.primal_analysis.GetModelPart().Nodes:\n gradient[node.Id] = node.GetSolutionStepValue(SHAPE_SENSITIVITY)\n return gradient\n\n def Finalize(self):\n self.primal_analysis.Finalize()\n\nclass EigenFrequencyResponseFunction(StrainEnergyResponseFunction):\n \"\"\"Eigenfrequency response function. The internal procedure is the same as\n for the StrainEnergyResponseFunction. 
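# --- Illustration (not part of the dataset record above) --------------------
# ResponseFunctionBase above is a template-method pattern: the base class
# fixes the lifecycle (Initialize -> CalculateValue -> CalculateGradient ->
# Finalize) and the NotImplementedError stubs force *derived* classes (the
# wording "base class" in the docstrings means the deriving class) to supply
# the analysis-specific pieces. Minimal standalone shape:
class ResponseBase(object):
    def Initialize(self):
        pass
    def CalculateValue(self):
        raise NotImplementedError("CalculateValue must be implemented by a derived class")
    def Finalize(self):
        pass

class ConstantResponse(ResponseBase):
    def CalculateValue(self):
        return 42.0

r = ConstantResponse()
r.Initialize(); print(r.CalculateValue()); r.Finalize()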
It triggers the primal analysis and\n uses the primal analysis results to evaluate response value and gradient.\n Only the response_function_utility is a different object.\n\n Attributes\n ----------\n primal_analysis : Primal analysis object of the response function\n response_function_utility: Cpp utilities object doing the actual computation of response value and gradient.\n \"\"\"\n\n def __init__(self, identifier, response_settings, model_part = None):\n self.identifier = identifier\n self.response_settings = response_settings\n\n if not response_settings.Has(\"weighting_method\") or response_settings[\"weighting_method\"].GetString() == \"none\":\n self.response_function_utility = StructuralMechanicsApplication.EigenfrequencyResponseFunctionUtility(model_part, response_settings)\n elif response_settings[\"weighting_method\"].GetString() == \"linear_scaling\":\n self.response_function_utility = StructuralMechanicsApplication.EigenfrequencyResponseFunctionLinScalUtility(model_part, response_settings)\n else:\n raise NameError(\"The following weighting_method is not valid for eigenfrequency response: \" + response_settings[\"weighting_method\"].GetString() +\n \".\\nAvailable weighting methods are: 'none', 'linear_scaling'. Default: 'none'\")\n\n with open(response_settings[\"primal_settings\"].GetString()) as parameters_file:\n ProjectParametersPrimal = Parameters(parameters_file.read())\n\n self.primal_analysis = structural_mechanics_analysis.StructuralMechanicsAnalysis(ProjectParametersPrimal, model_part)\n self.primal_analysis.GetModelPart().AddNodalSolutionStepVariable(SHAPE_SENSITIVITY)\n\nclass MassResponseFunction(ResponseFunctionBase):\n \"\"\"Mass response function. It reads the materials for the model part and\n calculates response value and gradient.\n\n Attributes\n ----------\n model_part : Model part object of the response function\n response_function_utility: Cpp utilities object doing the actual computation of response value and gradient.\n \"\"\"\n\n def __init__(self, identifier, response_settings, model_part):\n self.identifier = identifier\n self.response_settings = response_settings\n\n self.response_function_utility = StructuralMechanicsApplication.MassResponseFunctionUtility(model_part, response_settings)\n\n self.model_part = model_part\n self.model_part.AddNodalSolutionStepVariable(SHAPE_SENSITIVITY)\n\n def Initialize(self):\n import read_materials_process\n # Create a dictionary of model parts.\n model = Model()\n model.AddModelPart(self.model_part)\n # Add constitutive laws and material properties from json file to model parts.\n read_materials_process.ReadMaterialsProcess(model, self.response_settings[\"material_import_settings\"])\n self.response_function_utility.Initialize()\n\n def CalculateValue(self):\n value = self.response_function_utility.CalculateValue()\n return value\n\n def CalculateGradient(self):\n self.response_function_utility.CalculateGradient()\n\n def GetShapeGradient(self):\n gradient = {}\n for node in self.model_part.Nodes:\n gradient[node.Id] = node.GetSolutionStepValue(SHAPE_SENSITIVITY)\n return gradient\n\n\n\n\n","sub_path":"applications/StructuralMechanicsApplication/python_scripts/structural_response.py","file_name":"structural_response.py","file_ext":"py","file_size_in_byte":6864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"238329343","text":"# https://atcoder.jp/contests/abc094/tasks/arc095_a\n\nfrom copy import deepcopy\n\nn = int(input().strip())\nx = list(map(int, 
input().split()))\ny = deepcopy(x)\ny.sort()\nm1 = y[n // 2 - 1]\nm2 = y[(n // 2)]\n\nfor e in x:\n    if e <= m1:\n        print(m2)\n    else:\n        print(m1)\n","sub_path":"beginner_contests/094/C/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"41789316","text":"def isSubString(string_parent, string_child):\n    for i in range(len(string_parent) - len(string_child) + 1):  # + 1 so a match at the very end is checked\n        if string_parent[i:i + len(string_child)] == string_child:\n            return True\n    return False\ndef stringRotation(string_a, string_b):\n    if len(string_a) == len(string_b):\n        return isSubString(string_a + string_a, string_b)\n    else:\n        return False\nif __name__ == \"__main__\":\n    print(stringRotation(\"源远中美关系流长\",\"流长源远中美关系\"))","sub_path":"Chapter 1 Arrays and Strings/1.9 String Rotation/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"550292171","text":"# my solution\nf = open('028PartialPermutations.txt','r')\ntemp = [int(s) for s in f.readline().split()]\nn = temp[0]\nk = temp[1]\nf.close()\n\ndef fact(n):\n    \"\"\"\n    return factorial \n    \"\"\"\n    if n==1:\n        return 1\n    else:\n        return n*fact(n-1)\n\ndef getPartialPermutations1(n,k):\n    \"\"\"\n    return partial permutations\n    >>> getPartialPermutations1(21,7)\n    616512\n    \"\"\"\n    return (fact(n) / fact(n-k))%1000000\n\ndef getPartialPermutations2(n,k):\n    \"\"\"\n    return partial permutations\n    >>> getPartialPermutations2(21,7)\n    616000\n    \"\"\"\n    ans = 1\n    for i in range(k):\n        ans = ans * (n-i)\n    return ans%1000000\n\nprint (getPartialPermutations1(n,k))\nprint (getPartialPermutations2(n,k))\n\n# How are these two results not the same?\ndef getPartialPermutations3(n,k):\n    \"\"\"\n    return partial permutations\n    >>> getPartialPermutations3(21,7)\n    616000\n    \"\"\"\n    return (fact(n) // fact(n-k))%1000000\n\nprint (getPartialPermutations3(n,k))","sub_path":"stronghold/028PartialPermutations.py","file_name":"028PartialPermutations.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"385509477","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom src import utils\n\ndef ingest_raw_nfirs_data(data_dir):\n    \"\"\"Ingest a single year of raw nfirs data, perform basic cleaning, merging, and filtering to \n    generate one year's worth of nfirs data ready to be geocoded.\n    \n    Args:\n        data_dir: nfirs directory with one year of data\n    \n    Returns:\n        pandas dataframe of cleaned nfirs data (not geocoded yet)\n    \"\"\"\n    \n    # Read tables and switch columns to lower case\n    basic = pd.read_csv(os.path.join(data_dir, 'basicincident.txt'), sep = '^', encoding = 'latin-1', low_memory = False)\n    address = pd.read_csv(os.path.join(data_dir, 'incidentaddress.txt'), sep = '^', encoding = 'latin-1', low_memory = False)\n    fire = pd.read_csv(os.path.join(data_dir, 'fireincident.txt'), sep = '^', encoding = 'latin-1', low_memory = False)\n    \n    basic.columns = basic.columns.str.lower()\n    address.columns = address.columns.str.lower()\n    fire.columns = fire.columns.str.lower()\n    \n    # Columns to merge the 3 datasets on\n    merge_cols = ['state','fdid','inc_date','inc_no','exp_no']\n    \n    # Drop duplicates based on those merge columns. For nfirs 2016, there were 110 duplicates\n    # dropped from the basic table, 65 from the address table, and 5 from the fire table. 
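# --- Illustration (not part of the dataset record above) --------------------
# The "How are these two results not the same?" question above is float
# precision: fact(n) / fact(n-k) is true division, so the quotient becomes a
# 53-bit float before % 1000000 is applied, while // and the running product
# stay exact integers. The rounding shows up as soon as a value needs more
# than 53 bits:
import math

print(math.factorial(21) // math.factorial(14))         # 586051200, exact
print(float(math.factorial(25)) == math.factorial(25))  # False: 25! needs ~84 bits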
\n basic = basic.drop_duplicates(merge_cols)\n address = address.drop_duplicates(merge_cols)\n fire = fire.drop_duplicates(merge_cols)\n \n # Subset the basic data by inc_type and prop_use values which correspond to home fires\n inc_type_vals = [111, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122]\n \n mask1 = basic['inc_type'].isin(inc_type_vals)\n mask2 = basic['prop_use'].str.startswith('4')\n\n basic = basic[mask1 & mask2]\n \n # Left join the address and fire tables to the basic table\n df = (basic.merge(address, how = 'inner', on = merge_cols)\n .merge(fire, how = 'left', on = merge_cols, indicator = 'fire_merge')\n )\n \n # Convert the address to a datetime object\n df['inc_date'] = pd.to_datetime(df['inc_date'].astype(str).str.zfill(8), format = '%m%d%Y')\n \n ### Combine the street address parts into a single address field\n # Clean address parts\n address_parts = ['num_mile','street_pre','streetname','streettype','streetsuf']\n for part in address_parts:\n df[part] = df[part].fillna('').astype(str).str.upper()\n\n # Some streetnames included the street_pre as part of the field (i.e. N N 21st st, or E E John Blvd). This\n # line replaces street_pre with '' if that is the case\n df['street_pre'] = np.where(df['street_pre'] == df['streetname'].str.split(' ').str[0], '', df['street_pre'])\n\n # Combines and cleans the address parts into a single address field\n df['address'] = df['num_mile'] + ' ' + df['street_pre'] + ' ' + df['streetname'] + ' ' + df['streettype'] + ' ' +\\\n df['streetsuf']\n df['address'] = df['address'].str.replace('\\s+',' ', regex=True).str.strip()\n \n # Replace erroneous zip codes with null values\n erroneous_zip_codes = ['00000','11111','22222','99999']\n df['zip5'] = df['zip5'].replace(erroneous_zip_codes, np.nan)\n \n # Fill null values for state (which corresponds to the state the fire department is in) with the state_id (which corresponds\n # to the state where the fire occurred. 99% of the time these are the same). Do the same for state_id using state.\n # In 2016 there were 19 null values for state, and 4 for state_id\n df['state_id'] = df['state_id'].fillna(df['state'])\n df['state'] = df['state'].fillna(df['state_id'])\n \n # Fill null values for oth_inj and oth_death with 0. Assumption is that if there were really an injury or death, these \n # fields would have been filled out. \n df['oth_inj'] = df['oth_inj'].fillna(0)\n df['oth_death'] = df['oth_death'].fillna(0)\n \n # Fill null values for prop_loss and cont_loss with 0. Assumption is that if there were really a large property \n # loss or content loss then these fields would have been filled out. 
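# --- Illustration (not part of the dataset record above) --------------------
# The street_pre fix above is a useful vectorised idiom: np.where blanks a
# prefix column wherever the street name already begins with that prefix.
# Toy demo of the same pattern:
import numpy as np
import pandas as pd

df = pd.DataFrame({'street_pre': ['N', 'E'],
                   'streetname': ['N 21ST', 'JOHN']})
df['street_pre'] = np.where(
    df['street_pre'] == df['streetname'].str.split(' ').str[0],
    '', df['street_pre'])
print(df)  # the duplicated 'N' prefix is blanked, 'E' is kept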
\n    df['prop_loss'] = df['prop_loss'].fillna(0)\n    df['cont_loss'] = df['cont_loss'].fillna(0)\n    \n    # Calculate the total loss column\n    df['tot_loss'] = df['prop_loss'] + df['cont_loss']\n    \n    # Convert fdid column to str, and left pad with zeros to match documentation\n    df['fdid'] = df['fdid'].astype(str).str.zfill(5)\n\n    # Create st_fdid column with unique identifier for each fire department in the country\n    df['st_fdid'] = df['state'] + '_' + df['fdid']\n    \n    # Zero pad dept_sta column to align with documentation\n    df['dept_sta'] = (df['dept_sta'].astype(str)\n                      .str.zfill(3)\n                      .replace('nan',np.nan))\n    \n    # Capitalize cities\n    df['city'] = df['city'].astype(str).str.upper()\n    \n    # Convert to str and zero pad inc_no and exp_no\n    df['inc_no'] = df['inc_no'].str.zfill(7)\n    df['exp_no'] = df['exp_no'].astype(str).str.zfill(3)\n\n    # Create id column which is just combination of the primary key fields from nfirs\n    df['unique_id'] = df['state'] + '_' + df['fdid'] + '_' + df['inc_date'].astype(str) + '_' + df['inc_no'] + '_' + df['exp_no']\n    \n    # Subset the data by the columns we've selected for further use\n    usecols = ['state','fdid','st_fdid','dept_sta','inc_date','inc_no','exp_no','inc_type','prop_use','address','city','state_id',\n               'zip5','oth_inj','oth_death','prop_loss','cont_loss','tot_loss','detector','det_type','det_power',\n               'det_operat','det_effect','det_fail','aes_pres','aes_type','aes_oper','no_spr_op','aes_fail','unique_id']\n    \n    df = df[usecols]\n    \n    return(df)\n\ndef ingest_all_nfirs():\n    \n    \"\"\"This function calls the ingest_raw_nfirs_data function on each\n    directory within data/raw/nfirs, and saves the cleaned output files to\n    data/interim/nfirs, ready to be geocoded.\n    \"\"\"\n    \n    raw_nfirs_path = utils.DATA[\"raw\"] / 'nfirs'\n    interim_nfirs_path = utils.DATA['interim'] / 'nfirs'\n\n    for year in os.listdir(raw_nfirs_path):\n        year_path = os.path.join(raw_nfirs_path, year)\n\n        if not os.path.isdir(year_path):\n            continue\n\n        output_name = f'nfirs_cleaned_{year}.csv'\n        output_path = os.path.join(interim_nfirs_path, output_name)\n\n        df = ingest_raw_nfirs_data(year_path)\n        df.to_csv(output_path, index = False)\n    \n    return\n\nif __name__ == \"__main__\":\n    ingest_all_nfirs()\n","sub_path":"src/data/nfirs.py","file_name":"nfirs.py","file_ext":"py","file_size_in_byte":6459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"236761478","text":"import os\nfrom flask import Flask, render_template, jsonify, request\n\ntemplateDirectory = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'app/backend/templates')\nstaticDirectory = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'app/backend/static')\n\napp = Flask(\n    __name__,\n    template_folder = templateDirectory,\n    static_folder = staticDirectory\n)\n\ntestData = {\n    1: {\n        'userid': 1,\n        'username': 'james',\n        'posts': [\n            'Hello!',\n            'Flask + React is awesome',\n            'Go Bills'\n        ]\n    },\n    2: {\n        'userid': 2,\n        'username': 'yeezy',\n        'posts': [\n            'Pablo',\n            'Yeezus',\n            '808s'\n        ]\n    },\n    3: {\n        'userid': 3,\n        'username': 'user246',\n        'posts': [\n            'Reddit'\n        ]\n    }\n}\n\ntestDataNameToID = {\n    'james': 1,\n    'yeezy': 2,\n    'user246': 3\n}\n\n### Routes ###\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/<username>', methods=['GET'])\ndef userPage(username):\n    userid = 
getUserIDFromUsername(username)\n    data = getUserInfo(userid)\n    print(data)\n    return data\n\n@app.route('/<username>/posts', methods=['GET'])\ndef userPosts(username):\n    userid = getUserIDFromUsername(username)\n    data = getUserPosts(userid)\n    return data\n\n###############\n\n### API ###\n\n# retrieve user info for a provided userid\n@app.route('/api/1.0/user/<int:userid>', methods=['GET'])\ndef getUserInfo(userid):\n    data = testData[userid]\n    return jsonify({'userData': data})\n\n@app.route('/api/1.0/user/<int:userid>/posts', methods=['GET'])\ndef getUserPosts(userid):\n    posts = testData[userid]['posts']\n    return jsonify({'posts': posts})\n\n@app.errorhandler(404)\ndef pageNotFound(error):\n    return render_template('404.html')\n\n###############\n\n### Helpers ###\n\ndef getUserIDFromUsername(username):\n    return testDataNameToID[username]\n\n###############","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"150861806","text":"from Meth_blue_06_09 import get_data_paths, calculate_graph_data\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\n\n\ndef plot_cv_data(blank,path, ax):\n\n    sub_data_2, data, legend = calculate_graph_data(path)\n    blank_data, blank, legend = calculate_graph_data(blank)\n\n    #Normalises results to blank and squares\n    #sub_data_2[1][:] = [np.square(data - blank) for data, blank in zip(sub_data_2[1], blank_data[1])]\n    #sub_data_2[2][:] = [np.square(data + blank) for data, blank in zip(sub_data_2[2], blank_data[2])]\n    legend_str = path[0].split('/')[-1]\n    legend = legend_str.split('_')[0]\n    ax.plot(sub_data_2[0], sub_data_2[1])\n\ndirectory = '/Users/st659/Google Drive/Resazurin Bulk 18-1-17'\nplt.style.use(['seaborn-white', 'seaborn-notebook'])\n\npaths = get_data_paths(directory)\n\nsorted_paths = [paths[x:x+3] for x in range(0, len(paths), 3)]\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\ndata = [plot_cv_data(sorted_paths[0],path, ax) for path in sorted_paths]\n\nlegends = [\"Blank\", 'Ecoli MG1655 $10^{4}$ cfu/ml', 'Ecoli MG1655 $10^{5}$ cfu/ml', 'Ecoli MG1655 $10^{6}$ cfu/ml']\n\nplt.legend(legends,loc = 'lower right')\nplt.ylabel('Current (mA)')\nplt.xlabel('Voltage vs AgAgCl (V)')\nplt.savefig(os.path.join(directory,'ResazurinBulk_18-1-17.png'), dpi=300)\nplt.show()\n\n","sub_path":"Resazurin Bulk 18-1-17.py","file_name":"Resazurin Bulk 18-1-17.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"119276830","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 10 14:01:34 2018\n\n@author: kazuki.onodera\n\"\"\"\n\nimport gc, os\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport sys\nsys.path.append(f'/home/{os.environ.get(\"USER\")}/PythonLibrary')\nimport lgbextension as ex\nimport lightgbm as lgb\nfrom multiprocessing import cpu_count, Pool\nfrom glob import glob\n#import count\nimport utils, utils_best\n#utils.start(__file__)\n#==============================================================================\n\nSEED = 71\n\n\nparam = {\n         'objective': 'binary',\n         'metric': 'auc',\n         'learning_rate': 0.01,\n         \n         'max_depth': 6,\n         'num_leaves': 63,\n         'max_bin': 255,\n         \n         'min_child_weight': 10,\n         'min_data_in_leaf': 150,\n         'reg_lambda': 0.5,  # L2 regularization term on weights.\n         'reg_alpha': 0.5,  # L1 regularization term on weights.\n         \n         'colsample_bytree': 0.9,\n         'subsample': 0.9,\n#         'nthread': 32,\n         'nthread': cpu_count(),\n         'bagging_freq': 1,\n         'verbose':-1,\n         'seed': SEED\n         }\n\n\n# 
=============================================================================\n# load\n# =============================================================================\nX = pd.read_pickle('../data/X_train_nejumi_gp.pkl.gz')\ny = utils.read_pickles('../data/label').TARGET\n\n\nif X.columns.duplicated().sum()>0:\n    raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')\nprint('no dup :) ')\nprint(f'X.shape {X.shape}')\n\ngc.collect()\n\n\n# =============================================================================\n# cv\n# =============================================================================\ndtrain = lgb.Dataset(X, y )\ngc.collect()\n\nret, models = lgb.cv(param, dtrain, 99999, nfold=7,\n                     early_stopping_rounds=100, verbose_eval=50,\n                     seed=111)\n\ny_pred = ex.eval_oob(X, y, models, 111)\n\nresult = f\"CV auc-mean: {ret['auc-mean'][-1]} + {ret['auc-stdv'][-1]}\"\nprint(result)\nutils.send_line(result)\n\nimp = ex.getImp(models)\n\n\n# =============================================================================\n# cv loop\n# =============================================================================\nfrom sklearn.metrics import roc_auc_score\n\ndtrain = lgb.Dataset(X, y, free_raw_data=False)\ngc.collect()\n\ny_pred = pd.Series(0, index=y.index)\n\nfor i in range(5):\n    ret, models = lgb.cv(param, dtrain, 99999, nfold=7,\n                         early_stopping_rounds=100, verbose_eval=50,\n                         seed=i)\n    \n    y_pred += ex.eval_oob(X, y, models, i).rank()\n\ny_pred /= y_pred.max()\n\nauc_mean = roc_auc_score(y, y_pred)\nresult = f\"CV auc-mean(nejumi gp): {auc_mean}\"\nprint(result)\nutils.send_line(result)\n\n\n#==============================================================================\nutils.end(__file__)\n#utils.stop_instance()\n\n\n\n","sub_path":"Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/814_cv_nejumi_gp.py","file_name":"814_cv_nejumi_gp.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"165931050","text":"\r\n#coding = utf-8\r\n\r\n#\tAlgorithm: \t\t\tKNN\r\n#  feature extraction:\tn highest frequency (n = VecDimension)\r\n#\tModel:\t\t\t\twordvec\r\n#\tDistance:\t\t\tcos-distance\r\n\r\nimport numpy as np\r\nimport math\r\n\r\n\r\nK = 10\r\nVecDimension = 2000\r\nClassNum = 20\r\n\r\n#GET EACH WORD'S FREQUENCY\r\ndef getFreDictionary(TestSetNum):\r\n    FreDictionary = {}\r\n    for i in range(0,9):\r\n        if i == TestSetNum: continue\r\n        \r\n        fp = open('C:/Users/LYF/Desktop/ML/data/list_'+'%d'%i+'.txt','r')\r\n        for line in fp:\r\n            temp = line.split()\r\n            docpath = 'C:/Users/LYF/Desktop/ML/data/rtexts/'+temp[0]\r\n            doc = open(docpath,'r')\r\n            words = (doc.readlines()[0]).split()\r\n            words = list(set(words)) # deduplicate\r\n            for word in words:\r\n                if word not in FreDictionary:\r\n                    FreDictionary[word] = np.zeros(ClassNum)\r\n                FreDictionary[word][int(temp[1])] = FreDictionary[word][int(temp[1])] + 1\r\n            doc.close()\r\n        fp.close()\r\n    for word in FreDictionary:\r\n    \ttemp = sum(FreDictionary[word])\r\n    \tFreDictionary[word] = FreDictionary[word] / temp\r\n    return FreDictionary\r\n\r\n#TRANSLATE DOC INTO VEC\r\ndef doc2vec(docpath,dictionary):\r\n    doc = open(docpath,'r')\r\n    words = (doc.readlines()[0]).split()\r\n    vec = np.zeros(ClassNum)\r\n    for word in words:\r\n        if word in dictionary:\r\n            vec = vec + dictionary[word]\r\n    length = math.sqrt(sum(np.square(vec)))\r\n    vec = vec / length\r\n    return vec\r\n\r\ndef KNN_train(TestSetNum):\r\n    dictionary = 
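# --- Illustration (not part of the dataset record above) --------------------
# The "cv loop" above damps seed variance: lgb.cv is run under several fold
# seeds and the out-of-fold predictions are rank-averaged before scoring.
# A self-contained sketch on toy data using plain lgb.cv (eval_oob comes
# from the author's private lgbextension helper, so only metric curves are
# averaged here):
import numpy as np
import lightgbm as lgb

X = np.random.rand(500, 10)
y = (X[:, 0] + np.random.rand(500) > 1.0).astype(int)
params = {'objective': 'binary', 'metric': 'auc', 'verbose': -1}
aucs = []
for seed in range(3):
    ret = lgb.cv(params, lgb.Dataset(X, y), num_boost_round=50,
                 nfold=5, seed=seed)
    aucs.append(ret['auc-mean'][-1])  # key is 'valid auc-mean' on LightGBM >= 4
print(np.mean(aucs))                  # seed-averaged CV AUC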
getFreDictionary(TestSetNum)\r\n\r\n    vectors = []\r\n    for i in range(0,9):\r\n        if i == TestSetNum: continue\r\n        \r\n        fp = open('C:/Users/LYF/Desktop/ML/data/list_'+'%d'%i+'.txt','r')\r\n        for line in fp:\r\n            temp = line.split()\r\n            docpath = 'C:/Users/LYF/Desktop/ML/data/rtexts/'+temp[0]\r\n            vec = doc2vec(docpath,dictionary)\r\n            vectors.append([vec,temp[1]])\r\n    return dictionary,vectors\r\n    \r\ndef KNN_test(TestSetNum,dictionary,vectors,K):\r\n    num = 0\r\n    right = 0\r\n    fp = open('C:/Users/LYF/Desktop/ML/data/list_'+'%d'%TestSetNum+'.txt','r')\r\n    for line in fp:\r\n        num = num + 1\r\n        temp = line.split()\r\n        docpath = 'C:/Users/LYF/Desktop/ML/data/rtexts/'+temp[0]\r\n        vec = doc2vec(docpath,dictionary)\r\n        # for each document's vector, find the K nearest vectors\r\n        distances = []\r\n        for vector in vectors:\r\n            distances.append((vector[1],sum(vector[0] * vec)))\r\n        distances.sort(key = lambda x:x[1],reverse = True)\r\n        count = {}\r\n        for i in range(0,K):  # vote over the K nearest neighbours\r\n            if distances[i][0] in count:\r\n                count[distances[i][0]] = count[distances[i][0]] + 1\r\n            else:\r\n                count[distances[i][0]] = 1\r\n        \r\n        countlist = sorted(count.items(),key=lambda x:x[1],reverse=True)\r\n        textclass = countlist[0][0]\r\n        if textclass == temp[1]:\r\n            right = right + 1\r\n    return right / num\r\n    \r\n    \r\n\r\n\r\na = []\r\nfor i in range(10): \r\n    dictionary,vectors = KNN_train(i)\r\n    a.append(KNN_test(i,dictionary,vectors,K))\r\nprint(a)\r\n","sub_path":"Python Codes/Machine Learning/Classification/KNN(wordvec).py","file_name":"KNN(wordvec).py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"341729845","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: MingZ\n# @Date created: 05 April 2018\n# @Date last modified: 24 May 2018\n# Python Version: 3.6\n# Description: Construct a classification model on the MNIST data set in the form of a deep neural network\n# Perform and compare two or more methods of hyperparameter optimization on this model, and comment on the comparison.\n# hyperparameter: learning_rate, epochs, batch_size, activation function, hidden layers and neurons, weight initialization, dropout for regularization\n# hyperparameter tuning:grid search, randomized search\n\n# import packages\nimport numpy as np\nimport pickle as pk\nimport gzip\nimport time\n\nfrom sklearn.model_selection import train_test_split, GridSearchCV\n# from keras.datasets import mnist\nfrom keras.backend import clear_session\nfrom keras.utils.np_utils import to_categorical\nfrom keras.utils import print_summary\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization\n# optimizers: SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam, TFOptimizer\nfrom keras.optimizers import *\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.wrappers.scikit_learn import KerasClassifier\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# ---------------------------Convolutional Neural Network--------------------------\n# model building\ndef build_model(filters,kernel_size,dropout_rate,layer_size,num_layer):\n    clear_session() # clear backend model\n\n    # model construction\n    model = Sequential()\n\n    model.add(Conv2D(filters = filters, kernel_size = kernel_size, activation = 'relu', input_shape = (28,28,1)))\n    model.add(BatchNormalization())\n    model.add(Conv2D(filters = filters, kernel_size = kernel_size, activation = 'relu'))\n    
model.add(MaxPool2D(pool_size = 2))\n model.add(Dropout(dropout_rate))\n\n for n in range(num_layer):\n model.add(Conv2D(filters = (2**(n+1))*filters, kernel_size = kernel_size, activation = 'relu'))\n model.add(BatchNormalization())\n model.add(Conv2D(filters = (2**(n+1))*filters, kernel_size = kernel_size, activation = 'relu'))\n model.add(MaxPool2D(pool_size = 2))\n model.add(Dropout(dropout_rate))\n\n model.add(Flatten())\n model.add(Dense(layer_size, activation = 'relu'))\n model.add(Dropout(dropout_rate))\n model.add(Dense(10, activation = \"softmax\"))\n\n # compile\n model.compile(\n optimizer = Adadelta(lr = 1.0, rho = 0.95, epsilon = None, decay = 0.0),\n loss = 'categorical_crossentropy',\n metrics = ['accuracy'])\n return model\n\nif __name__ == \"__main__\": \n # ---------------------------Data Preparation---------------------------------\n # load the dataset\n file = gzip.open('mnist.pkl.gz','rb')\n train_set, valid_set, test_set = pk.load(file,encoding='latin1')\n file.close()\n\n x_train = train_set[0]\n # print(x_train.shape) #(50000, 784)\n y_train = train_set[1]\n del train_set # free some space\n\n x_test = test_set[0]\n # print(x_test.shape) #(10000, 784)\n y_test = test_set[1]\n del test_set\n\n x_val = valid_set[0]\n y_val = valid_set[1]\n del valid_set\n\n # reshape (sample population=-1, 28*28, chanel=1(black&white))\n x_train = x_train.reshape(-1,28,28,1)\n x_test = x_test.reshape(-1,28,28,1)\n x_val = x_val.reshape(-1,28,28,1)\n\n # map labels to on hot vectors in order to be applied into \n # model with categorical_crossentropy objective funtion\n y_train = to_categorical(y_train, num_classes=10)\n y_val = to_categorical(y_val,num_classes=10)\n\n # -----------------------Grid Search------------------------------------\n start = time.time()\n dropout_rate = [0.25]\n layer_size = [256]\n kernel_size = [5]\n filters = [32]\n epochs = [1]\n batch_size = [80, 100]\n num_layer = [1]\n\n # pass parameters to GridSearchCV\n param_grid = dict(\n num_layer = num_layer,\n dropout_rate = dropout_rate,\n layer_size = layer_size,\n kernel_size = kernel_size,\n filters = filters,\n epochs = epochs,\n batch_size = batch_size)\n model = KerasClassifier(build_fn = build_model, verbose = 2)\n models = GridSearchCV(model, param_grid, n_jobs = -1)\n\n # fit\n results = models.fit(x_train,y_train)\n print('best model:')\n print(results.best_params_)\n print(time.time() - start)\n # -------------------------score---------------------------------\n print(results.score(x_test,y_test))\n","sub_path":"hyperparameters optimization/grid_searchCV.py","file_name":"grid_searchCV.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"438439806","text":"# Program Function:\n# plots the s parameter data\n\nimport csv\nfrom matplotlib import pyplot as plot\nfrom matplotlib import cm # color map\nfrom cycler import cycler\nimport numpy as np\n\n# ***************************** Simulated Data *****************************\n# *************************************************************************\n\n# ***************************** Measured Data *****************************\nfile2 = open('t0.csv', 'rb') # open the file for reading\nmeasured_csv_s11_mag_state0 = csv.reader(file2, delimiter=',') # convert cvs file to python list\nfile2 = open('t3.csv', 'rb') # open the file for reading\nmeasured_csv_s11_mag_state3 = csv.reader(file2, delimiter=',') # convert cvs file to python list\nfile2 = open('t4.csv', 
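# --- Illustration (not part of the dataset record above) --------------------
# The grid search above works because KerasClassifier adapts a Keras model
# to the scikit-learn estimator API, letting GridSearchCV cross-validate
# every combination in param_grid. Stripped-down wiring (the legacy
# keras.wrappers module is assumed here; newer code would use scikeras):
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier

def tiny_model(dropout_rate=0.25):
    m = Sequential([Dense(32, activation='relu', input_shape=(784,)),
                    Dropout(dropout_rate),
                    Dense(10, activation='softmax')])
    m.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
    return m

clf = KerasClassifier(build_fn=tiny_model, epochs=1, verbose=0)
search = GridSearchCV(clf, {'dropout_rate': [0.25, 0.5]}, cv=3)
# search.fit(x_train.reshape(-1, 784), y_train); search.best_params_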
'rb') # open the file for reading\nmeasured_csv_s11_mag_state4 = csv.reader(file2, delimiter=',') # convert cvs file to python list\nfile2 = open('t5.csv', 'rb') # open the file for reading\nmeasured_csv_s11_mag_state5 = csv.reader(file2, delimiter=',') # convert cvs file to python list\nfile2 = open('t7.csv', 'rb') # open the file for reading\nmeasured_csv_s11_mag_state7 = csv.reader(file2, delimiter=',') # convert cvs file to python list\n# *************************************************************************\n\n# extract information from csv and store in the following lists for plotting and processing\nfreq = []\nmeasured_s11_mag_state0 = []\nmeasured_s11_mag_state3 = []\nmeasured_s11_mag_state4 = []\nmeasured_s11_mag_state5 = []\nmeasured_s11_mag_state7 = []\n\n# ***************************** Simulated Data *****************************\n# *************************************************************************\n\n# ***************************** Measured Data *****************************\nfor row in measured_csv_s11_mag_state0: # row contains the row data\n if measured_csv_s11_mag_state0.line_num > 8: # ignore the first line\n if row[0] == 'END': # check if row reaches the last one\n break\n freq.append(row[0])\n measured_s11_mag_state0.append(20*np.log10(np.sqrt(float(row[1])**2+float(row[2])**2)))\n\nfor row in measured_csv_s11_mag_state3: # row contains the row data\n if measured_csv_s11_mag_state3.line_num > 8: # ignore the first line\n if row[0] == 'END': # check if row reaches the last one\n break\n measured_s11_mag_state3.append(20*np.log10(np.sqrt(float(row[1])**2+float(row[2])**2)))\n\nfor row in measured_csv_s11_mag_state4: # row contains the row data\n if measured_csv_s11_mag_state4.line_num > 8: # ignore the first line\n if row[0] == 'END': # check if row reaches the last one\n break\n measured_s11_mag_state4.append(20*np.log10(np.sqrt(float(row[1])**2+float(row[2])**2)))\n\nfor row in measured_csv_s11_mag_state5: # row contains the row data\n if measured_csv_s11_mag_state5.line_num > 8: # ignore the first line\n if row[0] == 'END': # check if row reaches the last one\n break\n measured_s11_mag_state5.append(20*np.log10(np.sqrt(float(row[1])**2+float(row[2])**2)))\n\nfor row in measured_csv_s11_mag_state7: # row contains the row data\n if measured_csv_s11_mag_state7.line_num > 8: # ignore the first line\n if row[0] == 'END': # check if row reaches the last one\n break\n measured_s11_mag_state7.append(20*np.log10(np.sqrt(float(row[1])**2+float(row[2])**2)))\n# *************************************************************************\n\n# ***************************** plotting **********************************\nconstant_0dB = np.linspace(0, 0, len(freq)) # constant 0dB reference line\n\nplot.figure(1)\n# plot.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y', 'c', 'm'])))\nax0 = plot.subplot(111)\nax0.plot(freq, constant_0dB, 'k--', label=\"0dB reference\")\nax0.plot(freq, measured_s11_mag_state0, '-', label=\"|s11| state 0\")\nax0.plot(freq, measured_s11_mag_state3, '-', label=\"|s11| state 3\")\nax0.plot(freq, measured_s11_mag_state4, '-', label=\"|s11| state 4\")\nax0.plot(freq, measured_s11_mag_state5, '-', label=\"|s11| state 5\")\nax0.plot(freq, measured_s11_mag_state7, '-', label=\"|s11| state 7\")\nax0.axis([1e9, 5e9, -50, 5]) # [xmin, xmax, ymin, ymax]\nplot.xlabel('Frequency (Hz)')\nplot.ylabel('|S11| (dB)')\nplot.title('Measured |S11|')\nplot.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
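# --- Illustration (not part of the dataset record above) --------------------
# Each parsing loop above converts a raw (real, imag) S-parameter sample to
# decibels via 20*log10(sqrt(re**2 + im**2)). The same step factored out as
# a small helper:
import numpy as np

def s_param_db(re, im):
    # magnitude of a complex S-parameter, in dB
    return 20 * np.log10(np.hypot(re, im))

print(s_param_db(0.1, 0.0))  # -20 dB: |S11| = 0.1, i.e. 1% reflected power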
# loc=4 => bottom right corner\nplot.tight_layout(pad=0.15, w_pad=0, h_pad=0)\nplot.subplots_adjust(left=0.1, right=0.7, top=0.9, bottom=0.1, hspace=0, wspace=0)\n# plot.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) # loc=4 => bottom right corner\n\nplot.show()\n# *************************************************************************\n\nfile2.close()\n","sub_path":"Python/pycharm/programs/LMA_data_plotting/panel2A/3/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"499815445","text":"from __future__ import division\r\n\r\nimport re\r\nimport random\r\nimport numpy as np\r\nimport networkx as nx\r\nfrom Queue import PriorityQueue\r\nfrom flow import Flow\r\n\r\n\r\nclass Simulation(object):\r\n def __init__(self, algorithm, graph, total_time, arrive_rate, reciprocal_lambda, min_bandwidth, max_bandwidth):\r\n self.algorithm = algorithm\r\n self.graph = graph\r\n self.total_time = total_time\r\n self.arrive_rate = arrive_rate\r\n self.reciprocal_lambda = reciprocal_lambda\r\n self.max_bandwidth = max_bandwidth\r\n self.min_bandwidth = min_bandwidth\r\n # self.current_time = 0\r\n self.flows = self.create_flow_samples()\r\n\r\n def create_flow_samples(self):\r\n flows = []\r\n for current_time in range(self.total_time):\r\n flow_num = np.random.poisson(self.arrive_rate)\r\n flows_come_in_current_time = []\r\n for _ in range(flow_num):\r\n src_num, dst_num = self.generate_random_src_dst_pair()\r\n start_time = current_time\r\n finish_time = start_time + self.generate_random_exist_time()\r\n bandwidth = self.generate_random_bandwidth()\r\n\r\n flow = Flow(src_num, dst_num, start_time, finish_time, bandwidth)\r\n flows_come_in_current_time.append(flow)\r\n flows.append(flows_come_in_current_time)\r\n\r\n return flows\r\n\r\n def generate_random_src_dst_pair(self):\r\n node_num = self.graph.__len__()\r\n src_num = random.randint(1, node_num)\r\n dst_num = random.randint(1, node_num)\r\n while src_num == dst_num:\r\n dst_num = random.randint(1, node_num)\r\n\r\n return src_num, dst_num\r\n\r\n def generate_random_start_time(self):\r\n return random.randint(0, self.total_time)\r\n\r\n def generate_random_exist_time(self):\r\n # TODO still need to discuss\r\n return int(random.expovariate(self.reciprocal_lambda))\r\n\r\n def generate_random_bandwidth(self):\r\n return random.randint(self.min_bandwidth, self.max_bandwidth)\r\n\r\n def run(self):\r\n rejected_flows = []\r\n current_flows = PriorityQueue()\r\n total_spectrum = self.get_total_spectrum()\r\n spectrum_utilization_per_sec = []\r\n fragment_rate_per_sec = []\r\n\r\n for current_time in range(self.total_time):\r\n # process incoming flows\r\n for flow in self.flows[current_time]:\r\n paths = self.algorithm(self.graph, flow)\r\n if not paths:\r\n rejected_flows.append(flow)\r\n else:\r\n current_flows.put((flow.finish_time, paths))\r\n # cleanup finished flows\r\n if not current_flows.empty():\r\n while not current_flows.empty() and current_flows.queue[0][0] == current_time:\r\n paths = current_flows.get()[1]\r\n self.remove_paths(paths)\r\n # calculate spectrum utilization per round\r\n spectrum_utilization_per_sec.append(self.calculate_spectrum_utilization(total_spectrum))\r\n fragment_rate_per_sec.append(self.calculate_fragment_rate())\r\n # calculate block rate\r\n block_rate = self.calculate_block_rate(rejected_flows)\r\n # print('Block Rate: %f' % block_rate)\r\n # print('Spectrum 
Utilization: '),\r\n # print(spectrum_utilization_per_sec)\r\n # print('Fragment Rate: '),\r\n # print(fragment_rate_per_sec)\r\n # for e in self.graph.edges(data=True):\r\n # print(e)\r\n # print(reduce(lambda x, y: x + len(y), self.flows, 0))\r\n # print(len(rejected_flows))\r\n return block_rate, sum(spectrum_utilization_per_sec) / self.total_time, sum(fragment_rate_per_sec)/self.total_time\r\n\r\n def remove_paths(self, paths):\r\n for path, start_index, total_num in paths:\r\n for i in range(len(path) - 1):\r\n src_num, dst_num = path[i], path[i + 1]\r\n spectrum_slots = self.graph.get_edge_data(src_num, dst_num)['spectrum_slots']\r\n for j in range(start_index, start_index + total_num):\r\n spectrum_slots[j] = 0\r\n\r\n def calculate_block_rate(self, rejected_flows):\r\n blocked_bandwidth = reduce(lambda x, y: x + y.bandwidth, rejected_flows, 0)\r\n total_bandwidth = 0\r\n for flows in self.flows:\r\n total_bandwidth = reduce(lambda x, y: x + y.bandwidth, flows, total_bandwidth)\r\n\r\n return blocked_bandwidth / total_bandwidth\r\n\r\n def get_total_spectrum(self):\r\n total_spectrum = reduce(lambda x, y: x + len(y[2]['spectrum_slots']), self.graph.edges(data=True), 0)\r\n\r\n return total_spectrum\r\n\r\n def calculate_spectrum_utilization(self, total_spectrum):\r\n occupied_spectrum = \\\r\n reduce(lambda x, y: x + sum([1 for _ in y[2]['spectrum_slots'] if _ != 0]), self.graph.edges(data=True), 0)\r\n\r\n return occupied_spectrum / total_spectrum\r\n\r\n def calculate_fragment_rate(self):\r\n patten = re.compile(r'0+')\r\n fragment_rate = 0\r\n for edge in self.graph.edges(data=True):\r\n spectrum_slots_str = ''.join(map(str, edge[2]['spectrum_slots']))\r\n length_result = map(len, patten.findall(spectrum_slots_str))\r\n try:\r\n fragment_rate += 1 - max(length_result) / sum(length_result)\r\n except ValueError:\r\n fragment_rate += 0\r\n\r\n return fragment_rate / len(self.graph.edges())","sub_path":"dpp-usb/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"549911217","text":"#!/usr/bin/env python\n\n######### /!\\ #############\n#\n# you will need the sendEmail.py file to get this working as it is\n# in addition get the myIp.py file or set up an empty myIp.py file with just \n# myIp = ''\n# into it, put the file in the same dir\n#\n###########################\n\nimport os\nimport myIp\nimport sendEmail as se\n\nip = os.popen('wget -qO- http://ipecho.net/plain').readlines()\n\nif ip[0] != myIp.myIp:\n se.sendEmail('subject : ip changed', 'The new ip is ' + ip[0] + '.', 'whotomailitto@mail.com') \n\n output = open(\"myIp.py\", \"w\")\n output.write(\"myIp = \")\n output.write(\"'\"+ip[0]+\"'\")\n output.close()\n","sub_path":"findMyIp.py","file_name":"findMyIp.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"154682729","text":"import json\nimport datetime\nfrom buspy.incoming_bus_checker import IncomingBusChecker\nfrom buspy.datamall_query import RequestSender, ArrivalFetcher\nfrom buspy.datetime_helpers import gettime, now\n\nrequest_sender = RequestSender()\narrival_fetcher = ArrivalFetcher(request_sender)\n\nrel_path2=\"../buspy/data/bus_stops.json\"\nwith open(rel_path2) as f:\n bus_stops = json.load(f) \n\nrel_path3=\"../buspy/data/bus_routes.json\"\nwith open(rel_path3) as f:\n bus_routes = json.load(f) \n\ndef build_checker(bus_stop_code, 
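# --- Illustration (not part of the dataset record above) --------------------
# calculate_fragment_rate() treats each link's spectrum as a 0/1 string:
# runs of '0' are free blocks, and fragmentation is 1 minus the share of
# free slots sitting in the single largest block. Worked case:
import re

slots = '0011000100'
runs = list(map(len, re.findall(r'0+', slots)))  # free runs: [2, 3, 2]
print(1 - max(runs) / sum(runs))                 # 1 - 3/7 ~= 0.571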
def build_checker(bus_stop_code, service_no, requested_time_str, original_request_time_str, owner_id=None):\n formatted_bus_stop_code = f\"{bus_stop_code}\"\n formatted_service_no = f\"{service_no}\"\n if bus_stop_code not in bus_stops:\n return (None, f\"I couldn't find your bus stop {formatted_bus_stop_code}. Please try again.\")\n\n if f\"{service_no}_{bus_stop_code}\" not in bus_routes:\n return (None, f\"I couldn't find your bus {formatted_service_no} at your bus stop {formatted_bus_stop_code}. Please try again.\")\n \n try:\n requested_time = datetime.datetime.fromisoformat(requested_time_str)\n except (TypeError, ValueError): # fromisoformat rejects malformed input\n requested_time = None\n\n if not requested_time:\n return (None, f\"I don't understand your time {requested_time_str}. Please try again.\")\n\n if requested_time < now():\n return (None, f\"Your time {original_request_time_str} is in the past. Please try again.\")\n \n return (IncomingBusChecker(bus_stop_code, service_no, requested_time, arrival_fetcher.get_arrival_time, owner_id=owner_id), None)","sub_path":"buspy/checker_builder.py","file_name":"checker_builder.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"446356914","text":"from django import http\nfrom django.conf import settings\nfrom django.contrib.auth import login\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.urls import reverse\nfrom django.views import View\n\nfrom apps.oauth.models import OAuthQQUser\nfrom apps.users.models import User\nfrom utils.response_code import RETCODE\nfrom QQLoginTool.QQtool import OAuthQQ\n\n\n# Check whether the openid is already bound\ndef is_bind_openid(openid, request):\n # Check whether the openid exists in the QQ login table OAuthQQUser\n try:\n qq_user = OAuthQQUser.objects.get(openid=openid)\n except OAuthQQUser.DoesNotExist:\n # Not bound -- render the binding page\n context = {'openid': openid}\n response = render(request, 'oauth_callback.html', context)\n else:\n # Already bound\n user = qq_user.user\n # 1. Keep the user logged in\n login(request, user)\n # 2. Save the username in a cookie\n response = redirect(reverse('contents:index'))\n response.set_cookie('username', user.username, max_age=14 * 2 * 24 * 3600)\n\n # 3. Redirect to the home page\n return response\n\n\n# http://www.meiduo.site:8000/oauth_callback?code=D185854B840E6F9C3038199FE7735995&state=None\nclass QQOauthCallbackView(View):\n def get(self, request):\n # 1. Get the code via request.GET.get\n code = request.GET.get('code')\n\n # uri > url\n oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID, client_secret=settings.QQ_CLIENT_SECRET,\n redirect_uri=settings.QQ_REDIRECT_URI)\n\n # 2. code --> access_token\n token = oauth.get_access_token(code)\n\n # 3. access_token ---> openid\n openid = oauth.get_open_id(token)\n\n # 4. Check whether the openid is bound\n response = is_bind_openid(openid, request)\n\n return response\n\n def post(self, request):\n # 1. Receive the parameters\n mobile = request.POST.get('mobile')\n pwd = request.POST.get('password')\n sms_code_client = request.POST.get('sms_code')\n openid = request.POST.get('openid')\n\n # 2. Validate the input\n if not openid:\n return render(request, 'oauth_callback.html', {'openid_errmsg': '无效的openid'})\n\n # 3. Check whether the mobile number exists\n # If it exists, verify the password\n try:\n user = User.objects.get(mobile=mobile)\n except User.DoesNotExist:\n\n # Not found -- create a new user\n user = User.objects.create_user(username=mobile, password=pwd, mobile=mobile)\n else:\n\n if not user.check_password(pwd):\n return render(request, 'oauth_callback.html', {'account_errmsg': '用户名或密码错误'})\n\n # 4. Bind the openid: insert a new row into the OAuthQQUser table\n OAuthQQUser.objects.create(user=user, openid=openid)\n\n\n # 1. Keep the user logged in\n login(request, user)\n 
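# Note: login() persists the auth state in the session; the username cookie below is presumably read by front-end scripts for display.\n 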
# 2. Save the username in a cookie\n response = redirect(reverse('contents:index'))\n response.set_cookie('username', user.username, max_age=14 * 2 * 24 * 3600)\n\n # 5. Return to the home page\n return response\n\n\nclass QQLoginView(View):\n # QQ login URL\n def get(self, request):\n # 1. Import the QQ login tool\n from QQLoginTool.QQtool import OAuthQQ\n\n # 2. Instantiate the object ---> authentication parameters\n oauth = OAuthQQ(\n client_id=settings.QQ_CLIENT_ID,\n client_secret=settings.QQ_CLIENT_SECRET,\n redirect_uri=settings.QQ_REDIRECT_URI,\n state=None\n )\n\n # 3. Get the QQ login URL and return it to the front end as a JsonResponse\n login_url = oauth.get_qq_url()\n\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'login_url': login_url})\n","sub_path":"meiduo_mall/apps/oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"80212684","text":"# Exercise 1: traverse a tuple using the iterator protocol\n# (\"铁扇公主\",\"铁锤公主\",\"扳手王子\")\ntuple01 = (\"铁扇公主\", \"铁锤公主\", \"扳手王子\")\ntuple01_iteration = tuple01.__iter__()\nwhile True:\n try:\n key = tuple01_iteration.__next__()\n print(key)\n except StopIteration:\n break\n\n\n# Exercise 2: fetch every dict entry without using for\n# {\"铁扇公主\":101,\"铁锤公主\":102,\"扳手王子\":103}\ndict01 = {\"铁扇公主\":101,\"铁锤公主\":102,\"扳手王子\":103}\ndict01_iteration = dict01.__iter__()\nwhile True:\n try:\n key = dict01_iteration.__next__()# iterating a dict yields its keys by default\n print(key,dict01[key])\n except StopIteration:\n break\n\n","sub_path":"first_step/iterator_generator/ex01_iterable.py","file_name":"ex01_iterable.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"88989937","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\nGet the directory containing the current file\n'''\nimport os,sys\n\n# a = os.path.dirname('d:\\\\env2.7\\\\pip-selfcheck.json')\n# print(a)\n#\n# os.path.exists()\n\ndef dir_file(dirs):\n if os.path.isfile(dirs):\n return os.path.dirname(dirs)\n else:\n return 0\n\na = dir_file('d:\\\\env2.7\\\\pip-selfcheck.json')\nprint(a)","sub_path":"day6/练习6.py","file_name":"练习6.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"565521274","text":"# @ build_board.py\r\n# This adds additional functions to the build_bios.py\r\n#\r\n# Copyright (c) 2019, Intel Corporation. All rights reserved.
\r\n# SPDX-License-Identifier: BSD-2-Clause-Patent\r\n#\r\n\r\n\"\"\"\r\nThis module serves as an additional build steps for the Mt Olympus board\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\n\r\n\r\ndef pre_build_ex(config, functions):\r\n \"\"\"Additional Pre BIOS build function\r\n\r\n :param config: The environment variables to be used in the build process\r\n :type config: Dictionary\r\n :param functions: A dictionary of function pointers\r\n :type functions: Dictionary\r\n :returns: nothing\r\n \"\"\"\r\n print(\"Info: re-generating PlatformOffset header files\")\r\n\r\n execute_script = functions.get(\"execute_script\")\r\n\r\n command = [\"build\", \"-D\", \"MAX_SOCKET=\" + config.get(\"MAX_SOCKET\", \"1\"),\r\n \"-m\",\r\n os.path.join(config[\"PLATFORM_BOARD_PACKAGE\"],\r\n \"Acpi\", \"BoardAcpiDxe\", \"Dsdt.inf\"),\r\n \"-y\",\r\n config.get(\"PRE_BUILD_REPORT\",\r\n os.path.join(config[\"WORKSPACE\"],\r\n \"preBuildReport.txt\")),\r\n \"--log=\" + config.get(\"PRE_BUILD_LOG\",\r\n os.path.join(config[\"WORKSPACE\"],\r\n \"prebuild.log\"))]\r\n\r\n _, _, _, code = execute_script(command, config)\r\n if code != 0:\r\n print(\" \".join(command))\r\n print(\"Error re-generating PlatformOffset header files\")\r\n sys.exit(1)\r\n\r\n config[\"AML_FILTER\"] = \"\\\"PSYS\\\" .MCTL\\\" .FIX[0-9,A-Z]\\\"\"\r\n print(\"AML_FILTER= \", config.get(\"AML_FILTER\"))\r\n\r\n # build the command with arguments\r\n command = [\"python\",\r\n os.path.join(config[\"MIN_PACKAGE_TOOLS\"],\r\n \"AmlGenOffset\",\r\n \"AmlGenOffset.py\"),\r\n \"-d\", \"--aml_filter\", config[\"AML_FILTER\"],\r\n \"-o\", os.path.join(config[\"WORKSPACE_PLATFORM\"],\r\n config[\"PLATFORM_BOARD_PACKAGE\"],\r\n \"Acpi\", \"BoardAcpiDxe\",\r\n \"AmlOffsetTable.c\"),\r\n os.path.join(config[\"BUILD_X64\"],\r\n \"PurleyOpenBoardPkg\",\r\n \"Acpi\",\r\n \"BoardAcpiDxe\",\r\n \"DSDT\",\r\n \"OUTPUT\",\r\n \"Dsdt\", \"WFPPlatform.offset.h\")]\r\n\r\n # execute the command\r\n _, _, _, code = execute_script(command, config)\r\n if code != 0:\r\n print(\" \".join(command))\r\n print(\"Error re-generating PlatformOffset header files\")\r\n sys.exit(1)\r\n\r\n print(\"GenOffset done\")\r\n return config\r\n\r\n\r\ndef build_ex(config, functions):\r\n \"\"\"Additional BIOS build function\r\n\r\n :param config: The environment variables to be used in\r\n the build process\r\n :type config: Dictionary\r\n :param functions: A dictionary of function pointers\r\n :type functions: Dictionary\r\n :returns: config dictionary\r\n :rtype: Dictionary\r\n \"\"\"\r\n print(\"build_ex\")\r\n return None\r\n\r\n\r\ndef post_build_ex(config, functions):\r\n \"\"\"Additional Post BIOS build function\r\n\r\n :param config: The environment variables to be used in the post\r\n build process\r\n :type config: Dictionary\r\n :param functions: A dictionary of function pointers\r\n :type functions: Dictionary\r\n :returns: config dictionary\r\n :rtype: Dictionary\r\n \"\"\"\r\n print(\"post_build_ex\")\r\n\r\n execute_script = functions.get(\"execute_script\")\r\n\r\n if not execute_script:\r\n print(\"post_build_ex Error\")\r\n sys.exit(1)\r\n\r\n common_patch_command = [os.path.join(config[\"PYTHON_HOME\"], \"python\"),\r\n os.path.join(config[\"MIN_PACKAGE_TOOLS\"],\r\n \"PatchFv\", \"PatchBinFv.py\"),\r\n config[\"TARGET\"],\r\n os.path.join(config[\"WORKSPACE_SILICON_BIN\"],\r\n \"PurleySiliconBinPkg\", \"FV\"),\r\n os.path.join(config[\"WORKSPACE\"],\r\n \"BuildReport.log\")]\r\n\r\n fvs_to_patch = [\"FvTempMemorySilicon\",\r\n 
\"FvPreMemorySilicon\",\r\n \"FvPostMemorySilicon\",\r\n \"FvLateSilicon\"]\r\n for fv in fvs_to_patch:\r\n patch_command = common_patch_command + [fv]\r\n _, _, _, code = execute_script(patch_command, config)\r\n if code != 0:\r\n print(\" \".join(patch_command))\r\n print(\"Patch Error!\")\r\n sys.exit(1)\r\n\r\n common_rebase_command = [os.path.join(config[\"PYTHON_HOME\"], \"python\"),\r\n os.path.join(config[\"MIN_PACKAGE_TOOLS\"],\r\n \"PatchFv\", \"RebaseBinFv.py\"),\r\n config[\"TARGET\"],\r\n os.path.join(config[\"WORKSPACE_SILICON_BIN\"],\r\n \"PurleySiliconBinPkg\", \"FV\"),\r\n os.path.join(config[\"WORKSPACE\"],\r\n \"BuildReport.log\")]\r\n\r\n rebase_command = common_rebase_command +\\\r\n [\"FvPreMemorySilicon\",\r\n \"gMinPlatformPkgTokenSpaceGuid.PcdFlashFvFspMBase\"]\r\n\r\n _, _, _, code = execute_script(rebase_command, config)\r\n if code != 0:\r\n print(\" \".join(rebase_command))\r\n print(\"Patch Error!\")\r\n sys.exit(1)\r\n\r\n rebase_command = common_rebase_command +\\\r\n [\"FvPostMemorySilicon\",\r\n \"gMinPlatformPkgTokenSpaceGuid.PcdFlashFvFspSBase\"]\r\n\r\n _, _, _, code = execute_script(rebase_command, config)\r\n if code != 0:\r\n print(\" \".join(rebase_command))\r\n print(\"Patch Error!\")\r\n sys.exit(1)\r\n\r\n return None\r\n\r\n\r\ndef clean_ex(config, functions):\r\n \"\"\"Additional clean function\r\n\r\n :param config: The environment variables to be used in the build process\r\n :type config: Dictionary\r\n :param functions: A dictionary of function pointers\r\n :type functions: Dictionary\r\n :returns: config dictionary\r\n :rtype: Dictionary\r\n \"\"\"\r\n print(\"clean_ex\")\r\n return None\r\n","sub_path":"Platform/Intel/PurleyOpenBoardPkg/BoardMtOlympus/build_board.py","file_name":"build_board.py","file_ext":"py","file_size_in_byte":6288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"200608000","text":"import os\nimport requests\nfrom flask import Flask, request\nimport telebot\nfrom telebot import types\n\nBOT_TOKEN = os.environ.get(\"BOT_TOKEN\")\nbot = telebot.TeleBot(BOT_TOKEN)\nserver = Flask(__name__)\n\nWEATHER_API_TOKEN = os.environ.get(\"API_KEY\")\nSERVER_URL = \"https://test-weather-the-best-2.herokuapp.com\"\n\n@bot.message_handler(commands=[\"start\"])\ndef start(message):\n bot.reply_to(message, 'Привет, ' + message.from_user.first_name)\n\n\n@bot.message_handler(commands=['location'])\ndef location(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_geo = types.KeyboardButton(text=\"Send de way\", request_location=True)\n keyboard.add(button_geo)\n bot.send_message(message.chat.id, \"Do u now de way\", reply_markup=keyboard)\n\n\n@bot.message_handler(content_types=[\"location\"])\ndef location(message):\n coord = \"{}{}\".format(SERVER_URL, \"/coord/{}/{}\".format(message.location.latitude, message.location.longitude))\n url = requests.get(coord).json()\n bot.send_message(message.chat.id, text=f'{url[\"city\"][\"city_name\"]}, {url.get(\"temp\")}')\n\n\n@bot.message_handler(func=lambda message: True)\ndef echo_message(message):\n city = message.text\n url = \"{}{}\".format(SERVER_URL, \"/weather/{}\".format(city))\n\n try:\n resp = requests.get(url).json()\n except Exception as exc:\n print(exc)\n else:\n bot.send_message(message.chat.id, text=f'{resp[\"city\"][\"city_name\"]}, {resp.get(\"temp\")}')\n\n\n@server.route('/' + BOT_TOKEN, methods=['POST'])\ndef getMessage():\n 
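# Deserialize the webhook JSON payload into a telebot Update and dispatch it to the handlers above\n 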
bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode(\"utf-8\"))])\n return \"!\", 200\n\n\nif __name__ == \"__main__\":\n bot.remove_webhook()\n bot.set_webhook(url='https://weather-django-bot.herokuapp.com/' + BOT_TOKEN)\n server.run(host=\"0.0.0.0\", port=int(os.environ.get('PORT', 5000)))\n","sub_path":"PoC_bot.py","file_name":"PoC_bot.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"382560677","text":"from collections import defaultdict\nclass Solution:\n def groupStrings(self, strings: List[str]) -> List[List[str]]:\n grouped_string, res = defaultdict(list), []\n for string in strings:\n key = [ord(s) - ord(string[0]) for s in string]\n key = tuple([number + 26 if number < 0 else number for number in key])\n grouped_string[key].append(string)\n for key in grouped_string:\n res.append(grouped_string[key])\n return res ","sub_path":"hash_table/group_shifted_strings.py","file_name":"group_shifted_strings.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"224195736","text":"\n#-*- coding: UTF-8 -*-\nimport matplotlib.pyplot as plt\n\ndef scatter_test():\n #define points list\n points = [(10, 20), (25, 40), (80, 60), (60, 90), (10, 20), (80, 90), (50, 60), (30, 80)]\n x, y = zip(*points)\n\n plt.figure()\n plt.scatter(x, y)\n plt.show()\n\n return\n\ndef main():\n scatter_test()\n return\n\nif __name__ == \"__main__\":\n main()","sub_path":"practice/Lesson_04/Lesson_04_visual.py","file_name":"Lesson_04_visual.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"53337597","text":"#\n# minmax.py\n#\n# Copyright © 2010-2015, 2017 Monotype Imaging Inc. All Rights Reserved.\n#\n\n\"\"\"\nDefinitions relating to MinMax subtables in OpenType 'BASE' tables.\n\"\"\"\n\n# System imports\nimport functools\nimport logging\nimport operator\n\n# Other imports\nfrom fontio3.BASE import coordinate, minmax_record, minmax_recorddict\nfrom fontio3.fontdata import simplemeta\n\n# -----------------------------------------------------------------------------\n\n#\n# Classes\n#\n\nclass MinMax(object, metaclass=simplemeta.FontDataMetaclass):\n \"\"\"\n Objects representing minimum and maximum extent values. 
These are simple\n collections of the following attributes:\n \n minCoord\n maxCoord\n featRecs\n \n >>> _testingValues[2].pprint(namer=namer.testingNamer())\n Minimum coordinate:\n Coordinate: -10\n Device table:\n Tweak at 12 ppem: -5\n Tweak at 13 ppem: -3\n Tweak at 14 ppem: -1\n Tweak at 18 ppem: 2\n Tweak at 20 ppem: 3\n Maximum coordinate:\n Coordinate: 0\n Glyph: xyz26\n Point: 9\n Feature-specific MinMax values:\n Feature 'abcd':\n Minimum coordinate:\n Coordinate: 0\n Maximum coordinate:\n Coordinate: 15\n Device table:\n Tweak at 12 ppem: -2\n Tweak at 14 ppem: -1\n Tweak at 18 ppem: 1\n Feature 'wxyz':\n Maximum coordinate:\n Coordinate: -10\n Glyph: xyz15\n Point: 12\n \n >>> obj = _testingValues[1].__deepcopy__()\n >>> CS = coordinate_simple.Coordinate_simple\n >>> obj.minCoord = CS(-20000)\n >>> obj.maxCoord = CS(20000)\n >>> logger = utilities.makeDoctestLogger(\"minmax_test\")\n >>> e = _fakeEditor()\n >>> obj.isValid(logger=logger, editor=e)\n minmax_test.maxCoord - WARNING - The FUnit value 20000 is more than two ems away from the origin, which seems unlikely.\n minmax_test.minCoord - WARNING - The FUnit value -20000 is more than two ems away from the origin, which seems unlikely.\n True\n \"\"\"\n \n #\n # Class definition variables\n #\n \n attrSpec = dict(\n minCoord = dict(\n attr_followsprotocol = True,\n attr_label = \"Minimum coordinate\"),\n \n maxCoord = dict(\n attr_followsprotocol = True,\n attr_label = \"Maximum coordinate\"),\n \n featRecs = dict(\n attr_followsprotocol = True,\n attr_initfunc = minmax_recorddict.RecordDict,\n attr_label = \"Feature-specific MinMax values\",\n attr_showonlyiftrue = True))\n \n attrSorted = ('minCoord', 'maxCoord', 'featRecs')\n \n #\n # Methods\n #\n \n def buildBinary(self, w, **kwArgs):\n \"\"\"\n Adds the binary data for the MinMax object to the specified LinkedWriter.\n \n >>> utilities.hexdump(_testingValues[2].binaryString())\n 0 | 0016 002A 0002 6162 6364 0032 001C 7778 |...*..abcd.2..wx|\n 10 | 797A 0000 0022 0003 FFF6 0020 0003 000F |yz...\"..... ....|\n 20 | 0026 0002 FFF6 000E 000C 0002 0000 0019 |.&..............|\n 30 | 0009 0001 0000 000C 0014 0002 BDF0 0020 |............... |\n 40 | 3000 000C 0012 0001 8C04 |0......... 
|\n \"\"\"\n \n if 'stakeValue' in kwArgs:\n stakeValue = kwArgs.pop('stakeValue')\n w.stakeCurrentWithValue(stakeValue)\n else:\n stakeValue = w.stakeCurrent()\n \n coordPool = kwArgs.get('coordinatePool', {})\n doLocalCoords = 'coordinatePool' not in kwArgs\n devPool = kwArgs.pop('devicePool', {})\n doLocalDevs = 'devicePool' not in kwArgs\n \n for obj in (self.minCoord, self.maxCoord):\n if obj is not None:\n immut = obj.asImmutable(**kwArgs)\n \n if immut not in coordPool:\n coordPool[immut] = (obj, w.getNewStake())\n \n w.addUnresolvedOffset(\"H\", stakeValue, coordPool[immut][1])\n \n else:\n w.add(\"H\", 0)\n \n w.add(\"H\", len(self.featRecs))\n ig0 = operator.itemgetter(0)\n \n for key, rec in sorted(self.featRecs.items(), key=ig0):\n w.add(\"4s\", key)\n \n for obj in (rec.minCoord, rec.maxCoord):\n if obj is not None:\n immut = obj.asImmutable(**kwArgs)\n \n if immut not in coordPool:\n coordPool[immut] = (obj, w.getNewStake())\n \n w.addUnresolvedOffset(\"H\", stakeValue, coordPool[immut][1])\n \n else:\n w.add(\"H\", 0)\n \n if doLocalCoords:\n for immut, (obj, stake) in sorted(coordPool.items(), key=ig0):\n obj.buildBinary(\n w,\n stakeValue = stake,\n devicePool = devPool,\n **kwArgs)\n \n if doLocalDevs:\n keyFunc = (lambda x: sorted(x[0][1]))\n \n for immut, (obj, stake) in sorted(devPool.items(), key=keyFunc):\n obj.buildBinary(w, stakeValue=stake, **kwArgs)\n \n @classmethod\n def fromvalidatedwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a new MinMax object from the specified walker,\n doing source validation.\n \n >>> s = _testingValues[2].binaryString()\n >>> logger = utilities.makeDoctestLogger(\"minmax_fvw\")\n >>> fvb = MinMax.fromvalidatedbytes\n >>> obj = fvb(s, logger=logger)\n minmax_fvw.minmax - DEBUG - Walker has 74 remaining bytes.\n minmax_fvw.minmax.minimum.coordinate - DEBUG - Coordinate format 3.\n minmax_fvw.minmax.minimum.coordinate_device - DEBUG - Walker has 52 remaining bytes.\n minmax_fvw.minmax.minimum.coordinate_device.device - DEBUG - Walker has 20 remaining bytes.\n minmax_fvw.minmax.minimum.coordinate_device.device - DEBUG - StartSize=12, endSize=20, format=2\n minmax_fvw.minmax.minimum.coordinate_device.device - DEBUG - Data are (48624, 32, 12288)\n minmax_fvw.minmax.maximum.coordinate - DEBUG - Coordinate format 2.\n minmax_fvw.minmax.maximum.coordinate_point - DEBUG - Walker has 32 remaining bytes.\n minmax_fvw.minmax.tag 'abcd'.coordinate - DEBUG - Coordinate format 1.\n minmax_fvw.minmax.tag 'abcd'.coordinate_simple - DEBUG - Walker has 24 remaining bytes.\n minmax_fvw.minmax.tag 'abcd'.coordinate - DEBUG - Coordinate format 3.\n minmax_fvw.minmax.tag 'abcd'.coordinate_device - DEBUG - Walker has 46 remaining bytes.\n minmax_fvw.minmax.tag 'abcd'.coordinate_device.device - DEBUG - Walker has 8 remaining bytes.\n minmax_fvw.minmax.tag 'abcd'.coordinate_device.device - DEBUG - StartSize=12, endSize=18, format=1\n minmax_fvw.minmax.tag 'abcd'.coordinate_device.device - DEBUG - Data are (35844,)\n minmax_fvw.minmax.tag 'wxyz'.coordinate - DEBUG - Coordinate format 2.\n minmax_fvw.minmax.tag 'wxyz'.coordinate_point - DEBUG - Walker has 40 remaining bytes.\n >>> obj == _testingValues[2]\n True\n \n >>> fvb(s[:3], logger=logger)\n minmax_fvw.minmax - DEBUG - Walker has 3 remaining bytes.\n minmax_fvw.minmax - ERROR - Insufficient bytes.\n \"\"\"\n \n logger = kwArgs.pop('logger', logging.getLogger())\n logger = logger.getChild(\"minmax\")\n \n logger.debug((\n 'V0001',\n (w.length(),),\n \"Walker has %d remaining bytes.\"))\n \n if 
w.length() < 6:\n logger.error(('V0004', (), \"Insufficient bytes.\"))\n return None\n \n minOffset, maxOffset, featCount = w.unpack(\"3H\")\n d = {}\n fvw = coordinate.Coordinate_validated\n \n if minOffset:\n obj = fvw(\n w.subWalker(minOffset),\n logger = logger.getChild(\"minimum\"),\n **kwArgs)\n \n if obj is None:\n return None\n \n d['minCoord'] = obj\n \n if maxOffset:\n obj = fvw(\n w.subWalker(maxOffset),\n logger = logger.getChild(\"maximum\"),\n **kwArgs)\n \n if obj is None:\n return None\n \n d['maxCoord'] = obj\n \n if featCount:\n if w.length() < 8 * featCount:\n logger.error((\n 'V0640',\n (),\n \"The feature records are missing or incomplete.\"))\n \n return None\n \n featDict = minmax_recorddict.RecordDict()\n \n for tag, minOffset, maxOffset in w.group(\"4s2H\", featCount):\n dd = {}\n itemLogger = logger.getChild(\n \"tag '%s'\" % (utilities.ensureUnicode(tag),))\n \n if minOffset:\n obj = fvw(\n w.subWalker(minOffset),\n logger = itemLogger,\n **kwArgs)\n \n if obj is None:\n return None\n \n dd['minCoord'] = obj\n \n if maxOffset:\n obj = fvw(\n w.subWalker(maxOffset),\n logger = itemLogger,\n **kwArgs)\n \n if obj is None:\n return None\n \n dd['maxCoord'] = obj\n \n if dd:\n featDict[tag] = minmax_record.Record(**dd)\n \n d['featRecs'] = featDict\n \n return cls(**d)\n \n @classmethod\n def fromwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a MinMax object from the specified walker.\n \n >>> for i in range(3):\n ... obj = _testingValues[i]\n ... obj == MinMax.frombytes(obj.binaryString())\n True\n True\n True\n \"\"\"\n \n minOffset, maxOffset, featCount = w.unpack(\"3H\")\n d = {}\n fw = coordinate.Coordinate\n \n if minOffset:\n d['minCoord'] = fw(w.subWalker(minOffset), **kwArgs)\n \n if maxOffset:\n d['maxCoord'] = fw(w.subWalker(maxOffset), **kwArgs)\n \n if featCount:\n d['featRecs'] = featDict = minmax_recorddict.RecordDict()\n \n for tag, minOffset, maxOffset in w.group(\"4s2H\", featCount):\n dd = {}\n \n if minOffset:\n dd['minCoord'] = fw(w.subWalker(minOffset), **kwArgs)\n \n if maxOffset:\n dd['maxCoord'] = fw(w.subWalker(maxOffset), **kwArgs)\n \n featDict[tag] = minmax_record.Record(**dd)\n \n return cls(**d)\n\n# -----------------------------------------------------------------------------\n\n#\n# Test code\n#\n\nif 0:\n def __________________(): pass\n\nif __debug__:\n from fontio3 import utilities\n from fontio3.BASE import coordinate_simple\n from fontio3.utilities import namer\n \n def _fakeEditor():\n from fontio3.head import head\n \n e = utilities.fakeEditor(0x10000)\n e.head = head.Head()\n return e\n \n _cv = coordinate._testingValues\n _dv = minmax_recorddict._testingValues\n \n _testingValues = (\n MinMax(),\n MinMax(minCoord=_cv[2], maxCoord=_cv[3]),\n MinMax(minCoord=_cv[6], maxCoord=_cv[3], featRecs=_dv[1]))\n \n del _cv, _dv\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n if __debug__:\n _test()\n","sub_path":"fontio3/fontio3/BASE/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":11945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"176247700","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport copy\nimport logging\nimport numpy as np\nfrom astropy.convolution import Tophat2DKernel\nfrom astropy.coordinates import Angle\nfrom gammapy.datasets import MapDataset, MapDatasetOnOff\nfrom gammapy.maps import Map\nfrom gammapy.stats import CashCountsStatistic, WStatCountsStatistic\nfrom 
.core import Estimator\n\n__all__ = [\n \"ExcessMapEstimator\",\n]\n\nlog = logging.getLogger(__name__)\n\n\ndef convolved_map_dataset_counts_statistics(dataset, kernel):\n \"\"\"Return CountsDataset objects containing smoothed maps from the MapDataset\"\"\"\n # Kernel is modified later; make a copy here\n kernel = copy.deepcopy(kernel)\n kernel.normalize(\"peak\")\n\n # fft convolution adds numerical noise, to ensure integer results we call\n # np.rint\n n_on_conv = np.rint(dataset.counts.convolve(kernel.array).data)\n\n if isinstance(dataset, MapDatasetOnOff):\n background = dataset.background\n background.data[dataset.acceptance_off.data == 0] = 0.0\n background_conv = background.convolve(kernel.array).data\n\n n_off_conv = dataset.counts_off.convolve(kernel.array).data\n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n alpha_conv = background_conv / n_off_conv\n\n return WStatCountsStatistic(n_on_conv.data, n_off_conv.data, alpha_conv.data)\n else:\n background_conv = dataset.npred().convolve(kernel.array).data\n return CashCountsStatistic(n_on_conv.data, background_conv.data)\n\n\nclass ExcessMapEstimator(Estimator):\n \"\"\"Computes correlated excess, significance and errors for MapDatasets.\n\n Parameters\n ----------\n correlation_radius : ~astropy.coordinates.Angle\n Correlation radius to use.\n nsigma : float\n Confidence level for the asymmetric errors expressed in number of sigma.\n Default is 1.\n nsigma_ul : float\n Confidence level for the upper limits expressed in number of sigma.\n Default is 3.\n \"\"\"\n tag = \"ExcessMapEstimator\"\n\n def __init__(self, correlation_radius=\"0.1 deg\", nsigma=1, nsigma_ul=3):\n self.correlation_radius = correlation_radius\n self.nsigma = nsigma\n self.nsigma_ul = nsigma_ul\n\n @property\n def correlation_radius(self):\n return self._correlation_radius\n\n @correlation_radius.setter\n def correlation_radius(self, correlation_radius):\n \"\"\"Sets radius\"\"\"\n self._correlation_radius = Angle(correlation_radius)\n\n def run(self, dataset, steps=\"all\"):\n \"\"\"Compute correlated excess, Li & Ma significance and flux maps\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`\n input image-like dataset\n steps : list of str\n Which steps to execute. Available options are:\n\n * \"ts\": estimate delta TS and significance\n * \"err\": estimate symmetric error\n * \"errn-errp\": estimate asymmetric errors.\n * \"ul\": estimate upper limits.\n\n By default all steps are executed.\n\n Returns\n -------\n images : dict\n Dictionary containing result correlated maps. 
Keys are:\n\n * counts : correlated counts map\n * background : correlated background map\n * excess : correlated excess map\n * ts : delta TS map\n * significance : sqrt(delta TS), or Li-Ma significance map\n * err : symmetric error map (from covariance)\n * errn : negative error map\n * errp : positive error map\n * ul : upper limit map\n\n \"\"\"\n if not isinstance(dataset, MapDataset):\n raise ValueError(\"Unsupported dataset type\")\n\n pixel_size = np.mean(np.abs(dataset.counts.geom.wcs.wcs.cdelt))\n size = self.correlation_radius.deg / pixel_size\n kernel = Tophat2DKernel(size)\n\n geom = dataset.counts.geom\n\n self.counts_stat = convolved_map_dataset_counts_statistics(dataset, kernel)\n\n n_on = Map.from_geom(geom, data=self.counts_stat.n_on)\n bkg = Map.from_geom(geom, data=self.counts_stat.n_on - self.counts_stat.excess)\n excess = Map.from_geom(geom, data=self.counts_stat.excess)\n\n result = {\"counts\": n_on, \"background\": bkg, \"excess\": excess}\n\n if steps == \"all\":\n steps = [\"ts\", \"err\", \"errn-errp\", \"ul\"]\n\n if \"ts\" in steps:\n tsmap = Map.from_geom(geom, data=self.counts_stat.delta_ts)\n significance = Map.from_geom(geom, data=self.counts_stat.significance)\n result.update({\"ts\": tsmap, \"significance\": significance})\n\n if \"err\" in steps:\n err = Map.from_geom(geom, data=self.counts_stat.error)\n result.update({\"err\": err})\n\n if \"errn-errp\" in steps:\n errn = Map.from_geom(geom, data=self.counts_stat.compute_errn(self.nsigma))\n errp = Map.from_geom(geom, data=self.counts_stat.compute_errp(self.nsigma))\n result.update({\"errn\": errn, \"errp\": errp})\n\n if \"ul\" in steps:\n ul = Map.from_geom(\n geom, data=self.counts_stat.compute_upper_limit(self.nsigma_ul)\n )\n result.update({\"ul\": ul})\n return result\n","sub_path":"gammapy/estimators/excess_map.py","file_name":"excess_map.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"329004283","text":"#!/usr/bin/python\nimport socket\nimport subprocess\nHOST = '2401:ec00:21:7:99b9:2dbe:374:2330'\nPORT = 12345\ns= socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(1)\nprint(\"serving....\")\nrun=True\nwhile run:\n print(\"Waiting for a connection....\")\n user, addr = s.accept()\n print('Connected by', addr)\n while 1:\n data = user.recv(1024)\n command=data.decode()\n if command==\"close\":\n user.close()\n break\n if command==\"close server\":\n run=False\n print(\"close server\")\n break\n p = subprocess.Popen(command, shell=True,stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n print(command)\n if p.wait(10) == 0:\n user.sendall('Done.'.encode())\n else:\n user.sendall('Failed'.encode()) \n result=\"result\\n\"\n for line in p.stdout.readlines():\n result+=line.decode('GBK')\n user.sendall(result.encode('GBK'))\n \n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"557231013","text":"\n\n#defining a function\ndef celciusToFahrenheit():\n myInput = int(input(\"Enter a temperature in Celsius : \"))\n tempInFahrenheit = (myInput*9 + 160) / 5 # algebraically the same as C*9/5 + 32\n print(\"The temp of \" , myInput , \"in Celsius is \" ,\n tempInFahrenheit , \"in Fahrenheit\")\n\n\n
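# Inverse conversion: ((F - 32)/9)*5 below equals the familiar (F - 32)*5/9.\n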
def fahrenheitToCelcius():\n myInput = int(input(\"Enter a temperature in Fahrenheit : \"))\n tempInCelcius = ((myInput - 32)/9)*5\n print(\"The temp of \" , myInput , \"in Fahrenheit is\" ,\n tempInCelcius , \"in Celsius\")\n\noptionSelected = input(\"Enter 'c' for output in Celsius and \"\n \"enter 'f' for output in Fahrenheit\")\n#calling a function\nif(optionSelected == 'c'):\n fahrenheitToCelcius()\nelif(optionSelected == 'f'):\n celciusToFahrenheit()\nelse:\n print(\"You selected an invalid option\")\n\n","sub_path":"Temperature_Conversion_function.py","file_name":"Temperature_Conversion_function.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"593149762","text":"from PIL import Image, ImageDraw,ImageFont\nfrom google_images_download import google_images_download as imgdl\n\ndef getImages(nameString):\n nsize = 256, 256\n\n # #Downloads images of people -> swap the keyword for the argument\n response = imgdl.googleimagesdownload()\n\n arguments = {\"keywords\":nameString, \"limit\":1, \"size\":\"medium\"}\n paths = response.download(arguments)\n\n\n #Opens the images and writes on them whom each image shows.\n for name, path in paths[0].items():\n try:\n imgPath = path[0]\n img = Image.open(imgPath)\n font = ImageFont.truetype(\"arial.ttf\", 75)\n draw = ImageDraw.Draw(img)\n draw.text((0,0), name, font=font, fill=\"black\")\n img.thumbnail(nsize)\n img.show()\n except:\n print(\"Kunde inte visa bilden\")","sub_path":"TestDownload.py","file_name":"TestDownload.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"291686961","text":"import cv2\n\n# Create a VideoCapture object and read from input file\n# If the input is the camera, pass 0 instead of the video file name\ncap = cv2.VideoCapture('/home/nickioan/Downloads/raw_video_feed.mp4')\n \n# Check if camera opened successfully\nif (cap.isOpened()== False): \n print(\"Error opening video stream or file\")\n \n# Read until video is completed\nwhile(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n \n # Display the resulting frame\n img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n threshold = 64\n _, img_bin = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY)\n img_copy = img_bin[220,:]\n\n flag = 0\n #Check if last index is included\n for i in range(320):\n px = img_copy[i]\n if px == 0 and flag == 0:\n start = i\n flag = 1\n if flag == 1 and px == 255:\n end = i\n break\n\n center = int((end + start)/2)\n final_img = cv2.circle(frame,(center,220),12,(0,0,255),-1)\n cv2.imshow('Frame',final_img)\n \n # Press Q on keyboard to exit\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n \n # Break the loop\n else: \n break\n \n# When everything done, release the video capture object\ncap.release()\n \n# Closes all the frames\ncv2.destroyAllWindows()\n","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"484110190","text":"# Print the error \"x\" times.\nimport os\n#input\nx=int(os.sys.argv[1])\n#data validation\nx_invalido=(x<0)\nwhile(x_invalido):\n x=int(input(\"ingrese valor correcto de x:\"))\n x_invalido = (x < 0)\n#processing\ni=0\n#while loop\nwhile(i<=x):\n print(\"Error\")\n i +=1\n\n#end while\n\n\n\n","sub_path":"angulo/bucle_iteracion/ejer5.py","file_name":"ejer5.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"414837998","text":"# 
lcd160_gui.py Micropython GUI library for LCD160CR displays\n\n# Released under the MIT License (MIT). See LICENSE.\n# Copyright (c) 2016-2020 Peter Hinch\n\nimport framebuf\nfrom uctypes import bytearray_at, addressof\nfast_mode = False\ntry:\n from gui.framebuf_utils.framebuf_utils import render\n fast_mode = True\n print('Using fast mode')\nexcept ValueError:\n print('Ignoring framebuf_utils.mpy: compiled for incorrect architecture.')\nexcept ImportError:\n pass\n\nimport uasyncio as asyncio\nimport gc\nfrom gui.core.lcd160cr import LCD160CR\nfrom gui.primitives.delay_ms import Delay_ms\nfrom gui.core.constants import *\ngc.collect()\n\n# *********** UTILITY FUNCTIONS ***********\n\nclass _A():\n pass\n\nClassType = type(_A)\n\nclass UguiException(Exception):\n pass\n\n# Null function\ndolittle = lambda *_ : None\n\nasync def _g():\n pass\ntype_coro = type(_g())\n\n# *********** INTERNAL FONTS ***********\n\nclass IFont:\n size = ((4, 5), (6, 7), (8, 8), (9, 13)) # (w, h) for each font\n def __init__(self, family, scale=0, bold_h=0, bold_v=0):\n self.bold = (bold_h & 3) | ((bold_v & 3) << 2)\n self.scale = scale\n self.width = (scale + 1) * self.size[family][0]\n self.vheight = (scale + 1) * self.size[family][1]\n self.family = family\n\n def stringsize(self, s):\n return len(s) * self.width, self.vheight\n\n def render(self, tft, x, y, s, style):\n tft.set_pos(x, y)\n tft.set_text_color(tft.rgb(*style[0]), tft.rgb(*style[1]))\n tft.set_font(self.family, self.scale, self.bold, 0, 0)\n tft.write(s)\n\n def height(self):\n return self.vheight\n\n def max_width(self):\n return self.width\n\n def hmap(self):\n return True\n\n def reverse(self):\n return False\n\n def monospaced(self):\n return True\n\n# *********** STRINGS ***********\n\ndef get_stringsize(s, font):\n if isinstance(font, IFont):\n return font.stringsize(s)\n hor = 0\n for c in s:\n _, vert, cols = font.get_ch(c)\n hor += cols\n return hor, vert\n\n\n# Style is (fgcolor, bgcolor, font)\ndef print_centered(tft, x, y, s, style):\n font = style[2]\n length, height = get_stringsize(s, font)\n x, y = max(x - length // 2, 0), max(y - height // 2, 0)\n if isinstance(font, IFont):\n return font.render(tft, x, y, s, style)\n tft.text_style(style)\n tft.set_text_pos(x, y)\n tft.print_string(s)\n\n# Style is (fgcolor, bgcolor, font)\n# Rudimentary: prints a single line.\ndef print_left(tft, x, y, s, style, tab=32):\n if s == '':\n return\n tft.text_style(style)\n tft.set_text_pos(x, y)\n font = style[2]\n if isinstance(font, IFont): # Tabs unsupported for internal fonts\n return font.render(tft, x, y, s, style)\n tft.print_string(s, tab=tab)\n\n# *********** LCD160CR_G CLASS ************\n\n\n# Subclass LCD160CR to enable greying out of controls and to provide extra methods\nclass LCD160CR_G(LCD160CR):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if 'bufsize' in kwargs:\n bs = kwargs['bufsize']\n else:\n bs = 1058 # font14 is 23*23 pixels\n self.glyph_buf = bytearray(bs)\n self._is_grey = False\n self.dim(2) # Default grey-out: dim colors by factor of 2\n self.desaturate(True)\n # Default colors. These never change. 
They serve as global defaults.\n # bgcolor is also used to blank screen areas.\n self.fgcolor = (WHITE)\n self.bgcolor = (BLACK)\n self.text_fgcolor = self.fgcolor # colors set by user\n self.text_bgcolor = self.bgcolor\n self.text_fgc = self.fgcolor # colors used by text rendering allowing for grey status\n self.text_bgc = self.bgcolor\n self.text_font = IFont(3) # Default\n\n def get_fgcolor(self):\n return self.fgcolor\n\n def get_bgcolor(self):\n return self.bgcolor\n\n def _setcolor(self, color):\n if self._is_grey:\n color = self._greyfunc(color, self._factor)\n lf = self.rgb(*color)\n self.set_pen(lf, lf) # line and fill colors are the same\n\n def desaturate(self, value=None):\n if value is not None:\n self._desaturate = value\n def do_dim(color, factor): # Dim a color\n if color is not None:\n return tuple(int(x / factor) for x in color)\n\n def do_desat(color, factor): # Desaturate and dim\n if color is not None:\n f = int(max(color) / factor)\n return (f, f, f)\n # Specify the local function\n self._greyfunc = do_desat if value else do_dim\n return self._desaturate\n\n def dim(self, factor=None):\n if factor is not None:\n if factor <= 1:\n raise ValueError('Dim factor must be > 1')\n self._factor = factor\n return self._factor\n\n def usegrey(self, val): # tft.usegrey(True) sets greyed-out\n self._is_grey = val\n\n # self.rect() doesn't do the same thing - seems to draw > 1 pixel wide\n def draw_rectangle(self, x1, y1, x2, y2, color):\n self._setcolor(color)\n self.draw_hline(x1, y1, x2 - x1)\n self.draw_hline(x1, y2, x2 - x1)\n self.draw_vline(x1, y1, y2 - y1)\n self.draw_vline(x2, y1, y2 - y1)\n\n def fill_rectangle(self, x1, y1, x2, y2, color):\n width = x2 - x1 + 1\n height = y2 - y1 + 1\n self._setcolor(color)\n self.rect(x1, y1, width, height)\n\n def draw_clipped_rectangle(self, x1, y1, x2, y2, color):\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n if (x2-x1) > 4 and (y2-y1) > 4:\n self._setcolor(color)\n self.dot(x1 + 2, y1 + 1)\n self.dot(x1 + 1, y1 + 2)\n self.dot(x2 - 2, y1 + 1)\n self.dot(x2 - 1, y1 + 2)\n self.dot(x1 + 2, y2 - 1)\n self.dot(x1 + 1, y2 - 2)\n self.dot(x2 - 2, y2 - 1)\n self.dot(x2 - 1, y2 - 2)\n self.draw_hline(x1 + 3, y1, x2 - x1 - 5)\n self.draw_hline(x1 + 3, y2, x2 - x1 - 5)\n self.draw_vline(x1, y1 + 3, y2 - y1 - 5)\n self.draw_vline(x2, y1 + 3, y2 - y1 - 5)\n\n def fill_clipped_rectangle(self, x1, y1, x2, y2, color):\n if x1 > x2:\n t = x1; x1 = x2; x2 = t\n if y1 > y2:\n t = y1; y1 = y2; y2 = t\n if (x2-x1) > 4 and (y2-y1) > 4:\n self._setcolor(color)\n for i in range(((y2 - y1) // 2) + 1):\n if i == 0:\n self.draw_hline(x1 + 3, y1 + i, x2 - x1 - 5)\n self.draw_hline(x1 + 3, y2 - i, x2 - x1 - 5)\n elif i == 1:\n self.draw_hline(x1 + 2, y1 + i, x2 - x1 - 3)\n self.draw_hline(x1 + 2, y2 - i, x2 - x1 - 3)\n elif i == 2:\n self.draw_hline(x1 + 1, y1 + i, x2 - x1 - 1)\n self.draw_hline(x1 + 1, y2 - i, x2 - x1 - 1)\n else:\n self.draw_hline(x1, y1 + i, x2 - x1 + 1)\n self.draw_hline(x1, y2 - i, x2 - x1 + 1)\n\n def draw_circle(self, x, y, radius, color):\n x = int(x)\n y = int(y)\n radius = int(radius)\n self._setcolor(color)\n f = 1 - radius\n ddF_x = 1\n ddF_y = -2 * radius\n x1 = 0\n y1 = radius\n\n self.dot(x, y + radius)\n self.dot(x, y - radius)\n self.dot(x + radius, y)\n self.dot(x - radius, y)\n\n while x1 < y1:\n if f >= 0:\n y1 -= 1\n ddF_y += 2\n f += ddF_y\n x1 += 1\n ddF_x += 2\n f += ddF_x\n self.dot(x + x1, y + y1)\n self.dot(x - x1, y + y1)\n self.dot(x + x1, y - y1)\n self.dot(x - x1, y - y1)\n self.dot(x + y1, y 
+ x1)\n self.dot(x - y1, y + x1)\n self.dot(x + y1, y - x1)\n self.dot(x - y1, y - x1)\n\n # pen color has been set by caller\n def fill_circle(self, x, y, radius, color):\n self._setcolor(color)\n x = int(x)\n y = int(y)\n radius = int(radius)\n r_square = radius * radius * 4\n for y1 in range (-(radius * 2), 1):\n y_square = y1 * y1\n for x1 in range (-(radius * 2), 1):\n if x1 * x1 + y_square <= r_square:\n x1i = x1 // 2\n y1i = y1 // 2\n self.draw_hline(x + x1i, y + y1i, 2 * -x1i)\n self.draw_hline(x + x1i, y - y1i, 2 * -x1i)\n break;\n\n # Save and restore a rect region to a 16 bit array.\n # Regions are inclusive of start and end (to match fill_rectangle)\n\n def save_region(self, buf, x0, y0, x1, y1):\n self.screen_dump(buf, x0, y0, x1 - x0 + 1, y1 - y0 + 1)\n\n # 1.1ms\n def restore_region(self, buf, x0, y0, x1, y1):\n self.set_spi_win(x0, y0, x1 - x0 + 1, y1 - y0 + 1)\n self.show_framebuf(buf)\n\n def set_text_pos(self, x, y):\n self.text_y = y\n self.text_x = x\n\n # Get or set the text style (fgcolor, bgcolor, font)\n # colors are (r, g, b)\n # Sets self.text_bgc and self.text_fgc for rendering methods\n def text_style(self, style=None):\n if style is not None:\n if self._is_grey:\n self.text_bgc = self._greyfunc(style[1], self._factor)\n else:\n self.text_bgc = style[1]\n self.text_fgc = style[0]\n\n font = style[2]\n if not isinstance(font, IFont):\n if not font.hmap():\n raise UguiException('Font must be horizontally mapped')\n if font.height() * font.max_width() * 2 > len(self.glyph_buf):\n raise UguiException('Font too large for buffer')\n self.text_font = font # colors allow for disabled status\n\n return (self.text_fgc, self.text_bgc, self.text_font)\n\n return (self.text_fgcolor, self.text_bgcolor, self.text_font)\n\n def _newline(self, rows):\n self.text_x = 0\n self.text_y += rows\n\n def print_char(self, c, wrap, fgcolor, bgcolor, tab=32):\n# get the character's pixel bitmap and dimensions\n if self.text_font:\n glyph, rows, cols = self.text_font.get_ch(c)\n else:\n raise AttributeError('No font selected')\n if c == '\\n':\n self._newline(rows)\n return 0\n if c == '\\t':\n xs = self.text_x\n self.text_x += tab - self.text_x % tab\n return self.text_x - xs\n\n# test char fit\n if wrap:\n if self.text_x + cols >= self.w: # does the glyph fit on the screen?\n self._newline(rows) # wrap to next text row then print\n if self.text_x + cols >= self.w or self.text_y + rows >= self.h:\n return 0 # Glyph is not entirely on screen\n fbuf = framebuf.FrameBuffer(self.glyph_buf, cols, rows, framebuf.RGB565)\n if fast_mode:\n buf = bytearray_at(addressof(glyph), len(glyph)) # Object with buffer protocol\n fbc = framebuf.FrameBuffer(buf, cols, rows, framebuf.MONO_HLSB)\n render(fbuf, fbc, 0, 0, fgcolor, bgcolor)\n else:\n div, mod = divmod(cols, 8) # Horizontal mapping\n gbytes = div + 1 if mod else div # No. 
of bytes per row of glyph\n for row in range(rows):\n for col in range(cols):\n gbyte, gbit = divmod(col, 8)\n if gbit == 0: # Next glyph byte\n data = glyph[row * gbytes + gbyte]\n fbuf.pixel(col, row, fgcolor if data & (1 << (7 - gbit)) else bgcolor)\n\n self.set_spi_win(self.text_x, self.text_y, cols, rows)\n self.show_framebuf(fbuf)\n self.text_x += cols\n return cols\n\n def print_string(self, s, wrap=False, tab=32):\n fgcolor = self.rgb(*self.text_fgc)\n bgcolor = self.rgb(*self.text_bgc)\n length = 0\n for c in s:\n length += self.print_char(c, wrap, fgcolor, bgcolor, tab)\n return length\n\n# Convenience methods to ease porting from TFT\n# Draw a vertical line with 1 Pixel width, from x,y to x, y + l - 1\n def draw_vline(self, x, y, l, color=None):\n if color is not None: # caller hasn't issued _setcolor\n self._setcolor(color)\n self.line(x, y, x, y + l -1)\n\n# Draw a horizontal line with 1 Pixel width, from x,y to x + l - 1, y\n def draw_hline(self, x, y, l, color=None):\n if color is not None: # caller hasn't issued _setcolor\n self._setcolor(color)\n self.line(x, y, x + l - 1, y)\n\n def draw_line(self, x1, y1, x2, y2, color=None):\n if color is not None: # caller hasn't issued _setcolor\n self._setcolor(color)\n self.line(x1, y1, x2, y2)\n\n def clr_scr(self):\n self._setcolor((0, 0, 0))\n self.rect(0, 0, self.w, self.h)\n\n# *********** BASE CLASSES ***********\n\nclass Screen:\n current_screen = None\n tft = None\n is_shutdown = asyncio.Event()\n\n @classmethod\n def setup(cls, lcd):\n cls.tft = lcd\n\n# get_tft() when called from user code, ensure greyed_out status is updated.\n @classmethod\n def get_tft(cls, greyed_out=False):\n cls.tft.usegrey(greyed_out)\n return cls.tft\n\n @classmethod\n def set_grey_style(cls, *, desaturate=True, factor=2):\n cls.tft.dim(factor)\n cls.tft.desaturate(desaturate)\n if Screen.current_screen is not None: # Can call before instantiated\n for obj in Screen.current_screen.displaylist:\n if obj.visible and obj.greyed_out():\n obj.redraw = True # Redraw static content\n obj.draw_border()\n obj.show()\n\n @classmethod\n def show(cls):\n for obj in cls.current_screen.displaylist:\n if obj.visible: # In a buttonlist only show visible button\n obj.redraw = True # Redraw static content\n obj.draw_border()\n obj.show()\n\n @classmethod\n def change(cls, cls_new_screen, *, forward=True, args=[], kwargs={}):\n init = cls.current_screen is None\n if init:\n Screen() # Instantiate a blank starting screen\n else: # About to erase an existing screen\n for entry in cls.current_screen.tasklist:\n if entry[1]: # To be cancelled on screen change\n entry[0].cancel()\n cs_old = cls.current_screen\n cs_old.on_hide() # Optional method in subclass\n if forward:\n if type(cls_new_screen) is ClassType:\n new_screen = cls_new_screen(*args, **kwargs) # Instantiate new screen\n else:\n raise ValueError('Must pass Screen class or subclass (not instance)')\n new_screen.parent = cs_old\n cs_new = new_screen\n else:\n cs_new = cls_new_screen # An object, not a class\n cls.current_screen = cs_new\n cs_new.on_open() # Optional subclass method\n cs_new._do_open(cs_old) # Clear and redraw\n cs_new.after_open() # Optional subclass method\n if init:\n try:\n asyncio.run(Screen.monitor())\n finally:\n asyncio.new_event_loop()\n\n @classmethod\n async def monitor(cls):\n await cls.is_shutdown.wait()\n for entry in cls.current_screen.tasklist:\n entry[0].cancel()\n await asyncio.sleep_ms(0) # Allow subclass to cancel tasks\n cls.current_screen = None # Ensure another demo can 
run\n\n @classmethod\n def back(cls):\n parent = cls.current_screen.parent\n if parent is not None:\n cls.change(parent, forward = False)\n\n @classmethod\n def addobject(cls, obj):\n if cls.current_screen is None:\n raise OSError('You must create a Screen instance')\n if isinstance(obj, Touchable):\n cls.current_screen.touchlist.append(obj)\n cls.current_screen.displaylist.append(obj)\n\n @classmethod\n def shutdown(cls):\n cls.tft.clr_scr()\n cls.is_shutdown.set()\n cls.is_shutdown.clear()\n\n def __init__(self):\n self.touchlist = []\n self.displaylist = []\n self.tasklist = [] # Allow instance to register tasks for shutdown\n self.modal = False\n if Screen.current_screen is None: # Initialising class and coro\n tft = Screen.get_tft()\n if tft.text_font is None:\n raise UguiException('The lcd set_font method has not been called')\n asyncio.create_task(self._touchtest()) # One coro only\n asyncio.create_task(self._garbage_collect())\n Screen.current_screen = self\n self.parent = None\n\n async def _touchtest(self): # Singleton coro tests all touchable instances\n touch_panel = Screen.tft\n while True:\n await asyncio.sleep_ms(0)\n tl = Screen.current_screen.touchlist\n ids = id(Screen.current_screen)\n touched, x, y = touch_panel.get_touch()\n if touched:\n # The following fixes a problem with the driver/panel where the first\n # coordinates read are incorrect. Reading again after a delay seems to fix it\n await asyncio.sleep_ms(20)\n touched, xx, yy = touch_panel.get_touch()\n if touched: # Still touched: update x and y with the latest values\n x = xx\n y = yy\n for obj in iter(a for a in tl if a.visible and not a.greyed_out()):\n obj._trytouch(x, y) # Run user \"on press\" callback if touched\n if ids != id(Screen.current_screen): # cb may have changed screen\n break # get new touchlist\n else:\n for obj in iter(a for a in tl if a.was_touched):\n obj.was_touched = False # Call _untouched once only\n obj.busy = False\n obj._untouched() # Run \"on release\" callback\n\n def _do_open(self, old_screen): # Aperture overrides\n show_all = True\n tft = Screen.get_tft()\n# If opening a Screen from an Aperture just blank and redraw covered area\n if old_screen.modal:\n show_all = False\n x0, y0, x1, y1 = old_screen._list_dims()\n tft.fill_rectangle(x0, y0, x1, y1, tft.get_bgcolor()) # Blank to screen BG\n for obj in [z for z in self.displaylist if z.overlaps(x0, y0, x1, y1)]:\n if obj.visible:\n obj.redraw = True # Redraw static content\n obj.draw_border()\n obj.show()\n# Normally clear the screen and redraw everything\n else:\n tft.clr_scr()\n Screen.show()\n\n def on_open(self): # Optionally implemented in subclass\n return\n\n def after_open(self): # Optionally implemented in subclass\n return\n\n def on_hide(self): # Optionally implemented in subclass\n return\n\n def reg_task(self, task, on_change=False): # May be passed a coro or a Task\n if isinstance(task, type_coro):\n task = asyncio.create_task(task)\n self.tasklist.append([task, on_change])\n\n async def _garbage_collect(self):\n while True:\n await asyncio.sleep_ms(100)\n gc.collect()\n gc.threshold(gc.mem_free() // 4 + gc.mem_alloc())\n\n# Very basic window class. 
Cuts a rectangular hole in a screen on which content may be drawn\nclass Aperture(Screen):\n _value = None\n def __init__(self, location, height, width, *, draw_border=True, bgcolor=None, fgcolor=None):\n Screen.__init__(self)\n self.location = location\n self.height = height\n self.width = width\n self.draw_border = draw_border\n self.modal = True\n tft = Screen.get_tft()\n self.fgcolor = fgcolor if fgcolor is not None else tft.get_fgcolor()\n self.bgcolor = bgcolor if bgcolor is not None else tft.get_bgcolor()\n\n def locn(self, x, y):\n return (self.location[0] + x, self.location[1] + y)\n\n def _do_open(self, old_screen):\n tft = Screen.get_tft()\n x, y = self.location[0], self.location[1]\n tft.fill_rectangle(x, y, x + self.width, y + self.height, self.bgcolor)\n if self.draw_border:\n tft.draw_rectangle(x, y, x + self.width, y + self.height, self.fgcolor)\n Screen.show()\n\n def _list_dims(self):\n x0 = self.location[0]\n x1 = self.location[0] + self.width\n y0 = self.location[1]\n y1 = self.location[1] + self.height\n return x0, y0, x1, y1\n\n # Mechanism for passing the outcome of a modal dialog box to the calling screen.\n @classmethod\n def value(cls, val=None):\n if val is not None:\n cls._value = val\n return cls._value\n\n# Base class for all displayable objects\nclass NoTouch:\n _greyed_out = False # Disabled by user code\n def __init__(self, location, font, height, width, fgcolor, bgcolor, fontcolor, border, value, initial_value):\n Screen.addobject(self)\n self.screen = Screen.current_screen\n self.redraw = True # Force drawing of static part of image\n self.location = location\n self._value = value\n self._initial_value = initial_value # Optionally enables show() method to handle initialisation\n self.fontcolor = WHITE if fontcolor is None else fontcolor\n self.height = height\n self.width = width\n self.fill = bgcolor is not None\n self.visible = True # Used by ButtonList class for invisible buttons\n# self._greyed_out = False # Disabled by user code\n tft = Screen.get_tft(False) # Not greyed out\n if font is None:\n self.font = tft.text_font\n else:\n self.font = font\n\n if fgcolor is None:\n self.fgcolor = tft.get_fgcolor()\n if bgcolor is None:\n self.bgcolor = tft.get_bgcolor()\n else:\n self.bgcolor = bgcolor\n self.fontbg = self.bgcolor\n else:\n self.fgcolor = fgcolor\n if bgcolor is None:\n self.bgcolor = tft.get_bgcolor() # black surround to circle button etc\n self.fontbg = fgcolor # Fonts are drawn on bg of foreground color\n else:\n self.bgcolor = bgcolor\n self.fontbg = bgcolor\n\n self.text_style = tft.text_style((self.fontcolor, self.fontbg, self.font))\n self.border = 0 if border is None else int(max(border, 0)) # width\n self.bdcolor = self.fgcolor # Border is always drawn in original fgcolor\n self.callback = dolittle # Value change callback\n self.args = []\n self.cb_end = dolittle # Touch release callbacks\n self.cbe_args = []\n\n @property\n def tft(self):\n return Screen.get_tft(self._greyed_out)\n\n def greyed_out(self):\n return self._greyed_out # Subclass may be greyed out\n\n def value(self, val=None, show=True): # User method to get or set value\n if val is not None:\n if type(val) is float:\n val = min(max(val, 0.0), 1.0)\n if val != self._value:\n self._value = val\n self._value_change(show)\n return self._value\n\n def _value_change(self, show): # Optional override in subclass\n self.callback(self, *self.args) # CB is not a bound method. 
1st arg is self\n if show:\n self.show_if_current()\n\n def show_if_current(self):\n if self.screen is Screen.current_screen:\n self.show()\n\n# Called by Screen.show(). Draw background and bounding box if required\n def draw_border(self):\n if self.screen is Screen.current_screen:\n tft = self.tft\n x = self.location[0]\n y = self.location[1]\n if self.fill:\n tft.fill_rectangle(x, y, x + self.width, y + self.height, self.bgcolor)\n if self.border > 0: # Draw a bounding box\n tft.draw_rectangle(x, y, x + self.width, y + self.height, self.bdcolor)\n return self.border # border width in pixels\n\n def overlaps(self, xa, ya, xb, yb): # Args must be sorted: xb > xa and yb > ya\n x0 = self.location[0]\n y0 = self.location[1]\n x1 = x0 + self.width\n y1 = y0 + self.height\n if (ya <= y1 and yb >= y0) and (xa <= x1 and xb >= x0):\n return True\n return False\n\n# Base class for touch-enabled classes.\nclass Touchable(NoTouch):\n def __init__(self, location, font, height, width, fgcolor, bgcolor, fontcolor, border, can_drag, value, initial_value):\n super().__init__(location, font, height, width, fgcolor, bgcolor, fontcolor, border, value, initial_value)\n self.can_drag = can_drag\n self.busy = False\n self.was_touched = False\n\n def _set_callbacks(self, cb, args, cb_end=None, cbe_args=None):\n self.callback = cb\n self.args = args\n if cb_end is not None:\n self.cb_end = cb_end\n self.cbe_args = cbe_args\n\n def greyed_out(self, val=None):\n if val is not None and self._greyed_out != val:\n tft = self.tft\n tft.usegrey(val)\n self._greyed_out = val\n self.draw_border()\n self.redraw = True\n self.show_if_current()\n return self._greyed_out\n\n def _trytouch(self, x, y): # If touched in bounding box, process it otherwise do nothing\n x0 = self.location[0]\n x1 = self.location[0] + self.width\n y0 = self.location[1]\n y1 = self.location[1] + self.height\n if x0 <= x <= x1 and y0 <= y <= y1:\n self.was_touched = True # Cleared by Screen._touchtest\n if not self.busy or self.can_drag:\n self._touched(x, y) # Called repeatedly for draggable objects\n self.busy = True # otherwise once only\n\n def _untouched(self): # Default if not defined in subclass\n self.cb_end(self, *self.cbe_args) # Callback not a bound method so pass self\n","sub_path":"gui/core/lcd160_gui.py","file_name":"lcd160_gui.py","file_ext":"py","file_size_in_byte":26344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"503323432","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/migrations/versions/bba5a7cfc896_add_a_column_to_track_the_encryption_.py\n# Compiled at: 2019-09-11 02:44:38\n# Size of source mod 2**32: 1374 bytes\n\"\"\"Add a column to track the encryption state of the 'Extra' field in connection\n\nRevision ID: bba5a7cfc896\nRevises: bbc73705a13e\nCreate Date: 2016-01-29 15:10:32.656425\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = 'bba5a7cfc896'\ndown_revision = 'bbc73705a13e'\nbranch_labels = None\ndepends_on = None\n\ndef upgrade():\n op.add_column('connection', sa.Column('is_extra_encrypted', (sa.Boolean), default=False))\n\n\ndef downgrade():\n op.drop_column('connection', 
'is_extra_encrypted')","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/bba5a7cfc896_add_a_column_to_track_the_encryption_.cpython-36.py","file_name":"bba5a7cfc896_add_a_column_to_track_the_encryption_.cpython-36.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"623635162","text":"from PyQt5.QtWidgets import QMainWindow\r\nfrom PyQt5 import uic\r\nfrom PyQt5.QtWidgets import QTableWidgetItem\r\nfrom PyQt5.QtCore import Qt, QTimer, QTime\r\nimport pandas as pd\r\n\r\n# Assign the Qt Designer objects to form_class variables\r\nform_class_0345 = uic.loadUiType(\"pytrader_0345.ui\")[0] # real-time balance\r\nform_class_0156 = uic.loadUiType(\"pytrader_0156.ui\")[0] # real-time condition search\r\nform_class_4989 = uic.loadUiType(\"pytrader_4989.ui\")[0] # buy/sell/unfilled orders\r\n# real-time fills\r\n# daily returns, cumulative returns\r\n\r\nclass WindowSetting0345(QMainWindow, form_class_0345):\r\n    def __init__(self, kiwoom_instance, logger_):\r\n        super().__init__()\r\n        self.kiwoom = kiwoom_instance\r\n        self.logger = logger_\r\n        self.setupUi(self)\r\n        self.check_status()\r\n\r\n        # [0345] real-time balance calculation\r\n\r\n        # real-time registration\r\n        self.kiwoom.real_time_registration()\r\n        # self.auto_registration(20) # called once every 20 seconds\r\n\r\n        # render to the screen\r\n        self._display_profit_ratio()\r\n        self.auto_display(0.1) # called once every 0.1 s (or once every 0.5 s)\r\n\r\n    \"\"\" [0345] screen-layout methods \"\"\"\r\n    def _display_profit_ratio(self):\r\n        # self.logger.debug(\"display profit ratio\")\r\n        possession_df, summary_df = self.kiwoom.real_time_profit_ratio()\r\n\r\n        # [0345] real-time balance (totals)\r\n        self._set_table_widget(self.tableWidget_03451, summary_df)\r\n\r\n        # [0345] real-time balance (individual stocks); column keys follow the Kiwoom API and stay in Korean\r\n        possession_columns = ['종목코드', '종목명', '평가손익(추정)', '수익률(추정)', '보유량', '평가금액', '매매가능수량',\r\n                              '매수금액', '손익분기(추정)', '현재가', '수수료(추정)', '세금(추정)']\r\n        self._set_table_widget(self.tableWidget_03452, possession_df[possession_columns])\r\n\r\n    @staticmethod\r\n    def _set_table_widget(table_widget, table_df):\r\n        table_widget_columns = table_df.columns\r\n        item_count = len(table_df[table_widget_columns[0]])\r\n        table_widget.setRowCount(item_count)\r\n        for row in range(item_count):\r\n            for col, key in enumerate(table_widget_columns):\r\n                item = QTableWidgetItem(str(table_df[key][row]))\r\n                item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)\r\n                table_widget.setItem(row, col, item)\r\n        table_widget.resizeRowsToContents()\r\n\r\n    \"\"\" timeout helper methods below \"\"\"\r\n\r\n    def check_status(self):\r\n        # fire an event once per second\r\n        timer = QTimer(self)\r\n        timer.start(1000)\r\n        timer.timeout.connect(self._timeout)\r\n\r\n    def _timeout(self):\r\n        current_time = QTime.currentTime()\r\n        text_time = current_time.toString(\"hh:mm:ss\")\r\n        time_msg = \"current time: \" + text_time\r\n        state = self.kiwoom.get_connect_state()\r\n        if state == 1:\r\n            state_msg = \"server connection OK\"\r\n        else:\r\n            state_msg = \"server connection lost\"\r\n        self.statusbar.showMessage(state_msg + \" | \" + time_msg)\r\n\r\n    def auto_registration(self, seconds):\r\n        timer2 = QTimer(self)\r\n        timer2.start(1000 * seconds) # 20 seconds\r\n        timer2.timeout.connect(self._timeout2)\r\n\r\n    def _timeout2(self):\r\n        self.kiwoom.real_time_registration()\r\n\r\n    def auto_display(self, seconds):\r\n        timer3 = QTimer(self)\r\n        timer3.start(1000 * seconds) # 0.1 seconds\r\n        timer3.timeout.connect(self._timeout3)\r\n\r\n    def _timeout3(self):\r\n        self._display_profit_ratio()\r\n\r\n\r\nclass WindowSetting0156(QMainWindow, form_class_0156):\r\n    \"\"\"\r\n    Buy conditions\r\n    1. The rate of volume increase must be at least n times steeper than average.\r\n    2. Outstanding buy volume must exceed outstanding sell volume (?)\r\n    3. The price must not be more than 2% above the open.\r\n    \"\"\"\r\n    def __init__(self, kiwoom_instance, logger_):\r\n        super().__init__()\r\n        self.kiwoom = kiwoom_instance\r\n        self.logger = logger_\r\n        self.subscribed_df = pd.DataFrame()\r\n\r\n        self.setupUi(self)\r\n        self.check_status()\r\n\r\n        # fetch the list of condition expressions\r\n        condition_name_dict = self._load_condition_name()\r\n        self.listWidget_01561.addItems(list(condition_name_dict.keys()))\r\n\r\n        # handle click events\r\n        self.listWidget_01561.setAlternatingRowColors(True)\r\n        self.listWidget_01562.setAlternatingRowColors(True)\r\n\r\n        self.toolButton_01561.clicked.connect(self.go_right)\r\n        self.toolButton_01562.clicked.connect(self.go_left)\r\n\r\n        # update once every 0.1 seconds\r\n        self.auto_update(0.1)\r\n\r\n    # add the condition list to the listWidget\r\n    def _load_condition_name(self):\r\n        return self.kiwoom.get_condition_name()\r\n\r\n    # on subscribe button click\r\n    def go_right(self):\r\n        self._subscribe_current_item(self.listWidget_01561)\r\n        self._move_current_item(self.listWidget_01561, self.listWidget_01562)\r\n\r\n    # on unsubscribe button click\r\n    def go_left(self):\r\n        self._unsubscribe_current_item(self.listWidget_01562)\r\n        self._move_current_item(self.listWidget_01562, self.listWidget_01561)\r\n\r\n    @staticmethod\r\n    def _move_current_item(source, target):\r\n        if source.currentItem():\r\n            row = source.currentRow()\r\n            target.addItem(source.takeItem(row)) # pop\r\n\r\n    # fired when the subscribe button is clicked\r\n    def _subscribe_current_item(self, source):\r\n        if source.currentItem():\r\n            condition_name = source.currentItem().text()\r\n            # fetch the dictionary of stocks matching the condition (same as self.condition_tr_dict)\r\n            condition_tr_dict = self.kiwoom.get_condition_tr_dict(condition_name)\r\n\r\n            # request subscription\r\n            self.subscribed_df = self.kiwoom.init_condition_tr_dict(condition_tr_dict)\r\n\r\n            # [0156] add the stocks matching the condition to the table\r\n            self._set_table_widget(self.tableWidget_01561, self.subscribed_df)\r\n\r\n    # fired when the unsubscribe button is clicked\r\n    def _unsubscribe_current_item(self, source):\r\n        if source.currentItem():\r\n            condition_name = source.currentItem().text()\r\n            # cancel the subscription (same as self.condition_tr_dict)\r\n            self.subscribed_df = self.kiwoom.stop_condition_tr_dict(condition_name, self.subscribed_df)\r\n\r\n            # [0156] add the stocks matching the condition to the table\r\n            self._set_table_widget(self.tableWidget_01561, self.subscribed_df)\r\n\r\n    @staticmethod\r\n    def _set_table_widget(table_widget, subscribed_df):\r\n        # define the row count\r\n        item_count = len(subscribed_df['조건식'])\r\n        table_widget.setRowCount(item_count)\r\n        table_widget_columns = subscribed_df.columns.tolist()\r\n        # fill the table with values\r\n        for row in range(item_count):\r\n            for col, column_name in enumerate(table_widget_columns):\r\n                item = QTableWidgetItem(str(subscribed_df[column_name][row]))\r\n                item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)\r\n                table_widget.setItem(row, col, item)\r\n        table_widget.resizeRowsToContents()\r\n\r\n    \"\"\" timeout helper methods below \"\"\"\r\n\r\n    def check_status(self):\r\n        # fire an event once per second\r\n        timer = QTimer(self)\r\n        timer.start(1000)\r\n        timer.timeout.connect(self._timeout)\r\n\r\n    def _timeout(self):\r\n        current_time = QTime.currentTime()\r\n        text_time = current_time.toString(\"hh:mm:ss\")\r\n        time_msg = \"current time: \" + text_time\r\n        state = self.kiwoom.get_connect_state()\r\n        if state == 1:\r\n            state_msg = \"server connection OK\"\r\n        else:\r\n            state_msg = \"server connection lost\"\r\n        self.statusbar.showMessage(state_msg + \" | \" + time_msg)\r\n\r\n    def auto_update(self, seconds):\r\n        timer2 = QTimer(self)\r\n        timer2.start(1000 * seconds) # interval in seconds\r\n        timer2.timeout.connect(self._timeout2)\r\n\r\n    def _timeout2(self):\r\n        # self.logger.debug('updating the condition-matched stocks.')\r\n        
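# --- Editor's addition: a minimal sketch of the QTimer polling pattern that
# check_status/auto_display/auto_update above all repeat: build a QTimer owned
# by the window, connect timeout to a slot, start it with an interval in ms.
# (QTimer is already imported in this file; the helper name is hypothetical.)
def make_poller(parent, seconds, slot):
    timer = QTimer(parent)  # parenting keeps the timer alive
    timer.timeout.connect(slot)
    timer.start(int(1000 * seconds))  # QTimer.start expects integer milliseconds
    return timer
# e.g. make_poller(self, 0.1, self._display_profit_ratio) inside __init__.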
\"\"\" 얘는 그냥 실시간으로 들어온 현재가를 update 하는 용도임\"\"\"\r\n\r\n if len(self.subscribed_df) > 0:\r\n self.subscribed_df = self.kiwoom.update_real_time_price(self.subscribed_df)\r\n self._set_table_widget(self.tableWidget_01561, self.subscribed_df)\r\n\r\n \"\"\" #### 이상 확정 메서드 #### \"\"\"\r\n\r\n\r\nclass WindowSetting4989(QMainWindow, form_class_4989):\r\n \"\"\"\r\n (06/06) 해야할 일: 매수/매도 체결시, d2_deposit & possession_list 최신화하기\r\n \"\"\"\r\n def __init__(self, kiwoom_instance, logger_):\r\n super().__init__()\r\n self.kiwoom = kiwoom_instance\r\n self.logger = logger_\r\n self.setupUi(self)\r\n self.non_traded_df = self.kiwoom.get_non_chegyul_jongmok()\r\n self.check_status()\r\n\r\n # 미체결 list initializer 해야함...\r\n\r\n # 보유종목에 없고, 미체결 종목에 없는 조건식 목록 조회\r\n filtered_df = self.kiwoom.buy_available_list()\r\n self._set_table_widget_49891(self.tableWidget_49891, filtered_df)\r\n self._set_table_widget_49893(self.tableWidget_49893, self.non_traded_df)\r\n\r\n # # 클릭 시 이벤트 처리\r\n # self.listWidget_01561.setAlternatingRowColors(True)\r\n # self.listWidget_01562.setAlternatingRowColors(True)\r\n\r\n # self.toolButton_01561.clicked.connect(self.go_right)\r\n # self.toolButton_01562.clicked.connect(self.go_left)\r\n\r\n # 1초에 한 번 씩 업데이트\r\n self.auto_update(1)\r\n\r\n @staticmethod\r\n def _set_table_widget_49891(table_widget, subscribed_df):\r\n # row 길이 정의\r\n item_count = len(subscribed_df['조건식'])\r\n table_widget.setRowCount(item_count)\r\n # table_widget_columns = subscribed_df.columns.tolist()\r\n table_widget_columns = ['조건식', '종목코드', '종목명']\r\n # 테이블에 숫자 집어넣기\r\n for row in range(item_count):\r\n for col, column_name in enumerate(table_widget_columns):\r\n item = QTableWidgetItem(str(subscribed_df[column_name][row]))\r\n item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)\r\n table_widget.setItem(row, col, item)\r\n table_widget.resizeRowsToContents()\r\n\r\n @staticmethod\r\n def _set_table_widget_49893(table_widget, subscribed_df):\r\n # row 길이 정의\r\n item_count = len(subscribed_df['종목코드'])\r\n table_widget.setRowCount(item_count)\r\n # table_widget_columns = subscribed_df.columns.tolist()\r\n table_widget_columns = ['종목코드', '종목명']\r\n # 테이블에 숫자 집어넣기\r\n for row in range(item_count):\r\n for col, column_name in enumerate(table_widget_columns):\r\n item = QTableWidgetItem(str(subscribed_df[column_name][row]))\r\n item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)\r\n table_widget.setItem(row, col, item)\r\n table_widget.resizeRowsToContents()\r\n\r\n\r\n \"\"\" 이하 timeout을 위한 메서드\"\"\"\r\n\r\n def check_status(self):\r\n # 1초에 한 번씩 이벤트 발생 시킴\r\n timer = QTimer(self)\r\n timer.start(1000)\r\n timer.timeout.connect(self._timeout)\r\n\r\n def _timeout(self):\r\n current_time = QTime.currentTime()\r\n text_time = current_time.toString(\"hh:mm:ss\")\r\n time_msg = \"현재시간: \" + text_time\r\n state = self.kiwoom.get_connect_state()\r\n if state == 1:\r\n state_msg = \"서버 연결 정상\"\r\n else:\r\n state_msg = \"서버 연결 끊김\"\r\n self.statusbar.showMessage(state_msg + \" | \" + time_msg)\r\n\r\n def auto_update(self, seconds):\r\n timer2 = QTimer(self)\r\n timer2.start(1000 * seconds) # seconds초\r\n timer2.timeout.connect(self._timeout2)\r\n # timer2.timeout.connect(self._timeout3)\r\n timer2.timeout.connect(self._timeout4)\r\n\r\n def _timeout2(self):\r\n # 매수할 종목\r\n filtered_df = self.kiwoom.buy_available_list()\r\n self._set_table_widget_49891(self.tableWidget_49891, filtered_df)\r\n\r\n # 매수 신청\r\n if self.checkBox_49891.isChecked():\r\n self.kiwoom.buy_condition_list(filtered_df)\r\n\r\n def 
_timeout3(self):\r\n # 매도할 종목\r\n filtered_df = self.kiwoom.buy_available_list()\r\n self._set_table_widget_49892(self.tableWidget_49892, filtered_df)\r\n\r\n # 매도\r\n if self.checkBox_49892.isChecked():\r\n self.kiwoom.sell_condition_list()\r\n\r\n def _timeout4(self):\r\n # 미체결 종목\r\n self.non_traded_df = self.kiwoom.get_non_traded_df(self.non_traded_df)\r\n self._set_table_widget_49893(self.tableWidget_49893, self.non_traded_df)\r\n\r\n # 미체결 종목 자동 취소\r\n # if self.checkBox_49893.isChecked():\r\n # self.kiwoom.sell_condition_list()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"window_setting.py","file_name":"window_setting.py","file_ext":"py","file_size_in_byte":13135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"543545911","text":"\"\"\"HMAC 256 (Keyed-Hashing for Message Authentication) Python module.\n\nImplements the HMAC 256 algorithm as described by RFC 2104.\nPort of the hmac library using uhashlib.\n\"\"\"\n\nimport uhashlib as _hashlib\n\ntrans_5C = bytes((x ^ 0x5C) for x in range(256))\ntrans_36 = bytes((x ^ 0x36) for x in range(256))\n\n\ndef translate(d, t):\n return bytes(t[x] for x in d)\n\n\ndigest_size = 32\n\n\nclass HMAC256:\n \"\"\"RFC 2104 HMAC class. Also complies with RFC 4231.\n\n Port of the hmac library using uhashlib.\n \"\"\"\n\n blocksize = 64 # 256-bit HMAC; cannot be changed in subclasses.\n\n def __init__(self, key, msg=None):\n \"\"\"Create a new HMAC object.\n\n key: key for the keyed hash object.\n msg: Initial input for the hash, if provided.\n\n Note: key and msg must be a bytes or bytearray objects.\n \"\"\"\n\n if not isinstance(key, (bytes, bytearray)):\n raise TypeError(\n \"key: expected bytes or bytearray, but got %r\" % type(key).__name__\n )\n\n self.digest_cons = lambda d=b\"\": _hashlib.sha256(d)\n\n self.outer = self.digest_cons()\n self.inner = self.digest_cons()\n blocksize = self.blocksize\n\n if len(key) > blocksize:\n key = self.digest_cons(key).digest()\n\n key = key + bytes(blocksize - len(key))\n self.outer.update(translate(key, trans_5C))\n self.inner.update(translate(key, trans_36))\n if msg is not None:\n self.update(msg)\n\n @property\n def name(self):\n return \"hmac-sha256\"\n\n def update(self, msg):\n \"\"\"Update this hashing object with the string msg.\n \"\"\"\n self.inner.update(msg)\n\n def digest(self):\n \"\"\"Return the hash value of this hashing object.\n\n This returns a string containing 8-bit data. 
The object is\n not altered in any way by this function; you can continue\n updating the object after calling this function.\n \"\"\"\n h = self.outer\n h.update(self.inner.digest())\n return h.digest()\n","sub_path":"src/ujwt/hmac.py","file_name":"hmac.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"536143412","text":"import os\nimport unittest\n\nimport numpy as np\nimport xarray as xr\n\nfrom xcube.api.gen.config import get_config_dict\nfrom xcube.api.gen.gen import gen_cube\nfrom xcube.util.dsio import rimraf\nfrom .helpers import get_inputdata_path\n\n\ndef clean_up():\n files = ['l2c-single.nc', 'l2c.nc', 'l2c.zarr', 'l2c-single.zarr']\n for file in files:\n rimraf(file)\n rimraf(file + '.temp.nc') # May remain from Netcdf4DatasetIO.append()\n rimraf(get_inputdata_path(\"input.txt\"))\n\n\nclass DefaultProcessTest(unittest.TestCase):\n\n def setUp(self):\n clean_up()\n\n def tearDown(self):\n clean_up()\n\n # noinspection PyMethodMayBeStatic\n def test_process_inputs_single(self):\n status = gen_cube_wrapper(\n [get_inputdata_path('20170101-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],\n 'l2c-single.nc',\n False\n )\n self.assertEqual(True, status)\n\n def test_process_inputs_append_multiple_nc(self):\n status = gen_cube_wrapper(\n [get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],\n 'l2c.nc',\n True\n )\n self.assertEqual(True, status)\n\n def test_process_inputs_append_multiple_zarr(self):\n status = gen_cube_wrapper(\n [get_inputdata_path('201701??-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc')],\n 'l2c.zarr',\n True\n )\n self.assertEqual(True, status)\n\n def test_input_txt(self):\n f = open((os.path.join(os.path.dirname(__file__), 'inputdata', \"input.txt\")), \"w+\")\n for i in range(1, 4):\n file_name = \"2017010\" + str(i) + \"-IFR-L4_GHRSST-SSTfnd-ODYSSEA-NWE_002-v2.0-fv1.0.nc\"\n file = get_inputdata_path(file_name)\n f.write(\"%s\\n\" % file)\n f.close()\n status = gen_cube_wrapper(\n [get_inputdata_path('input.txt')],\n 'l2c.zarr',\n True\n )\n self.assertEqual(True, status)\n\n def test_handle_360_lon(self):\n status = gen_cube_wrapper(\n [get_inputdata_path('20170101120000-UKMO-L4_GHRSST-SSTfnd-OSTIAanom-GLOB-v02.0-fv02.0.nc')],\n 'l2c-single.zarr',\n True\n )\n self.assertEqual(True, status)\n ds = xr.open_zarr('l2c-single.zarr')\n self.assertIn('lon', ds.coords)\n self.assertFalse(np.any(ds.coords['lon'] > 180.))\n\n\n# noinspection PyShadowingBuiltins\ndef gen_cube_wrapper(input_paths, output_path, append_mode):\n config = get_config_dict(locals())\n return gen_cube(input_processor='default',\n output_size=(320, 180),\n output_region=(-4., 47., 12., 56.),\n output_resampling='Nearest',\n output_variables=[('analysed_sst', dict(name='SST'))],\n dry_run=False,\n monitor=None,\n **config)\n","sub_path":"test/api/gen/default/test_gen.py","file_name":"test_gen.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"223006539","text":"class Solution(object):\n def solveNQueens(self, n):\n \"\"\"\n :type n: int\n :rtype: List[List[str]]\n \"\"\"\n res = []\n s = '.' 
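# --- Editor's addition: usage sketch for the HMAC256 class in the hmac.py
# record above (ubinascii is MicroPython's binascii; on CPython the result
# can be cross-checked against hmac.new(key, msg, 'sha256').digest()) ---
from ubinascii import hexlify

mac = HMAC256(b'secret-key', b'payload')
mac.update(b' more payload')
tag = mac.digest()  # 32 bytes, matching digest_size = 32
print(hexlify(tag))
# When verifying, compare tags with a constant-time comparison rather than ==.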
* n\n\n def backtrack(path=[], i=0, col_selected=[], z_diag=set(),\n f_diag=set()):\n if i == n:\n res.append(path)\n return\n for j in range(n):\n if j not in col_selected and i - j not in z_diag and i + j not in f_diag:\n backtrack(path + [s[:j] + 'Q' + s[j + 1:]], i + 1,\n col_selected + [j], z_diag | {i - j}, f_diag | {i + j})\n\n backtrack()\n return res","sub_path":"Week2/N-Queens.py","file_name":"N-Queens.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"385575090","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport logging\nimport os\nimport sys\n\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../\"))\nfrom examples.config import (\n CHANNEL_ERROR,\n BOT_TOKEN,\n OWNER,\n DEBUG,\n PROXY,\n MONGO_USER,\n MONGO_PASS,\n MONGO_IP,\n MONGO_PORT,\n)\nfrom examples.CustomParser import PARSER\nfrom telegramnews.tools import get_channels, start_many, start_tool\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\", level=logging.INFO\n)\n\n\ndef main(filter=0):\n channels = get_channels(\"channels.json\", filter=filter)\n custom_parser = PARSER\n mongo = {\"port\": MONGO_PORT, \"ip\": MONGO_IP, \"pass\": MONGO_PASS, \"user\": MONGO_USER}\n start_many(\n channels,\n custom_parser,\n BOT_TOKEN,\n owner=OWNER,\n filter=filter,\n debug=DEBUG,\n mongo=mongo,\n proxy=PROXY,\n error=CHANNEL_ERROR,\n )\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n arg = \"\".join(sys.argv[1:])\n if arg[:9] == \"--filter=\":\n main(filter=int(arg[9:]))\n else:\n start_tool(arg)\n else:\n main()\n","sub_path":"examples/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"206695498","text":"\"\"\"\nThis modules implements methods for generating weight distributions from real value weights\n\"\"\"\n\nfrom typing import List\n\nimport torch\nfrom torch import Tensor\n\n\ndef discretize_weights_shayer(real_weights: Tensor, discrete_weight_values):\n \"\"\"\n Discretizes the weights from a matrix with real weights based on the Shayer method (pg. 9 of paper). Called by\n discretize_weights_probabilistic.\n :param real_weights: a (output_features x input_features) matrix with real weights\n :param discrete_weight_values: a sorted list of discrete values a weight may take\n :return: a (num_discrete_values x output_features x input_features) with the probabilities\n \"\"\"\n\n # the minimum probability for a value\n q_min = 0.05\n # number of discrete values\n number_values = len(discrete_weight_values)\n # maximum probability\n q_max = 1 - (number_values - 1) * q_min\n delta_q = q_max - q_min\n\n # list of tensors with the probabilities of each of the discrete values\n probability_tensors = []\n for inx, discrete_value in enumerate(discrete_weight_values):\n p_tensor = torch.ones_like(real_weights)\n # add default probability\n p_tensor *= q_min\n if inx == 0:\n # this is the first discretization level. 
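# --- Editor's addition: expected output of the N-Queens solver above ---
# Solution().solveNQueens(4) returns the two distinct 4x4 placements, in the
# order the backtracking visits them (columns tried left to right):
#   [['.Q..', '...Q', 'Q...', '..Q.'],
#    ['..Q.', 'Q...', '...Q', '.Q..']]
# z_diag keys rows-minus-columns ("\" diagonals) and f_diag keys
# rows-plus-columns ("/" diagonals), so each diagonal is blocked in O(1).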
need to account for real values smaller than it\n p_tensor[real_weights <= discrete_value] = q_max\n # weight values greater than the discretized value but smaller than next discretization value\n # the value of the next discretization level\n\n elif inx == number_values - 1:\n # last discretization level\n p_tensor[real_weights > discrete_value] = q_max\n\n if inx < number_values - 1:\n # apply to values that are not the last\n next_disc_value = discrete_weight_values[inx + 1]\n # calculating a mask of the weights that are between the current discretization value and the next\n greater_than_current = real_weights > discrete_value\n smaller_than_next_level = real_weights <= next_disc_value\n # update the value for which both conditions are true\n mask = torch.stack((greater_than_current, smaller_than_next_level)).all(dim=0)\n p_tensor[mask] = \\\n q_min + delta_q * (next_disc_value - real_weights[mask]) / \\\n (next_disc_value - discrete_value)\n\n if inx > 0:\n # do not apply to the first discretization level\n prev_disc_value = discrete_weight_values[inx - 1]\n smaller_than_current = real_weights <= discrete_value\n greater_than_prev = real_weights > prev_disc_value\n mask = torch.stack((smaller_than_current, greater_than_prev)).all(dim=0)\n # weight values smaller than the discretized value\n p_tensor[mask] = \\\n q_min + delta_q * (real_weights[mask] - prev_disc_value) / \\\n (discrete_value - prev_disc_value)\n probability_tensors.append(p_tensor)\n return torch.stack(probability_tensors)\n\n\ndef discretize_weights_probabilistic(real_weights: Tensor, discrete_weight_values: List):\n \"\"\"\n Initializes a linear layer with discrete weights. Redistributes weights and calls discretize_weights_shayer\n :param real_weights: a (output_features x input_features) matrix with real weights\n :param discrete_weight_values: a sorted list of discrete values a weight may take\n :returns: weight distributions (as a mean and standard deviations)\n (num_discrete_values x output_features x input_features)\n\n \"\"\"\n\n def empirical_cdf(weight_row):\n \"\"\"\n calculates a cumulative density function over the values of real weights on the weight matrix and normalizes\n the weights over the unit interval\n :param: weight_row: a 1D row of weights (num weights)\n\n :returns: a 1D row of weights with their respective cdf values\n \"\"\"\n num_weights = weight_row.shape[0]\n sort_val, sort_indices = weight_row.sort(dim=0) # indices start with zero so we have to shift by 1\n # sort_indices has the indices of the values in the sorted order. The first index is the index to the\n # smallest element\n\n # to compute the cdf it would be useful, for every value, to have its position in the list. 
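# --- Editor's addition: the interpolation rule implemented above ---
# For a weight w between neighboring discrete levels w_k < w <= w_{k+1},
# discretize_weights_shayer assigns
#   p(w_k)     = q_min + delta_q * (w_{k+1} - w) / (w_{k+1} - w_k)
#   p(w_{k+1}) = q_min + delta_q * (w - w_k)     / (w_{k+1} - w_k)
# and q_min to every other level. With D levels the column total is
# D*q_min + delta_q = q_max + (D-1)*q_min = 1, so each weight's
# distribution is normalized by construction.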
we must invert\n # sort indices\n\n position_per_value = torch.zeros_like(weight_row, dtype=torch.float64)\n position_per_value[sort_indices] = torch.tensor(list(range(num_weights))).double().to(weight_row.device) + 1.0\n cdf = position_per_value / num_weights\n\n return cdf\n\n delta_w = discrete_weight_values[1] - discrete_weight_values[0]\n\n # compute cdf separately for positive and negative weights\n weight_cdf = torch.zeros_like(real_weights, dtype=torch.float64)\n # normalizing according to paper [w_1 - \\delta_w/2, w_D - \\delta_w/2]\n weight_cdf[real_weights <= 0] = empirical_cdf(real_weights[real_weights <= 0]) * \\\n (-discrete_weight_values[0] + delta_w / 2) - (-discrete_weight_values[0] + delta_w / 2)\n weight_cdf[real_weights > 0] = empirical_cdf(real_weights[real_weights > 0]) * \\\n (discrete_weight_values[-1] + delta_w / 2)\n # need to shift back to a matrix\n # weight_cdf = weight_cdf.reshape(real_weights.shape)\n\n # we use shayers discretization method with the shifted weights\n shayers_discretized = discretize_weights_shayer(weight_cdf, discrete_weight_values)\n\n # we need to have log-probabilities\n shayers_discretized = torch.log(shayers_discretized)\n\n return shayers_discretized\n\n\ndef generate_weight_probabilities(logit_weights):\n \"\"\"\n calculates the probabilities of all discrete weights for the logits provided\n\n :param logit_weights: a tensor with dimensions (discretization levels x output features x input_features)\n with the discrete distribution as logits\n :return: a tensor with dimensions (discretization levels x output features x input_features)\n with the discrete distribution as probabilities\n \"\"\"\n weight_probabilities = torch.exp(logit_weights)\n weight_probabilities = weight_probabilities / weight_probabilities.sum(dim=0)\n return weight_probabilities\n\n\ndef get_gaussian_dist_parameters(logit_weights, discrete_values):\n \"\"\"\n Fits a gaussian distribution to the logits in logit_weights.\n :param logit_weights: a tensor with dimensions (discretization levels x output features x input_features)\n with the discrete distribution as logits\n :param discrete_values: a sorted list of discrete weight values\n :return: a tuple with the means of the gaussian distributions as a (output features x input_features) tensor\n and a the standard deviations in a tensor of the same format\n \"\"\"\n weight_probabilities = generate_weight_probabilities(logit_weights)\n discrete_val_tensor = torch.zeros_like(logit_weights)\n # set the device to match that of logits\n discrete_val_tensor = discrete_val_tensor.to(logit_weights.device)\n for inx, discrete_weight in enumerate(discrete_values):\n discrete_val_tensor[inx, :, :] = discrete_weight\n discrete_val_tensor.requires_grad = True\n weight_mean = discrete_val_tensor * weight_probabilities\n weight_mean = weight_mean.sum(dim=0)\n\n weight_var = weight_probabilities * torch.pow(discrete_val_tensor - weight_mean, 2)\n weight_var = weight_var.sum(dim=0)\n return weight_mean, weight_var\n","sub_path":"discrete_nn/layers/weight_utils.py","file_name":"weight_utils.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"75178925","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 12 17:11:00 2021\npython mongo_genebuilder.py -c 'USDZAR' -i 5 -l 0\n@author: christostrydom\n\"\"\"\n\nimport numpy as np\n# import matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom datetime import 
timedelta\nimport pandas as pd\nfrom pymongo import MongoClient\n\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\n# import subprocess\nimport sys,getopt\n\nfrom bson.objectid import ObjectId\nclient = MongoClient()\ndb=client.Currencies\n\n\n\n# position 1 peakprice:perc25;\n# position 2 peakprice:perc50;\n# position 3 peakprice:perc75;\n# position 4 bar length(this bar):bar length(previous bar);\n# position 5 perc25(this bar):perc25 (previous bar);\n# position 6 perc25(this bar):perc50 (previous bar);\n# position 7 perc25(this bar):perc75 (previous bar);\n# position 8 perc25(this bar):peakprice (previous bar);\n# position 9 perc50(this bar):perc25 (previous bar);\n# position 10 perc50(this bar):perc50 (previous bar);\n# position 11 perc50(this bar):perc75 (previous bar);\n# position 12 perc50(this bar):peakprice (previous bar);\n# position 13 perc75(this bar):perc25 (previous bar);\n# position 14 perc75(this bar):perc50 (previous bar);\n# position 15 perc75(this bar):perc75 (previous bar);\n# position 16 perc75(this bar):peakprice (previous bar);\n# position 17 peakprice(this bar):perc25 (previous bar);\n# position 18 peakprice(this bar):perc50 (previous bar);\n# position 19 peakprice(this bar):perc75 (previous bar);\n# position 20 peakprice(this bar):peakprice (previous bar);\n\ndef get_opts():\n    # currency,stop_loss,profit_margin,strategy=get_opts()\n\n    return_dict={}\n    \n    argv = sys.argv[1:]\n    \n    try:\n        opts, args = getopt.getopt(argv, \"c:l:i:\")\n        \n    except:\n        print(\"Error\")\n        \n    for opt, arg in opts:\n        if opt in ['-c']:\n            return_dict['currency'] = arg\n        elif opt in ['-l']:\n            return_dict['level'] = int(arg)\n        elif opt in ['-i']:\n            return_dict['interval'] = arg + 'MIN' \n\n    return return_dict\n\ndef return_gene(gene, gene_type=''):  # default added: the main loop below calls return_gene(gene) with one argument\n    gene_list=list(db.ohlc_dna.find({'codestr':gene}))\n    if not gene_list:\n#        check_list=list(db.ohlc_dna.find({}).sort([('number',-1)]).limit(1))\n        new_list=list(db.ohlc_dna.find({}).sort([('number',-1)]).limit(1))\n        number=new_list[0]['number']+1\n        description=genedescription(gene=gene)\n        insertrecord={'number':number,\n                      'codestr':gene,\n                      'description':description,\n                      'type': gene_type}\n        db.ohlc_dna.insert_one(insertrecord) \n        print(f\"inserted new gene {gene} at number {number} with description {description}\")\n    else:\n#        print(f'gene {gene} exists!')\n        number=gene_list[0]['number']\n        description=gene_list[0]['description']\n    return number, description\n\ndef genedescription(gene):\n    thislist=['peakprice',\n              'peakprice',\n              'peakprice',\n              'bar length(this bar)',\n              'perc25(this bar)',\n              'perc25(this bar)',\n              'perc25(this bar)',\n              'perc25(this bar)',\n              'perc50(this bar)',\n              'perc50(this bar)', \n              'perc50(this bar)',\n              'perc50(this bar)',\n              'perc75(this bar)',\n              'perc75(this bar)',\n              'perc75(this bar)', \n              'perc75(this bar)',\n              'peakprice(this bar)', \n              'peakprice(this bar)',\n              'peakprice(this bar)', \n              'peakprice(this bar)',]\n\n    previouslist=['perc25',\n                  'perc50',\n                  'perc75',\n                  'bar length(previous bar)',\n                  'perc25 (previous bar)',\n                  'perc50 (previous bar)',\n                  'perc75 (previous bar)',\n                  'peakprice (previous bar)',\n                  'perc25 (previous bar)',\n                  'perc50 (previous bar)',\n                  'perc75 (previous bar)',\n                  'peakprice (previous bar)',\n                  'perc25 (previous bar)',\n                  'perc50 (previous bar)',\n                  'perc75 (previous bar)',\n                  'peakprice (previous bar)',\n                  'perc25 (previous bar)',\n                  'perc50 (previous bar)',\n                  'perc75 (previous bar)',\n                  'peakprice (previous bar)',] \n    genestr=\"\"\n    for i in range(len(gene)):\n#        print(gene[i])\n        if int(gene[i])==0:\n            
compstr='<='\n else:\n compstr='>'\n genestr=genestr+thislist[i]+compstr+previouslist[i]+'; '\n return genestr\n\ndef genebuilder(docnow, docprevious):\n gene=genestart(docnow)\n docnow_list=[docnow['perc25'],docnow['perc50'],docnow['perc75'],docnow['peakprice']]\n gene.append((docnow['perc75']-docnow['perc25'])>(docprevious['perc75']-docprevious['perc25']))\n docprevious_list=[docprevious['perc25'],docprevious['perc50'],docprevious['perc75'],docprevious['peakprice']]\n for i in range(len(docnow_list)):\n for j in range(len(docprevious_list)):\n gene.append(docnow_list[i]>docprevious_list[j])\n string_ints = [str(int) for int in list(np.array(gene).astype(int))]\n str_of_ints = \"\".join(string_ints) \n return str_of_ints\n \ndef genestart(doc):\n return [doc['peakprice']>doc['perc25'],doc['peakprice']>doc['perc50'],doc['peakprice']>doc['perc75']]\n\n# price_collection = db[CURRENCY]\ncurrency_ohlc=db['CURRENCY_OHLC']\n# usdzar_ohlc=db['CURRENCY_OHLC']\nopts_dict=get_opts()\n\nCURRENCY=opts_dict['currency']\nINTERVAL=opts_dict['interval']\nLEVEL=opts_dict['level']\ngene_type=''\n# 00001000111011110000\n# m=1\n# startdate=datetime(2022,1,1)\n\n# the gene vector has 20 binary positions: (possible 1,048,576, last count = 1088)\n# position 1 peakprice>perc25;\n# position 2 peakprice>perc50;\n# position 3 peakprice>perc75;\n# position 4 bar length(this bar)>bar length(previous bar);\n# position 5 perc25(this bar)>perc25 (previous bar);\n# position 6 perc25(this bar)>perc50 (previous bar);\n# position 7 perc25(this bar)>perc75 (previous bar);\n# position 8 perc25(this bar)>peakprice (previous bar);\n# position 9 perc50(this bar)>perc25 (previous bar);\n# position 10 perc50(this bar)>perc50 (previous bar);\n# position 11 perc50(this bar)>perc75 (previous bar);\n# position 12 perc50(this bar)>peakprice (previous bar);\n# position 13 perc75(this bar)>perc25 (previous bar);\n# position 14 perc75(this bar)>perc50 (previous bar);\n# position 15 perc75(this bar)>perc75 (previous bar);\n# position 16 perc75(this bar)>peakprice (previous bar);\n# position 17 peakprice(this bar)>perc25 (previous bar);\n# position 18 peakprice(this bar)>perc50 (previous bar);\n# position 19 peakprice(this bar)>perc75 (previous bar);\n# position 20 peakprice(this bar)>peakprice (previous bar);\n\nstartdate=datetime(2050,1,1)\n# list(usdzar_ohlc.find({'interval':'5MIN'}).limit(1))\ngenelist=set()\ncounter=0\ngene=[]\n\nwhile True:\n one_doc_list=list(currency_ohlc.find({\"datetime\":{\"$lt\":startdate},\n \"currency\":CURRENCY,\n 'interval':INTERVAL,\n # \"$exists\":{f'genenumber_{LEVEL}':False}\n }).sort([('datetime',-1)]).limit(1))\n if one_doc_list:\n doc1=one_doc_list[0]\n# d1list=[doc1['perc25'],doc1['perc50'],doc1['perc75'],doc1['peakprice']]\n# gs=genestart(doc1)\n startdate=doc1['datetime']\n# for s in [doc2['peakprice'],]\n counter+=1\n one_doc_list=list(currency_ohlc.find({\"datetime\":{\"$lt\":startdate},\n \"currency\":CURRENCY,\n 'interval':INTERVAL}).sort([('datetime',-1)]).skip(LEVEL).limit(1))\n if one_doc_list:\n doc2=one_doc_list[0]\n gene=genebuilder(docnow=doc1, docprevious=doc2)\n# d2list=[doc2['perc25'],doc2['perc50'],doc2['perc75'],doc2['peakprice']]\n lapsed_time=(doc1['datetime']-doc2['datetime']).seconds/60\n # number=list(db.ohlc_dna.find({'codestr':gene}))[0]['number']\n number, description=return_gene(gene)\n setstr={'$set':{f'genenumber_{LEVEL}':number,\n f'lapsed_time_{LEVEL}':lapsed_time}}\n findfilter={'_id':doc1['_id']}\n result=currency_ohlc.update_one(findfilter,setstr)\n if 
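# --- Editor's addition: decoding a gene string (uses the value mentioned in
# the comments above; genedescription() is defined earlier in this file) ---
# A gene is a 20-character bit string over the comparisons listed above.
#   genedescription('00001000111011110000')
# renders position 5 (a '1') as "perc25(this bar)>perc25 (previous bar); "
# and every '0' position with '<='. If a compact integer id were ever needed,
# int(gene, 2) would provide one.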
counter%100==0:\n                print('counter: ',counter,\n                      '; number: ',number,\n                      '; lapsed_time: ',lapsed_time,\n                      '; _id: ',doc1['_id'],\n                      '; result.matched_count: ',result.matched_count,\n                      '; result.modified_count: ',result.modified_count)\n\n#            assert doc2['datetime'] != startdate\n#            gene.append((doc1['perc75']-doc1['perc25'])>doc2['perc75']-doc2['perc25'])\n#            for i in range(len(d1list)):\n#                for j in range(len(d2list)):\n#                    gene.append(d1list[i]>d2list[j])\n#    #                gene.append(d1list[i]>d2list[j])\n\n        #    list(np.array(gene).astype(int))\n\n\n\n            genelist.add(gene)\n        else:\n            break\n    else:\n        break\n\n\n","sub_path":"mongo_genebuilder.py","file_name":"mongo_genebuilder.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"406094102","text":"from django.core.management.base import BaseCommand\nfrom ._sparrow_rabbitmq_consumer import rabbitmq_consumer\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n    help = 'sparrow_rabbitmq_consumer'\n\n    def add_arguments(self, parser):\n        parser.add_argument('--queue', dest=\"queue\", default='', type=str)\n\n    def handle(self, *args, **kwargs):\n        queue = kwargs.get('queue', None)\n        if queue:\n            rabbitmq_consumer(queue=queue)\n        else:\n            logger.error('Please pass the --queue argument when invoking the command')\n            print('Please pass the --queue argument when invoking the command')\n\n\n\n\n","sub_path":"sparrow_cloud/apps/message_service/management/commands/rabbitmq_consumer.py","file_name":"rabbitmq_consumer.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"241856539","text":"import socket\nimport struct\nimport threading\n\nfrom Tool import MsgFactory\nfrom forwarder import FORWARDER\nfrom msgEntity.UVA_MSG import UVA_MSG\nimport Global_Values.SOCKETS as allSockets\n\nclass UVA_Communicator(threading.Thread):\n    # communicate with the UAV: establish the connection and keep the socket info\n    def __init__(self,recServer=\"192.168.5.155\",recPort=9090):\n        '''\n        :param recServer: address of the server that receives UAV signalling, usually this host's address\n        :param recPort: port of the server that receives UAV signalling, default 9090\n        '''\n        threading.Thread.__init__(self)\n        self.recServer = recServer\n        self.recPort = recPort\n        self.videoServer=self.recServer# for now the UAV-communication server and the forwarding server share the same address\n        self.msgRec = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n        self.msgRec.bind((self.recServer, self.recPort))  # was self.recPORT, which is never defined\n    def run(self):\n        while 1:\n            data, addr = self.msgRec.recvfrom(1024)\n            recMes = UVA_MSG()\n            recData = struct.unpack(recMes.typeStr, data)\n            print(recData)\n            recMes.initFromData(recData)\n            if recData[0] == b'\\x01':# connection request received\n\n                levelUVAs = allSockets.get_value(recMes.level)\n                if levelUVAs:\n                    # other UAVs already exist for this region; check for duplicates\n                    if recMes.cliNum not in levelUVAs.keys():\n                        # received a connection request from a UAV\n                        videoForwarder = FORWARDER(recMes)\n                        # group UAVs from the same administrative region together\n                        allSockets.set_value(recMes.level,videoForwarder)\n                        # start the listening thread first\n                        videoForwarder.start()\n                        # then return the connection info to the UAV\n                        data = MsgFactory.getSERVER_MSG_READY(videoForwarder.ip, videoForwarder.port)\n                        # send the ready command; this completes the UAV's attachment\n                        self.msgRec.sendto(data, addr)\n                    else:\n                        # on repeated connections, just send the ready command straight back\n                        videoForwarder = levelUVAs[recMes.cliNum]\n                        data = MsgFactory.getSERVER_MSG_READY(videoForwarder.ip, videoForwarder.port)\n                        # send the ready command; this completes the UAV's attachment\n                        self.msgRec.sendto(data, addr)\n                else:\n                    # no UAV for this region yet, so create one and add it\n                    # received a connection request from a UAV\n                    videoForwarder = FORWARDER(recMes,serverCommunicateIP=self.recServer,serverCommunicatePort=self.recPort,videoIP=self.videoServer)\n                    # group UAVs from the same administrative region together\n                    allSockets.set_value(recMes.level, videoForwarder)\n                    # start the listening thread first, ready to receive the UAV's video\n                    videoForwarder.start()\n                    # then return the connection info to the UAV\n                    data = MsgFactory.getSERVER_MSG_READY(videoForwarder.videoIP, videoForwarder.videoPort)\n                    # send the ready command; this completes the UAV's attachment\n                    self.msgRec.sendto(data, addr)\n            elif recData[0]==b'\\x02':# heartbeat response received\n                levelUVAs=allSockets.get_value(recMes.level)\n                videoForwarder = levelUVAs[recMes.cliNum]# get the videoForwarder\n                heartResponseData = videoForwarder.getHearResponse()# build the heartbeat message\n                videoForwarder.uvaMsg=recMes# update the UAV info\n                self.msgRec.sendto(heartResponseData,addr)\n","sub_path":"communicators/uvaCommunicator.py","file_name":"uvaCommunicator.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"89936011","text":"import logging\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom myforum.forum import models, views, auth, ajax\nfrom myforum.forum.tests.util import fake_request\nfrom myforum.accounts.models import User\n\nlogger = logging.getLogger('engine')\n\n\nclass PostTest(TestCase):\n\n    def setUp(self):\n        logger.setLevel(logging.WARNING)\n        self.owner = User.objects.create(username=f\"test\", email=\"tested@tested.com\", password=\"tested\")\n\n        # Create an existing tested post\n        self.post = models.Post.objects.create(title=\"Test\", author=self.owner, content=\"Test\",\n                                               type=models.Post.QUESTION)\n        self.owner.save()\n        pass\n\n    def test_post_create(self):\n        \"\"\"Test post creation with POST request\"\"\"\n\n        content = f\"@{self.owner.username} \" + \"testing \" * 10\n\n        # Create fake request\n        data = {'post_type': models.Post.QUESTION,\n                'title': 'title tested post',\n                \"tag_val\": \"tested,test3\",\n                \"content\": content\n                }\n\n        request = fake_request(url=reverse('post_create'), data=data, user=self.owner)\n        response = views.new_post(request=request)\n        self.process_response(response=response)\n\n    def test_comment(self):\n        \"\"\"Test adding comment using POST request\"\"\"\n\n        data = {\"parent_uid\": self.post.uid, \"content\": \"tested content for a question\"}\n        url = reverse('create_comment', kwargs=dict(uid=self.post.uid))\n\n        request = fake_request(url=url, data=data, user=self.owner)\n        response = views.new_comment(request=request, uid=self.post.uid)\n\n        self.assertEqual(response.status_code, 302, f\"Could not add comments\")\n\n    def test_comment_traversal(self):\n        \"\"\"Test comment rendering pages\"\"\"\n\n        # Create a couple of comments to traverse\n\n        comment = models.Post.objects.create(title=\"Test\", author=self.owner, content=\"Test\",\n                                             type=models.Post.COMMENT, root=self.post,\n                                             parent=self.post)\n        comment2 = models.Post.objects.create(title=\"Test\", author=self.owner, content=\"Test\",\n                                              type=models.Post.COMMENT, root=self.post,\n                                              parent=comment)\n\n        url = reverse(\"post_view\", kwargs=dict(uid=self.post.uid))\n\n        request = fake_request(url=url, data={}, user=self.owner)\n\n        response = views.post_view(request=request, uid=self.post.uid)\n\n        self.assertTrue(response.status_code == 200, 'Error rendering comments')\n\n    def test_ajax_subs(self):\n\n        for stype in [\"unfollow\", \"messages\", \"email\", \"all\", \"default\"]:\n\n            data = {\"sub_type\": stype, \"root_uid\": self.post.uid}\n            request = fake_request(url=reverse('vote'), data=data, user=self.owner)\n            response = ajax.ajax_subs(request)\n            self.assertEqual(response.status_code, 200, f\"Could not preform subscription action:{stype}.\")\n\n    def preform_votes(self, post, user):\n        for vtype in [\"upvote\", \"bookmark\", \"accept\"]:\n\n            data = {\"vote_type\": vtype, \"post_uid\": post.uid}\n            
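# --- Editor's addition: the UAV side of the handshake implemented by
# UVA_Communicator further above, as commented pseudo-usage (the actual
# message layout lives in UVA_MSG, which is not shown here, so the request
# bytes below are an assumption) ---
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   s.sendto(connect_request_bytes, ('192.168.5.155', 9090))  # b'\x01'-tagged request
#   ready, addr = s.recvfrom(1024)  # server replies with the ready command
#   # ...then stream video to the ip/port the ready message carries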
request = fake_request(url=reverse('vote'), data=data, user=user)\n response = ajax.ajax_vote(request)\n self.assertEqual(response.status_code, 200, f\"Could not preform vote:{vtype}.\")\n\n def test_ajax_vote(self):\n \"\"\"Test the ajax voting using POST request \"\"\"\n # Create a different user to vote with\n user2 = User.objects.create(username=\"user\", email=\"user@tested.com\", password=\"tested\")\n\n answer = models.Post.objects.create(title=\"answer\", author=user2, content=\"tested foo bar too for\",\n type=models.Post.ANSWER, parent=self.post)\n\n self.preform_votes(post=answer, user=self.owner)\n self.preform_votes(post=self.post, user=self.owner)\n self.preform_votes(post=self.post, user=user2)\n\n return\n\n def test_edit_post(self):\n \"\"\"\n Test post edit for root and descendants\n \"\"\"\n url = reverse(\"post_edit\", kwargs=dict(uid=self.post.uid))\n\n title = \"Test title for long test\"\n tag_val = \"foo,bar,foo\"\n content = \"Test the content with more things \"\n\n longform_data = dict(title=title, tag_val=tag_val, content=content, post_type=models.Post.TUTORIAL)\n\n longform_request = fake_request(url=url, data=longform_data, user=self.owner)\n longform_response = views.edit_post(request=longform_request, uid=self.post.uid)\n self.process_response(longform_response)\n\n def test_post_answer(self):\n \"\"\"\n Test submitting answer through the post view\n \"\"\"\n url = reverse(\"post_view\", kwargs=dict(uid=self.post.uid))\n\n # Get form data\n data = dict(content=\"testing answer\", parent_uid=self.post.uid)\n request = fake_request(url=url, data=data, user=self.owner)\n response = views.post_view(request=request, uid=self.post.uid)\n self.process_response(response)\n return\n\n def test_markdown(self):\n \"Test the markdown rendering\"\n from django.core import management\n\n management.call_command(\"test_markdown\")\n\n def process_response(self, response):\n \"Check the response on POST request is redirected\"\n\n self.assertEqual(response.status_code, 302,\n f\"Could not redirect :\\nresponse:{response}\")\n\n\n\n","sub_path":"myforum/forum/tests/test_post.py","file_name":"test_post.py","file_ext":"py","file_size_in_byte":5480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"408913926","text":"#!/usr/bin/env python\n\"\"\"\ninfo about project here\n\"\"\"\n\n\n__author__ = \"Johannes Coolen\"\n__email__ = \"johannes.coolen@student.kdg.be\"\n__status__ = \"development\"\n\n\ndef main():\n naam = input(\"geef een naam \")\n if naam == \"Phineas\":\n print(\"and ferb\")\n else:\n print(\"hallo \" + naam)\n\n\nif __name__ == '__main__': # code to execute if called from command-line\n main()\n","sub_path":"make/ifelsedemo1µ.py","file_name":"ifelsedemo1µ.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"322111819","text":"import socket\nimport mylogger\nimport pickle\n\nHEADERSIZE = 10\nIPV4 = socket.AF_INET\nTCP = socket.SOCK_STREAM\nPORT = 1234\n#IPADDRESS = 'dbelab04'\nIPADDRESS = 'localhost' # localhost or 127.0.0.1\n#logging setup\nlogger = mylogger.init_logging(name='basic_client', loglevel=mylogger.DEBUG)\n\ns = socket.socket(IPV4, TCP) # create socket object\n\ns.connect((IPADDRESS, PORT)) # attempt connection to server\n\nwhile True:\n full_msg = b''\n new_msg = True # set new_msg flag\n while True:\n msg = s.recv(16) # buffer size 16 bytes for incoming message\n if new_msg:\n msg_len = 
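# --- Editor's addition: the fake_request helper used by the tests above is
# imported from myforum.forum.tests.util (not shown). A typical implementation
# would wrap Django's RequestFactory roughly like this sketch (a guess at the
# shape, not the project's real helper, which may also attach sessions) ---
from django.test import RequestFactory

def fake_request_sketch(url, data, user, method='post'):
    factory = RequestFactory()
    request = getattr(factory, method)(url, data)
    request.user = user  # attach the user the view code expects
    return request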
int(msg[:HEADERSIZE]) # convert value in HEADER(expected message length) to int\n logger.debug('Expected message length: %s', msg_len) # print expected message length\n new_msg = False # clear new_msg flag\n\n #full_msg += msg.decode(\"utf-8\")\n full_msg += msg # append messages\n\n #print(len(full_msg))\n #logger.debug('Full message length: %s', full_msg) # print full message length\n\n if len(full_msg)-HEADERSIZE == msg_len: # execute when complete message is received based on size indicated in HEADER\n logger.debug('Full message received: %s', pickle.loads(full_msg[HEADERSIZE:]))\n new_msg = True # set new_msg flag\n full_msg = b\"\" # clear/empty message\n","sub_path":"advclient.py","file_name":"advclient.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"111941574","text":"import yaml\nfrom yaml import YAMLObject, Loader, Dumper\nfrom paws.stack.instance_config import build_instance_config\nfrom paws.stack.rds_config import build_rds_config\nfrom paws.stack.elb_config import build_elb_config\n\nclass StackConfig(object):\n \"\"\"\n Represents a stack configuration.\n \n Open the stack file, build and return the stack configuration.\n \"\"\"\n @classmethod\n def read_from_file(cls,\n config_file_name,\n env_config,\n build_instance = build_instance_config,\n build_rds = build_rds_config,\n build_elb = build_elb_config):\n def yaml_nodes_to_python(yaml_data):\n string = yaml.serialize(yaml_data)\n return yaml.load(string)\n\n class Env(YAMLObject):\n \"\"\"Represents a subclass for the !env tag/constructor\"\"\"\n\n yaml_loader = Loader\n yaml_dumper = Dumper\n yaml_tag = u'!env'\n\n @classmethod\n def from_yaml(cls, loader, node):\n \"\"\"Custom constructor to determine value selection based on the environment\"\"\"\n env = env_config.short_name\n env_value_in_array = [ v for (e, v) in node.value if e.value == env ]\n default_value_in_array = [ v for (e, v) in node.value if e.value == \"default\" ]\n if env_value_in_array:\n return yaml_nodes_to_python(env_value_in_array[0])\n elif default_value_in_array:\n return yaml_nodes_to_python(default_value_in_array[0])\n else:\n return None\n\n with open(config_file_name) as config_file:\n stack_config = yaml.load(config_file)\n stack_name = stack_config[\"name\"]\n pre_req = stack_config[\"pre_req\"] if \"pre_req\" in stack_config else None\n post_req = stack_config[\"post_req\"] if \"post_req\" in stack_config else None\n components_config = stack_config[\"components\"]\n instances_config = components_config.get(\"instances\", [])\n rds_instances_config = components_config.get(\"rds_instances\", [])\n elbs_config = components_config.get(\"elbs\", [])\n instances = [build_instance(i, env_config) for i in instances_config]\n rds_instances = [build_rds(r, env_config) for r in rds_instances_config]\n elbs = [build_elb(e, env_config) for e in elbs_config]\n stack = StackConfig(stack_name, pre_req, post_req, instances, rds_instances, elbs)\n return stack\n\n def __init__(self, name, pre_req, post_req, instances, rds_instances, elbs):\n self.name = name\n self.pre_req = pre_req\n self.post_req = post_req\n self.instances = instances\n self.rds_instances = rds_instances\n self.elbs = elbs\n","sub_path":"paws/stack/stack_config.py","file_name":"stack_config.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"473516292","text":"\"\"\"\nFind the correlation 
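# --- Editor's addition: the sending side implied by the client above ---
# The server must prepend a fixed-width, space-padded length header so the
# client can find message boundaries on the stream. A matching sketch:
import pickle

def send_pickled(sock, obj, header_size=10):
    payload = pickle.dumps(obj)
    header = f"{len(payload):<{header_size}}".encode("utf-8")
    sock.sendall(header + payload)  # header states how many bytes follow
# int() on the client side tolerates the trailing padding spaces, so
# int(msg[:HEADERSIZE]) recovers the payload length directly.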
matrix\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport sys\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\n\ndef load_file(path):\n with open(path, 'rb') as f:\n\n file = pickle.load(f)\n\n if type(file) is not np.ndarray:\n file = np.array(file)\n\n return file\n\nX = load_file(r'/Users/sushenzhang/Documents/phd/second_year_code/X_values.pkl')\ny = load_file(r'/Users/sushenzhang/Documents/phd/second_year_code/Y_values.pkl')\nX = (X-X.min())/(X.max()-X.min())\ny= (y-y.min())/(y.max()-y.min())\n\ny = y[[0,1,3]]\ndf=pd.DataFrame(y)\ncorr=df.corr()\ncorr.style.background_gradient(cmap='coolwarm')\nfig, ax = plt.subplots(figsize=(3, 3))\nax.matshow(corr)\nplt.xticks(range(len(corr.columns)), corr.columns)\nplt.yticks(range(len(corr.columns)), corr.columns)\nplt.show()","sub_path":"corr_matrix.py","file_name":"corr_matrix.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"68648137","text":"def check_moves(in_move):\r\n x_move = 0\r\n y_move = 0\r\n\r\n if len(in_move) > 10:\r\n return False\r\n\r\n for el in in_move:\r\n x_move += 1 if el == 'n' else 0\r\n x_move -= 1 if el == 's' else 0\r\n y_move += 1 if el == 'e' else 0\r\n y_move -= 1 if el == 'o' else 0\r\n\r\n if x_move is 0 and y_move is 0:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n t = ['n','s','n','s','n','s','n','s','n','s']\r\n f = ['w','e','w','e','w','e','w','e','w','e','w','e']\r\n\r\n print(check_moves(t))\r\n print(check_moves(f))\r\n","sub_path":"other/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"552678080","text":"from data.backend import LengthOrderedDataset, np, torch\nfrom data.delta import s_index, DeltaX, xtype_to_logits, preproc_cnf\nfrom data.penn_types import select_and_split_corpus, SourcePool\nfrom tqdm import tqdm\nfrom itertools import zip_longest, count\nfrom multiprocessing import Process, Queue\nfrom utils.file_io import DelayedKeyboardInterrupt\nfrom time import sleep\n# from data.delta import E_XDIM\n\nfields = 'token', 'tag', 'ftag'\nfieldx = 'label', 'xtype'\n# FieldOrder = 'token', 'tag', 'label', 'xtype', 'ftag', 'length'\n\nclass PennTreeKeeper:\n def __init__(self, tree, v2is, trapezoid_height):\n self._tree = tree\n self._v2is = v2is\n self._w_p = None\n self._factored = {}\n self._trapezoid_height = trapezoid_height\n\n def update_factored(self, factored, words):\n self._factored.update(factored)\n tree = self._tree\n for i, word in enumerate(words):\n if word == '(':\n tree[tree.leaf_treeposition(i)] = '('\n elif word == ')':\n tree[tree.leaf_treeposition(i)] = ')'\n\n def __getitem__(self, factor):\n if factor in self._factored:\n return self._factored[factor]\n\n w2i, t2i, l2i, x2i = self._v2is\n dx, _ = DeltaX.from_penn(self._tree, factor, do_preproc = False) # [not here] watch for keyaki arg wordtrace for preproc_cnf\n if self._w_p is None:\n word, tag = dx.word_tag(w2i, t2i)\n word = np.asarray(word)\n tag = np.asarray(tag)\n self._w_p = word, tag\n else:\n word, tag = self._w_p\n\n layers_of_labels = []\n layers_of_xtypes = []\n for labels, xtypes in dx.trapezoid_gen(self._trapezoid_height, l2i, x2i):\n labels = np.asarray(labels)\n xtypes = np.asarray(xtypes)\n layers_of_labels.append(labels)\n layers_of_xtypes.append(xtypes)\n\n factored = dict(token = 
word,\n tag = tag,\n label = layers_of_labels,\n xtype = layers_of_xtypes)\n self._factored[factor] = factored\n return factored\n\n def __str__(self):\n s = f'Keeper with ' + ', '.join(self._factored.keys()) + 'cached'\n return s\n\nfrom unidecode import unidecode\nclass StanTreeKeeper:\n def __init__(self, line, v2is, trapezoid_height):\n self._line = line\n self._v2is = v2is\n self._factored = None\n self._trapezoid_height = trapezoid_height\n\n def update_factored(self, factored):\n self._factored = factored\n\n def get(self):\n if self._factored is None:\n\n w2i, p2i, x2i = self._v2is\n tree_str = self._line.replace(b'\\\\/', b'/').replace(b'\\xc2\\xa0', b'.').decode('utf-8')\n tree_str = unidecode(tree_str)\n tree = Tree.fromstring(tree_str)\n dx = DeltaX.from_stan(tree)\n self._words = words = tree.leaves()\n token = np.asarray([w2i(w) for w in words])\n\n layers_of_polars = []\n layers_of_xtypes = []\n for polars, xtypes in dx.trapezoid_gen(self._trapezoid_height, p2i, x2i):\n polars = np.asarray(polars)\n xtypes = np.asarray(xtypes)\n layers_of_polars.append(polars)\n layers_of_xtypes.append(xtypes)\n\n factored = dict(token = token,\n polar = layers_of_polars,\n xtype = layers_of_xtypes)\n self._factored = words, len(words), tree_str, factored\n return self._factored\n \n# from data.multib import add_efficient_subs\nclass PennWorker(Process):\n def __init__(self, *args):\n Process.__init__(self)\n self._q_reader_fns_height_v2is_factors = args\n\n def run(self):\n (q, reader, fns, height, v2is, factors,\n word_trace) = self._q_reader_fns_height_v2is_factors\n\n for fn in fns:\n for tree in reader.parsed_sents(fn):\n try:\n preproc_cnf(tree, word_trace = word_trace) # watch for ktb\n except:\n print(tree)\n # _, tree = add_efficient_subs(tree)\n words = tree.leaves()\n length = len(words)\n keeper = PennTreeKeeper(tree, v2is, height)\n factored = {f: keeper[f] for f in factors}\n if '(' in words or ')' in words:\n for i, word in enumerate(words):\n if word == '(':\n tree[tree.leaf_treeposition(i)] = '-LRB-'\n elif word == ')':\n tree[tree.leaf_treeposition(i)] = '-RRB-'\n results = words, length, str(tree), factored\n q.put(results)\n\nclass StanWorker(Process):\n def __init__(self, *args):\n Process.__init__(self)\n self._args = args\n\n def run(self):\n q, jobs, v2is, trapezoid_height = self._args\n for line in jobs:\n q.put(StanTreeKeeper(line, v2is, trapezoid_height).get())\n\n\ndef mp_workers(works, q, core_fn, num_threads):\n text = []\n lengths = []\n keepers = []\n with tqdm(desc = f'Receiving from {num_threads} threads ...') as qbar:\n try:\n while any(x.is_alive() for x in works):\n if q.empty():\n sleep(0.00001)\n else:\n words, length, keeper = core_fn(*q.get())\n text.append(words)\n lengths.append(length)\n keepers.append(keeper)\n qbar.update(1)\n qbar.desc = f'TreeKeepers'\n except KeyboardInterrupt as ex:\n with DelayedKeyboardInterrupt(ignore = True):\n for x in works:\n x.kill()\n raise ex\n return text, lengths, keepers\n\n\nfrom data.penn_types import Tree\nfrom data.io import distribute_jobs\nclass TrapezoidDataset(LengthOrderedDataset):\n\n @classmethod\n def from_penn(cls,\n reader,\n get_fnames,\n data_split,\n trapezoid_height,\n field_v2is,\n paddings,\n device,\n factors,\n word_trace,\n min_len = 0,\n max_len = None,\n extra_text_helper = None,\n num_threads = 0):\n\n _, w2i = field_v2is['token']\n _, t2i = field_v2is['tag']\n _, l2i = field_v2is['label']\n x2i = lambda x: xtype_to_logits(x, to_str = False)\n v2is = w2i, t2i, l2i, x2i\n \n fnames = 
get_fnames(data_split)\n if num_threads < 1:\n from utils.types import num_threads\n works = distribute_jobs(fnames, num_threads)\n q = Queue()\n for i in range(num_threads):\n w = PennWorker(q, reader, works[i], trapezoid_height, v2is, factors, word_trace)\n w.start()\n works[i] = w\n def core_fn(words, length, tree_str, factored):\n keeper = PennTreeKeeper(Tree.fromstring(tree_str), v2is, trapezoid_height)\n keeper.update_factored(factored, words)\n return words, length, keeper\n text, lengths, keepers = mp_workers(works, q, core_fn, num_threads)\n return cls('token tag label xtype', keepers, lengths, text,\n trapezoid_height,\n field_v2is,\n paddings,\n device,\n factors,\n min_len,\n max_len,\n extra_text_helper)\n\n @classmethod\n def from_stan(cls,\n data_path,\n trapezoid_height,\n field_v2is,\n paddings,\n device,\n factors,\n min_len = 0,\n max_len = None,\n extra_text_helper = None,\n num_threads = 0):\n\n _, w2i = field_v2is['token']\n _, p2i = field_v2is['polar']\n x2i = lambda x: xtype_to_logits(x, to_str = False)\n v2is = w2i, p2i, x2i\n \n if num_threads < 1:\n from utils.types import num_threads\n with open(data_path, 'rb') as fr:\n lines = list(fr)\n works = distribute_jobs(lines, num_threads)\n q = Queue()\n\n for i in range(num_threads):\n w = StanWorker(q, works[i], v2is, trapezoid_height)\n w.start()\n works[i] = w\n def core_fn(words, length, tree_str, factored):\n keeper = StanTreeKeeper(None, v2is, trapezoid_height)\n keeper.update_factored(factored)\n return words, length, keeper\n text, lengths, keepers = mp_workers(works, q, core_fn, num_threads)\n return cls('token polar xtype', keepers, lengths, text,\n trapezoid_height,\n field_v2is,\n paddings,\n device,\n factors,\n min_len,\n max_len,\n extra_text_helper)\n\n def __init__(self,\n heads,\n keepers,\n lengths,\n text,\n trapezoid_height,\n field_v2is,\n paddings,\n device,\n factors,\n min_len,\n max_len,\n extra_text_helper):\n\n heads = tuple(heads.split())\n if extra_text_helper:\n c2i = field_v2is['char'][1] if 'char' in field_v2is else None\n extra_text_helper = extra_text_helper(text, device, c2i)\n super().__init__(heads, lengths, factors, min_len, max_len, extra_text_helper)\n\n self._paddings_device_height = paddings, device, trapezoid_height\n self._keepers = tuple(keepers)\n\n def at_idx(self, idx, factor, length, helper_outputs):\n sample = self._keepers[idx]\n if factor is None:\n sample = sample.get() #?\n else:\n sample = sample[factor]\n sample['length'] = length\n return sample\n\n def _collate_fn(self, batch):\n dtype = np.int32\n field_columns = {}\n paddings, device, height = self._paddings_device_height\n\n for field, column in zip(self.heads, zip(*batch)):\n if field == 'length':\n batch_size = len(column)\n lengths = np.asarray(column, dtype)\n max_len = np.max(lengths)\n if paddings:\n max_len += 2 # BOS and EOS\n offsets = (max_len - lengths) // 2\n field_columns['offset'] = offsets\n else:\n field_columns['offset'] = np.zeros_like(lengths)\n full_triangular_len = s_index(max_len)\n tensor = lengths\n elif field in fields: # word or tags\n tensor = np.zeros([batch_size, max_len], dtype)\n for i_, (values, length) in enumerate(zip(column, lengths)):\n if paddings:\n start = offsets[i_]\n end = start + length\n bid, eid = paddings[field]\n tensor[i_, :start] = bid\n tensor[i_, start:end] = values\n tensor[i_, end:] = eid\n else:\n tensor[i_, :length] = values\n # try:\n # except:\n # import pdb; pdb.set_trace()\n else: # label or xtype\n tensor = np.zeros([batch_size, 
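# --- Editor's addition: the fan-out/fan-in shape used by mp_workers and
# distribute_jobs above, reduced to a self-contained sketch (the worker
# body is a stand-in for the tree-parsing work) ---
from multiprocessing import Process, Queue

def _worker(q, jobs):
    for job in jobs:
        q.put(job * 2)  # stand-in for parsing/feature extraction

def fan_out(jobs, num_procs=4):
    q = Queue()
    chunks = [jobs[i::num_procs] for i in range(num_procs)]  # like distribute_jobs
    procs = [Process(target=_worker, args=(q, c)) for c in chunks]
    for p in procs:
        p.start()
    results = [q.get() for _ in jobs]  # drain exactly len(jobs) items first
    for p in procs:
        p.join()  # joining after draining avoids the full-queue deadlock
    return results
# mp_workers above instead polls q while any worker is_alive, which works
# around the same deadlock at the cost of a sleep loop.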
full_triangular_len], dtype = np.uint8)\n cumu_length = 0\n track_label = field in ('label', 'polar')\n if track_label:\n segments = []\n mask_length = np.zeros([batch_size], dtype)\n seg_length = np.zeros([batch_size, max_len], dtype)\n top3_label = np.stack([np.concatenate(x[-1:-3:-1]) for x in column]) # [batch, 3]\n\n for l_, layer in enumerate(zip_longest(*column)):\n max_layer_len = max(len(x) for x in layer if x is not None)\n if paddings:\n max_layer_len += 2\n cumu_length += max_layer_len\n l_start = full_triangular_len - cumu_length\n l_end = l_start + max_layer_len\n if track_label:\n segments.append(max_layer_len)\n for i_, seq in enumerate(layer):\n if seq is None:\n continue\n seq_len = len(seq)\n if track_label:\n mask_length[i_] += max_layer_len\n seg_length[i_, -1 - l_] = seq_len\n if paddings:\n bid, eid = paddings[field]\n start = l_start + offsets[i_]\n end = start + seq_len\n tensor[i_, l_start:start] = bid\n tensor[i_, start:end] = seq\n tensor[i_, end:l_end] = eid\n else:\n end = l_start + seq_len\n tensor[i_, l_start:end] = seq\n tensor = tensor[:, -cumu_length:]\n\n field_columns[field] = tensor\n\n field_columns['mask_length'] = cumu_length - mask_length\n field_columns['top3_label'] = top3_label\n for f, column in field_columns.items():\n field_columns[f] = torch.as_tensor(column,\n dtype = (None if f == 'xtype' else torch.long),\n device = device)\n\n segments.reverse()\n # height_segments = []\n # while segments:\n # for i in count():\n # if i % height == 0:\n # height_segments.append(0)\n # height_segments[-1] += segments.pop()\n # if not segments:\n # break\n # height_segments.reverse()\n field_columns['height'] = height\n field_columns['segment'] = segments\n field_columns['seg_length'] = seg_length[:, -len(segments):]\n\n # if len(segments) > 15: # even still sooooo sparse\n # p_ = torch.arange(full_triangular_len, device = device)[None, :]\n # x_ = p_ >= field_columns['mask_length'][:, None]\n # import pdb; pdb.set_trace()\n\n return field_columns\n","sub_path":"data/trapezoid/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":14794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"603300781","text":"'''\nassignment 2 of week 3, Stanford Machine Learning course\nhttps://www.coursera.org/learn/machine-learning/home/week/3\nmodified by Hao Qian, Mar 22, 2017\n'''\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import fmin_bfgs\n\nplt.style.use('ggplot')\n\n#load file into pandas dataframe\ndef read_data_file(filename):\n columns=['Exam1','Exam2','Admitted']\n df = pd.read_csv(filename,delimiter=\",\",names=columns)\n print(df.head())\n print(df.describe())\n print(df.dtypes)\n return df\n\n#visualize the data before analysis\ndef plotData(X,y):\n #X is matrix for scores of exams\n #y is the decision of admission\n pos=y.eq(1)\n neg=y.eq(0)\n pos_line = plt.scatter(X[pos].iloc[:,0],X[pos].iloc[:,1],s=40,marker='+',c='b')\n neg_line = plt.scatter(X[neg].iloc[:,0],X[neg].iloc[:,1],s=30,marker='o',c='y')\n plt.legend((pos_line,neg_line),('Admitted','Not admitted'),scatterpoints=1,\n loc='upper right',fontsize=8)\n \n\n#sigmoid function, 1/(1+e^(-z)) \ndef sigmoid(Z):\n g = 1./(1.+np.exp(-Z))\n #print(g)\n return g\n\n#calculate cost function \ndef costFunction(theta,X,y): \n m=X.shape[0]\n #theta=np.reshape(theta,(len(theta),1))\n 
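# Vectorized cross-entropy: J(theta) = (1/m) * (-y'*log(h) - (1-y)'*log(1-h)) with h = sigmoid(X*theta);\n # X is assumed to already carry the intercept column of ones.\n 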
cost=(1./m)*(-y.transpose().dot(np.log(sigmoid(X.dot(theta))))-(1-y).transpose().dot(np.log(1-sigmoid(X.dot(theta)))))\n print(\"cost shape\",cost.shape)\n return cost\n\ndef compute_cost(theta,X,y): #computes cost given predicted and actual values\n m = X.shape[0] #number of training examples\n #theta = np.reshape(theta,(len(theta),1))\n #y = reshape(y,(len(y),1)) \n J = (1./m) * (-np.transpose(y).dot(np.log(sigmoid(X.dot(theta)))) - np.transpose(1-y).dot(np.log(1-sigmoid(X.dot(theta))))) \n grad = np.transpose((1./m)*np.transpose(sigmoid(X.dot(theta)) - y).dot(X))\n #optimize.fmin expects a single value, so cannot return grad\n return J[0][0]#,grad\n\n#get the gradeint of cost\ndef gradeint(theta,X,y):\n m=X.shape[0]\n #theta=np.reshape(theta,(len(theta),1))\n grad=(1./m)*((sigmoid(X.dot(theta))-y).transpose().dot(X))\n print(\"grad shape\",grad.shape)\n return grad\n\ndef fminunc(theta,X,y,alpha,niter):\n cost_min, grad_min=costFunction(theta,X,y)\n grad=grad_min\n for j in np.arange(niter):\n for i in np.arange(len(theta)):\n theta[i] -= alpha*grad[i]\n cost, grad=costFunction(theta,X,y)\n print(theta,cost,grad)\n if(cost(b)\".format(\n from_statement, to_statement, edge_statement)\n tx.run(statement)\n\n\ndef runAddVertex(session, name: str, data: list):\n print('start thread:', threading.currentThread().getName())\n vertext_time = time.time()\n ids = 0\n batch_ids = 0\n total_count = len(data)\n while ids < total_count:\n batch_data = data[ids:ids + vertex_batch_size]\n batch_start_time = time.time()\n session.write_transaction(\n addVertex, batch_data, name)\n ids += vertex_batch_size\n batch_ids += 1\n print(\"%{}:vertex write {}/{} use {} seconds\".format(batch_ids,\n ids, total_count, time.time() - batch_start_time))\n vertex_use_time = {\n \"tagname\": name,\n \"use_time\": time.time() - vertext_time,\n \"property_keys\": [] if len(data) == 0 else list(data[0].keys()),\n \"length\": len(v['data'])\n }\n print(vertex_use_time)\n\n\ndef runAddEdge(session, name: str, data: list):\n edge_time = time.time()\n ids = 0\n batch_ids = 0\n total_count = len(data)\n while ids < total_count:\n batch_data = data[ids:ids + edge_batch_size]\n batch_start_time = time.time()\n session.write_transaction(\n addEdge, batch_data, name)\n ids += edge_batch_size\n batch_ids += 1\n print(\"%{}:edge write {}/{} use {} seconds\".format(batch_ids,\n ids, total_count, time.time() - batch_start_time))\n\n edge_use_time = {\n \"edgename\": name,\n \"use_time\": time.time() - edge_time,\n \"property_kest\": [] if len(data) == 0 else list(data[0].keys()),\n \"length\": len(data)\n }\n print(edge_use_time)\n\n\nif __name__ == '__main__':\n\n import argparse\n import time\n\n parser = argparse.ArgumentParser(description='import json file to neo4j')\n\n parser.add_argument('-f', '--file', help='json file path',\n type=str, required=True)\n parser.add_argument('-a', '--address', help='neo4j address',\n type=str, default=\"bolt://127.0.0.1:7687\")\n parser.add_argument('-p', '--password',\n help='neo4j password', type=str, default=\"neo4j\")\n parser.add_argument('-u', '--user', help='neo4j user name',\n type=str, default=\"neo4j\")\n parser.add_argument('-t', '--thread_num', type=int, default=4)\n args = parser.parse_args()\n\n time_report = {\"vertex_use_time\": [], \"edge_use_time\": [],\n \"vertex_batch_size\": vertex_batch_size, \"edge_batch_size\": edge_batch_size}\n vertex_use_time = time_report[\"vertex_use_time\"]\n edge_use_time = time_report[\"edge_use_time\"]\n\n thread_num = args.thread_num\n driver = 
GraphDatabase.driver(\n args.address, auth=(args.user, args.password))\n with open(args.file) as f:\n data = json.load(f)\n print(\"start write\")\n sessions = [driver.session() for i in range(thread_num)]\n total_start_time = time.time()\n time_report[\"total_start_time\"] = total_start_time\n\n thread_vertex = []\n tag_count = len(data['vertex'])\n thread_tag_count = thread_num // tag_count\n if thread_tag_count < 1:\n raise Exception(\"thread_num must be at least tag_count\")\n\n vertices_time = time.time()\n session_ids = 0\n for name, v in data[\"vertex\"].items():\n vertex_count = len(v['data'])\n per_thread_tag_count = vertex_count // thread_tag_count\n for i in range(thread_tag_count):\n end = (i + 1) * per_thread_tag_count if i + 1 < thread_tag_count else vertex_count\n t = threading.Thread(target=runAddVertex, name='thread-' + name, args=(\n sessions[session_ids], name, v['data'][i * per_thread_tag_count:end]))\n t.start()\n session_ids += 1\n thread_vertex.append(t)\n for t in thread_vertex:\n t.join()\n\n time_report[\"total_vertex\"] = {\n \"vertex_num\": len(data[\"vertex\"]),\n \"use_time\": time.time() - vertices_time\n }\n\n session_ids = 0\n thread_edge = []\n edge_type_count = len(data['edge'])\n thread_edge_type_count = thread_num // edge_type_count\n if thread_edge_type_count < 1:\n raise Exception(\"thread_num must be at least edge_type_count\")\n edges_time = time.time()\n for name, e in data[\"edge\"].items():\n edge_count = len(e['data'])\n per_thread_edge_count = edge_count // thread_edge_type_count\n for i in range(thread_edge_type_count):\n end = (i + 1) * per_thread_edge_count if i + 1 < thread_edge_type_count else edge_count\n t = threading.Thread(target=runAddEdge, name='thread-' + name, args=(\n sessions[session_ids], name, e['data'][i * per_thread_edge_count:end]\n ))\n t.start()\n session_ids += 1\n thread_edge.append(t)\n for t in thread_edge:\n t.join()\n\n time_report[\"total_edge\"] = {\n \"edge_num\": len(data[\"edge\"]),\n \"use_time\": time.time() - edges_time\n }\n time_report[\"begin_to_end_insert\"] = time.time() - total_start_time\n for session in sessions:\n session.close()\n driver.close()\n print(json.dumps(time_report, indent=4))\n","sub_path":"tools/exchange/scripts/json2neo4j.py","file_name":"json2neo4j.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"255039860","text":"import sys\r\n\r\nd = sys.argv[1].strip()\r\n\r\nmarks = [\"BMW\", \"Toyota\", \"Mercedes\", \"Lada\", \"Nissan\", \"Audi\"]\r\n\r\n
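# list.remove deletes the first matching element and raises ValueError if the value is absent,\r\n# so the command-line argument is assumed to name one of the marks above.\r\n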
marks.remove(d)\r\n\r\nprint(marks)\r\n\r\n\"\"\"\nREMOVING A MARK, PART 2\nThe program below creates a list of car marks.\nExtend the code so that the program takes a mark\nname from the first command-line argument\nand then removes it from the list.\n\nUsage example:\n> python program.py Toyota\n> ['BMW', 'Mercedes', 'Lada', 'Nissan', 'Audi']\n\"\"\"","sub_path":"shultais_courses/lists_and_typles/list_methods/del_mark_part2.py","file_name":"del_mark_part2.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"115302713","text":"# 8. Count how many times a given digit occurs in an entered sequence of numbers.\n# The amount of numbers to enter and the digit to count are given via keyboard input.\n\n\nprint('Enter a natural number')\nn = int(input('How many numbers do you want to enter? '))\n\nnumbers = []\n\nfor i in range(1, n+1):\n    x_i = int(input('Enter an integer: '))\n    numbers.append(x_i)\n\nprint('Enter a positive integer less than 10')\ny = int(input('Enter a digit: '))\n\nentry = 0\n\nfor i in numbers:\n    for j in str(i):\n        if int(j) == y:\n            entry = entry + 1\n\nprint(f'The digit {y} occurs in the sequence {numbers} {entry} times')","sub_path":"lesson02/task8.py","file_name":"task8.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"297466794","text":"# how many seats are occupied in the final state?\r\n\r\n# input\r\nwith open('11.txt', 'r') as file:\r\n    input = file.read()\r\n\r\n# turn the input into a list, one element is one row of seats\r\ninput_list = list(input.split('\\n'))\r\n\r\n# turn each row into a list, so plane_seats has a list of lists\r\nplane_seats = [list(row) for row in input_list]\r\n\r\n# get size of array\r\nrow_len = len(plane_seats[0]) # row length\r\nnum_row = len(plane_seats) # number of rows\r\n\r\n# get number of seats\r\nnum_seats = 0\r\nfor i in range(0, row_len):\r\n    for j in range(0, num_row):\r\n        if plane_seats[j][i] == 'L':\r\n            num_seats += 1\r\n\r\n# states given:\r\n# empty = L\r\n# occupied = #\r\n# floor = .\r\n\r\n# to avoid deep copies, i'm going to introduce two more states:\r\n# E = currently empty, will become occupied\r\n# O = currently occupied, will become empty\r\n\r\n# CA rules:\r\n# empty + sees no occupied => occupied\r\n# occupied + sees five or more occupied => empty\r\n\r\n# get list of directions as offset (i,j), i.e. (-1,1) is 1 left 1 down\r\ndirs = [(-1,-1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\r\n\r\n# get Number of Occupied Visible seats\r\ndef get_nov(i: int, j: int):\r\n    nov = 0\r\n    for dir in dirs:\r\n        # get i-offset and j-offset\r\n        io = dir[0]\r\n        jo = dir[1]\r\n        while 1:\r\n            if i + io < 0 or i + io >= row_len or j + jo < 0 or j + jo >= num_row: # not in range\r\n                break # we went too far\r\n            if plane_seats[j + jo][i + io] == '.':\r\n                # the spot in that direction is the floor\r\n                # take another step in that direction\r\n                io += dir[0]\r\n                jo += dir[1]\r\n            elif plane_seats[j + jo][i + io] in ['L', 'E']:\r\n                # the first seat you see is empty\r\n                # go to next dir\r\n                break\r\n            elif plane_seats[j + jo][i + io] in ['#', 'O']:\r\n                # the first seat you see is occupied\r\n                # add one to nov then go to next dir\r\n                nov += 1\r\n                break\r\n    return nov\r\n\r\n# just gonna display the step\r\nstep = 0\r\n# and the number of seats occupied\r\nnum_occ = 0\r\n\r\n# loop until no change\r\nwhile 1:\r\n    # track if a change was made at each step\r\n    has_changed = False\r\n\r\n    print('starting step ' + str(step) + '. 
seats occupied: ' + str(num_occ) + ' of ' + str(num_seats), end='\\r', flush=True)\r\n for i in range(0, row_len):\r\n for j in range(0, num_row):\r\n # get state of current seat\r\n seat = plane_seats[j][i]\r\n\r\n # L = currently empty, next state undetermined\r\n # # = currently occupied, next state undetermined\r\n # E = currently empty, will become occupied\r\n # O = currently occupied, will become empty\r\n\r\n # empty + sees no occupied => occupied\r\n if seat in ['L', 'E']:\r\n if get_nov(i,j) == 0:\r\n plane_seats[j][i] = 'E'\r\n has_changed = True\r\n # occupied + five or more occupied in vision => empty\r\n if seat in ['#', 'O']:\r\n if get_nov(i,j) >= 5:\r\n plane_seats[j][i] = 'O'\r\n has_changed = True\r\n # o/w no change\r\n # if no changes, we are done\r\n if not has_changed:\r\n break\r\n # o/w increase step and move to next stage (E -> #, O -> L)\r\n step += 1\r\n for i in range(0, row_len):\r\n for j in range(0, num_row):\r\n # get state of current seat\r\n seat = plane_seats[j][i]\r\n # see if it should go empty to occupied\r\n if seat == 'E':\r\n plane_seats[j][i] = '#'\r\n num_occ += 1\r\n # see if it should go occupied to empty\r\n if seat == 'O':\r\n plane_seats[j][i] = 'L'\r\n num_occ -= 1\r\n # if it's . (floor), L, or #, no change\r\n\r\nprint('(part b) final number of seats occupied: ' + str(num_occ) + ' of ' + str(num_seats))","sub_path":"11b.py","file_name":"11b.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"177324665","text":"# read camels dataset\nimport os\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nfrom hydroDL import utils, pathCamels\nfrom pandas.api.types import is_numeric_dtype, is_string_dtype\nimport time\nimport json\nfrom . 
import Dataframe\n\n# module variable\ntRange = [19800101, 20200101]\ntRangeobs = [19800101, 20200101] #[19801001, 20161001] # streamflow observations\ntLst = utils.time.tRange2Array(tRange)\ntLstobs = utils.time.tRange2Array(tRangeobs)\nnt = len(tLst)\nntobs = len(tLstobs)\n\n############### for a paper with two upstream node with temp data ###############\n#forcingLst = ['Q3Tw', 'Q7Tw', 'Q3Q', 'Q5Q', 'Q7Q',\n # 'Q9Q', 'Btamean', 'V', 'Pressure', 'SkyCov', 'SR.sum', 'Precip',\n# ]\n###################################################################################\n\n############### for stream_temp Module ####################\n##forcingLst = ['basin_ccov', 'basin_humid', 'basin_rain',\n ## 'basin_tave_air', 'basin_gwflow', 'basin_potet', 'basin_sroff',\n ## 'basin_ssflow', 'basin_swrad', 'basin_tave_gw',\n ## 'basin_tave_ss', 'network_width','outlet_width', 'outlet_outflow', 'gw_tau', 'ss_tau'] #, 'gw_tau', 'ss_tau' 'obs_discharge'\n\n##attrLstSel = ['hru_elev', 'hru_slope', 'network_elev',\n ## 'outlet_elev', 'network_length', 'network_slope', 'outlet_slope',\n ## 'basin_area']\n################################################################\n\n############## Water Temperature for CONUS scale ##########\nforcingLst = ['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)', 'tmax(C)',\n 'tmin(C)', 'vp(Pa)', '00060_Mean'] #, 'pred_discharge' , '00060_Mean' ,'combine_discharge', 'combine_discharge' 'swe(mm)' ,'outlet_outflow',, 'pred_discharge', , '00060_Mean'\nattrLstSel = ['DRAIN_SQKM',\n\n 'STREAMS_KM_SQ_KM',\n 'STOR_NID_2009', 'FORESTNLCD06', 'PLANTNLCD06',\n 'SLOPE_PCT', 'RAW_DIS_NEAREST_MAJ_DAM',\n\n'PERDUN', 'RAW_DIS_NEAREST_DAM', 'RAW_AVG_DIS_ALL_MAJ_DAMS',\n'T_MIN_BASIN', 'T_MINSTD_BASIN', 'RH_BASIN', 'RAW_AVG_DIS_ALLDAMS', 'PPTAVG_BASIN',\n'HIRES_LENTIC_PCT','T_AVG_BASIN', 'T_MAX_BASIN','T_MAXSTD_BASIN', 'NDAMS_2009', 'ELEV_MEAN_M_BASIN'\n ] #, 'MAJ_NDAMS_2009',\n # Round1: 'PERDUN', 'RAW_DIS_NEAREST_DAM', 'RAW_AVG_DIS_ALL_MAJ_DAMS',\n # Round2: 'T_MIN_BASIN', 'T_MINSTD_BASIN', 'RH_BASIN',, 'RAW_AVG_DIS_ALLDAMS', 'PPTAVG_BASIN'\n # Round3: 'HIRES_LENTIC_PCT','T_AVG_BASIN', 'T_MAX_BASIN','T_MAXSTD_BASIN', 'NDAMS_2009','ELEV_MEAN_M_BASIN',\n# attrLstSel = ['DRAIN_SQKM', 'PPTAVG_BASIN', 'T_AVG_BASIN', 'T_MAX_BASIN',\n# 'T_MAXSTD_BASIN', 'T_MIN_BASIN', 'T_MINSTD_BASIN', 'RH_BASIN',\n# 'STREAMS_KM_SQ_KM', 'PERDUN', 'HIRES_LENTIC_PCT', 'NDAMS_2009',\n# 'STOR_NID_2009', 'FORESTNLCD06', 'PLANTNLCD06', 'ELEV_MEAN_M_BASIN',\n# 'SLOPE_PCT', 'RAW_DIS_NEAREST_DAM', 'RAW_AVG_DIS_ALLDAMS',\n# 'RAW_DIS_NEAREST_MAJ_DAM', 'RAW_AVG_DIS_ALL_MAJ_DAMS',\n# 'MAJ_NDAMS_2009', 'POWER_NUM_PTS', 'POWER_SUM_MW', 'lat', 'lon',\n# 'HYDRO_DISTURB_INDX', 'BFI_AVE', 'FRAGUN_BASIN', 'DEVNLCD06',\n# 'PERMAVE', 'RFACT', 'BARRENNLCD06', 'DECIDNLCD06', 'EVERGRNLCD06',\n# 'MIXEDFORNLCD06', 'SHRUBNLCD06', 'GRASSNLCD06', 'WOODYWETNLCD06',\n# 'EMERGWETNLCD06', 'GEOL_REEDBUSH_DOM_PCT',\n# 'STRAHLER_MAX', 'MAINSTEM_SINUOUSITY', 'REACHCODE', 'ARTIFPATH_PCT',\n# 'ARTIFPATH_MAINSTEM_PCT', 'PERHOR', 'TOPWET', 'CONTACT', 'CANALS_PCT',\n# 'RAW_AVG_DIS_ALLCANALS', 'NPDES_MAJ_DENS', 'RAW_AVG_DIS_ALL_MAJ_NPDES',\n# 'FRESHW_WITHDRAWAL', 'PCT_IRRIG_AG', 'ROADS_KM_SQ_KM',\n# 'PADCAT1_PCT_BASIN', 'PADCAT2_PCT_BASIN']\n\n########################################################################\n############# Streamflow prediction for CONUS scale ##########################\n# attrLstSel = ['ELEV_MEAN_M_BASIN', 'SLOPE_PCT', 'DRAIN_SQKM',\n# 'HYDRO_DISTURB_INDX', 'STREAMS_KM_SQ_KM', 'BFI_AVE', 'NDAMS_2009',\n# 'STOR_NID_2009', 'RAW_DIS_NEAREST_DAM', 
'FRAGUN_BASIN', 'DEVNLCD06',\n# 'FORESTNLCD06', 'PLANTNLCD06', 'PERMAVE', 'RFACT',\n# 'PPTAVG_BASIN', 'BARRENNLCD06', 'DECIDNLCD06', 'EVERGRNLCD06',\n# 'MIXEDFORNLCD06', 'SHRUBNLCD06', 'GRASSNLCD06', 'WOODYWETNLCD06',\n# 'EMERGWETNLCD06', 'GEOL_REEDBUSH_DOM_PCT',\n# 'STRAHLER_MAX', 'MAINSTEM_SINUOUSITY', 'REACHCODE', 'ARTIFPATH_PCT',\n# 'ARTIFPATH_MAINSTEM_PCT', 'HIRES_LENTIC_PCT', 'PERDUN', 'PERHOR',\n# 'TOPWET', 'CONTACT', 'CANALS_PCT', 'RAW_AVG_DIS_ALLCANALS',\n# 'NPDES_MAJ_DENS', 'RAW_AVG_DIS_ALL_MAJ_NPDES',\n# 'RAW_AVG_DIS_ALL_MAJ_DAMS', 'FRESHW_WITHDRAWAL', 'PCT_IRRIG_AG',\n# 'POWER_NUM_PTS', 'POWER_SUM_MW', 'ROADS_KM_SQ_KM', 'PADCAT1_PCT_BASIN',\n# 'PADCAT2_PCT_BASIN'] # 'GEOL_REEDBUSH_SITE', , 'AWCAVE'\n##############################################################################\ndef readGageInfo(dirDB):\n gageFile = os.path.join(dirDB, 'basin_timeseries_v1p2_metForcing_obsFlow',\n 'basin_dataset_public_v1p2', 'basin_metadata',\n 'gauge_information.txt')\n\n data = pd.read_csv(gageFile, sep='\\t', header=None, skiprows=1)\n # header gives some troubles. Skip and hardcode\n fieldLst = ['huc', 'id', 'name', 'lat', 'lon', 'area']\n out = dict()\n for s in fieldLst:\n if s is 'name':\n out[s] = data[fieldLst.index(s)].values.tolist()\n else:\n out[s] = data[fieldLst.index(s)].values\n return out\n\ndef readUsgsGage(usgsId, *, readQc=False):\n ##ind = np.argwhere(gageDict['id'] == usgsId)[0][0]\n ##huc = gageDict['huc'][ind]\n ##usgsFile = os.path.join(dirDB, 'basin_timeseries_v1p2_metForcing_obsFlow',\n ## 'basin_dataset_public_v1p2', 'usgs_streamflow',\n ## str(huc).zfill(2),\n ## '%08d_streamflow_qc.txt' % (usgsId))\n ##dataTemp = pd.read_csv(usgsFile, sep=r'\\s+', header=None)\n ##obs = dataTemp[4].values\n obs = forcing_data.loc[forcing_data['site_no']==usgsId, TempTarget].to_numpy()\n ##obs[obs < 0] = np.nan\n if readQc is True:\n qcDict = {'A': 1, 'A:e': 2, 'M': 3}\n qc = np.array([qcDict[x] for x in dataTemp[5]])\n if len(obs) != ntobs:\n ## out = np.full([ntobs], np.nan)\n ##dfDate = dataTemp[[1, 2, 3]]\n ##dfDate.columns = ['year', 'month', 'day']\n ##date = pd.to_datetime(dfDate).values.astype('datetime64[D]')\n if 'datetime' in forcing_data.columns:\n date = forcing_data.loc[forcing_data['site_no']==usgsId, 'datetime']\n elif 'date' in forcing_data.columns:\n date = forcing_data.loc[forcing_data['site_no']==usgsId, 'date']\n [C, ind1, ind2] = np.intersect1d(date, tLstobs, return_indices=True)\n out[ind2] = obs\n if readQc is True:\n outQc = np.full([ntobs], np.nan)\n outQc[ind2] = qc\n else:\n out = obs\n if readQc is True:\n outQc = qc\n\n if readQc is True:\n return out, outQc\n else:\n return out\n\n\ndef readUsgs(usgsIdLst):\n t0 = time.time()\n y = np.empty([len(usgsIdLst), ntobs])\n for k in range(len(usgsIdLst)):\n dataObs = readUsgsGage(usgsIdLst[k])\n y[k, :] = dataObs\n print(\"read Stream Temperature\", time.time() - t0)\n return y\n\n\ndef readForcingGage(usgsId, varLst=forcingLst, *, dataset='nldas'):\n # dataset = daymet or maurer or nldas\n ##forcingLst = ['dayl', 'prcp', 'srad', 'swe', 'tmax', 'tmin', 'vp']\n ##ind = np.argwhere(gageDict['id'] == usgsId)[0][0]\n ##huc = gageDict['huc'][ind]\n\n ##dataFolder = os.path.join(\n ## dirDB, 'basin_timeseries_v1p2_metForcing_obsFlow',\n ##'basin_dataset_public_v1p2', 'basin_mean_forcing')\n if dataset is 'daymet':\n tempS = 'cida'\n else:\n tempS = dataset\n ## dataFile = os.path.join(dataFolder, dataset,\n ## str(huc).zfill(2),\n ## '%08d_lump_%s_forcing_leap.txt' % (usgsId, tempS))\n ##dataTemp = 
pd.read_csv(dataFile, sep=r'\\s+', header=None, skiprows=4)\n dataTemp = forcing_data.loc[forcing_data['site_no']==usgsId]\n nf = len(varLst)\n out = np.empty([nt, nf])\n for k in range(nf):\n # assume all files are of same columns. May check later.\n ##ind = forcingLst.index(varLst[k])\n ##out[:, k] = dataTemp[ind + 4].values\n out[:, k] = dataTemp[varLst[k]].values\n return out\n\n\ndef readForcing(usgsIdLst, varLst):\n t0 = time.time()\n\n x = np.empty([len(usgsIdLst), nt, len(varLst)])\n\n for k in range(len(usgsIdLst)):\n data = readForcingGage(usgsIdLst[k], varLst)\n x[k, :, :] = data\n print(\"read forcing data\", time.time() - t0)\n return x\n\n\ndef readAttrAll(*, saveDict=False):\n dataFolder = os.path.join(dirDB, 'camels_attributes_v2.0',\n 'camels_attributes_v2.0')\n fDict = dict() # factorize dict\n varDict = dict()\n varLst = list()\n outLst = list()\n keyLst = ['topo', 'clim', 'hydro', 'vege', 'soil', 'geol']\n\n for key in keyLst:\n dataFile = os.path.join(dataFolder, 'camels_' + key + '.txt')\n dataTemp = pd.read_csv(dataFile, sep=';')\n varLstTemp = list(dataTemp.columns[1:])\n varDict[key] = varLstTemp\n varLst.extend(varLstTemp)\n k = 0\n nGage = len(gageDict['id'])\n outTemp = np.full([nGage, len(varLstTemp)], np.nan)\n for field in varLstTemp:\n if is_string_dtype(dataTemp[field]):\n value, ref = pd.factorize(dataTemp[field], sort=True)\n outTemp[:, k] = value\n fDict[field] = ref.tolist()\n elif is_numeric_dtype(dataTemp[field]):\n outTemp[:, k] = dataTemp[field].values\n k = k + 1\n outLst.append(outTemp)\n out = np.concatenate(outLst, 1)\n if saveDict is True:\n fileName = os.path.join(dataFolder, 'dictFactorize.json')\n with open(fileName, 'w') as fp:\n json.dump(fDict, fp, indent=4)\n fileName = os.path.join(dataFolder, 'dictAttribute.json')\n with open(fileName, 'w') as fp:\n json.dump(varDict, fp, indent=4)\n return out, varLst\n\n\ndef readAttr(usgsIdLst, varLst):\n attrAll, varLstAll = readAttrAll()\n indVar = list()\n for var in varLst:\n indVar.append(varLstAll.index(var))\n idLstAll = gageDict['id']\n C, indGrid, ind2 = np.intersect1d(idLstAll, usgsIdLst, return_indices=True)\n temp = attrAll[indGrid, :]\n out = temp[:, indVar]\n return out\n\n\ndef calStat(x):\n a = x.flatten()\n bb = a[~np.isnan(a)] # kick out Nan\n b = bb[bb != (-999999)]\n p10 = np.percentile(b, 10).astype(float)\n p90 = np.percentile(b, 90).astype(float)\n mean = np.mean(b).astype(float)\n std = np.std(b).astype(float)\n if std < 0.001:\n std = 1\n return [p10, p90, mean, std]\n\ndef calStatgamma(x): # for daily streamflow and precipitation\n a = x.flatten()\n bb = a[~np.isnan(a)] # kick out Nan\n b = bb[bb != (-999999)]\n b = np.log10(np.sqrt(b)+0.1) # do some tranformation to change gamma characteristics\n p10 = np.percentile(b, 10).astype(float)\n p90 = np.percentile(b, 90).astype(float)\n mean = np.mean(b).astype(float)\n std = np.std(b).astype(float)\n if std < 0.001:\n std = 1\n return [p10, p90, mean, std]\n\ndef calStatbasinnorm(x): # for daily streamflow normalized by basin area and precipitation\n ## basinarea = readAttr(gageDict['id'], ['area_gages2'])\n x[x==(-999999)]=0\n #np.where(x==(-999999), 0, x)\n basinarea = attr_data['DRAIN_SQKM']\n ## meanprep = readAttr(gageDict['id'], ['p_mean'])\n meanprep = attr_data['PPTAVG_BASIN'] # anual average precipitation\n # meanprep = readAttr(gageDict['id'], ['q_mean'])\n temparea = np.tile(basinarea, ( x.shape[1], 1)).transpose()\n tempprep = np.tile(meanprep, ( x.shape[1],1)).transpose()\n flowua = (x * 0.0283168 * 3600 * 24) / 
((temparea * (10 ** 6)) * (tempprep * 10 ** (-2))/365) # unit (m^3/day)/(m^3/day)\n a = flowua.flatten()\n b = a[~np.isnan(a)] # kick out Nan\n b = np.log10(np.sqrt(b)+0.1) # do some tranformation to change gamma characteristics plus 0.1 for 0 values\n p10 = np.percentile(b, 10).astype(float)\n p90 = np.percentile(b, 90).astype(float)\n mean = np.mean(b).astype(float)\n std = np.std(b).astype(float)\n if std < 0.001:\n std = 1\n return [p10, p90, mean, std]\n\n\ndef calStatAll():\n statDict = dict()\n ##idLst = gageDict['id']\n idLst = forcing_data['site_no'].unique()\n # usgs streamflow\n y = readUsgs(idLst)\n # statDict['usgsFlow'] = calStatgamma(y)\n ##statDict['00060_Mean'] = calStatbasinnorm(y)\n if TempTarget == '00060_Mean':\n statDict['00060_Mean'] = calStatbasinnorm(y)\n elif TempTarget == 'combine_discharge':\n statDict['00060_Mean'] = calStatbasinnorm(y)\n else:\n statDict[TempTarget] = calStat(y)\n # forcing\n x = readForcing(idLst, forcingLst)\n for k in range(len(forcingLst)):\n var = forcingLst[k]\n if var=='prcp(mm/day)':\n statDict[var] = calStatgamma(x[:, :, k])\n elif var=='00060_Mean':\n statDict[var] = calStatbasinnorm(x[:, :, k])\n elif var=='combine_discharge':\n statDict[var] = calStatbasinnorm(x[:, :, k])\n else:\n statDict[var] = calStat(x[:, :, k])\n # const attribute\n ##attrData, attrLst = readAttrAll()\n attrLst = attrLstSel\n attrData = np.empty([len(idLst), len(attrLst)])\n for i, ii in enumerate(attrLst):\n attrData[:,i] = attr_data[ii].values\n for k in range(len(attrLst)):\n var = attrLst[k]\n statDict[var] = calStat(attrData[:, k])\n statFile = os.path.join(dirDB, 'Statistics_basinnorm.json')\n with open(statFile, 'w') as fp:\n json.dump(statDict, fp, indent=4)\n\n\ndef transNorm(x, varLst, *, toNorm):\n if type(varLst) is str:\n varLst = [varLst]\n out = np.zeros(x.shape)\n\n for k in range(len(varLst)):\n var = varLst[k]\n\n stat = statDict[var]\n if toNorm is True:\n if len(x.shape) == 3:\n if var == 'prcp(mm/day)' or var == '00060_Mean' or var == 'combine_discharge':\n x[:, :, k] = np.log10(np.sqrt(x[:, :, k]) + 0.1)\n\n out[:, :, k] = (x[:, :, k] - stat[2]) / stat[3]\n elif len(x.shape) == 2:\n if var == 'prcp(mm/day)' or var == '00060_Mean' or var == 'combine_discharge':\n x[:, k] = np.log10(np.sqrt(x[:, k]) + 0.1)\n out[:, k] = (x[:, k] - stat[2]) / stat[3]\n else:\n if len(x.shape) == 3:\n out[:, :, k] = x[:, :, k] * stat[3] + stat[2]\n if var == 'prcp(mm/day)' or var == '00060_Mean' or var == 'combine_discharge':\n out[:, :, k] = (np.power(10, out[:, :, k]) - 0.1) ** 2\n\n elif len(x.shape) == 2:\n out[:, k] = x[:, k] * stat[3] + stat[2]\n if var == 'prcp(mm/day)' or var == '00060_Mean' or var == 'combine_discharge':\n out[:, k] = (np.power(10, out[:, k]) - 0.1) ** 2\n\n\n return out\n\ndef basinNorm(x, gageid, toNorm):\n # for regional training, gageid should be numpyarray\n #if type(gageid) is str:\n # if gageid == 'All':\n # gageid = gageDict['id']\n nd = len(x.shape)\n meanprep = attr_data['PPTAVG_BASIN']\n basinarea = attr_data['DRAIN_SQKM']\n\n # basinarea = readAttr(gageid, ['area_gages2'])\n # meanprep = readAttr(gageid, ['p_mean'])\n # # meanprep = readAttr(gageid, ['q_mean']) #this line was ponded from the beginning\n if nd == 3 and x.shape[2] == 1:\n x = x[:,:,0] # unsqueeze the original 3 dimension matrix\n temparea = np.tile(basinarea, ( x.shape[1], 1)).transpose()\n tempprep = np.tile(meanprep, (x.shape[1], 1)).transpose()\n if toNorm is True:\n flow = (x * 0.0283168 * 3600 * 24) / ((temparea * (10 ** 6)) * (tempprep * 10 ** 
(-2))/365) # (m^3/day)/(m^3/day)\n else:\n\n flow = x * ((temparea * (10 ** 6)) * (tempprep * 10 ** (-2))/365)/(0.0283168 * 3600 * 24)\n if nd == 3:\n flow = np.expand_dims(flow, axis=2)\n return flow\n\ndef createSubsetAll(opt, **kw):\n if opt is 'all':\n idLst = gageDict['id']\n subsetFile = os.path.join(dirDB, 'Subset', 'all.csv')\n np.savetxt(subsetFile, idLst, delimiter=',', fmt='%d')\n\n# Define and initialize module variables\nif os.path.isdir(pathCamels['DB']):\n dirDB = pathCamels['DB']\n gageDict = readGageInfo(dirDB)\n statFile = os.path.join(dirDB, 'Statistics_basinnorm.json')\n if not os.path.isfile(statFile):\n calStatAll()\n with open(statFile, 'r') as fp:\n statDict = json.load(fp)\nelse:\n dirDB = None\n gageDict = None\n statDict = None\n\ndef initcamels(forcing, attribute, target, rootDB = pathCamels['DB']):\n # reinitialize module variable\n global dirDB, gageDict, statDict, forcing_data, attr_data, TempTarget\n dirDB = rootDB\n forcing_data = forcing\n attr_data = attribute\n TempTarget = target\n # gageDict = readGageInfo(dirDB)\n statFile = os.path.join(dirDB, 'Statistics_basinnorm.json')\n if not os.path.isfile(statFile):\n calStatAll()\n with open(statFile, 'r') as fp:\n statDict = json.load(fp)\n\n\nclass DataframeCamels(Dataframe):\n def __init__(self, *, subset='All', tRange):\n self.subset = subset \n #if subset == 'All': # change to read subset later\n# self.usgsId = gageDict['id']\n # crd = np.zeros([len(self.usgsId), 2])\n # crd[:, 0] = gageDict['lat']\n # crd[:, 1] = gageDict['lon']\n # self.crd = crd\n # elif type(subset) is list:\n # self.usgsId = np.array(subset)\n # crd = np.zeros([len(self.usgsId), 2])\n # C, ind1, ind2 = np.intersect1d(self.usgsId, gageDict['id'], return_indices=True)\n # crd[:, 0] = gageDict['lat'][ind2]\n # crd[:, 1] = gageDict['lon'][ind2]\n # self.crd = crd\n # else:\n # raise Exception('The format of subset is not correct!')\n self.time = utils.time.tRange2Array(tRange) \n\n def getGeo(self):\n return self.crd\n\n def getT(self):\n return self.time\n\n def getDataObs(self, TempTarget, forcing_path, attr_path, *, doNorm=False, rmNan=False, basinnorm = False):\n\n df_pred = pd.DataFrame()\n inputfiles = os.path.join(forcing_path) #obs_18basins forcing_350days_T_S_GAGESII\n dfMain = pd.read_feather(inputfiles)\n inputfiles = os.path.join(attr_path) #attr_18basins attr_350days_T_S_GAGESII\n dfC = pd.read_feather(inputfiles)\n nNodes = len(dfC['site_no'])\n id_order_dfMain = dfMain['site_no'].unique()\n seg_id = pd.DataFrame()\n dfC1 = pd.DataFrame()\n for i, ii in enumerate(id_order_dfMain): # to have the same order of seg_id_nat in both dfMain & dfC\n A = dfC.loc[dfC['site_no'] == ii]\n dfC1 = dfC1.append(A, ignore_index=True)\n dfC = dfC1\n seg_id['site_no'] = dfC['site_no']\n df_pred[TempTarget] = dfMain[TempTarget] #\n\n y = np.empty([nNodes, ntobs])\n for i in range(nNodes):\n a = ntobs * i\n b = ntobs * (i + 1)\n data = df_pred.iloc[a:b]\n kk = dfMain.columns.get_loc('site_no')\n id = dfMain.iloc[a:a + 1, kk]\n val_mask = seg_id == id[a]\n k = val_mask.index[val_mask['site_no'] == True][0]\n y[k, :] = data.iloc[:, 0]\n\n\n\n data = y\n\n\n\n #data = readUsgs(self.usgsId)\n # if basinnorm is True:\n # for k in range(len(varLst)):\n # var = varLst[k]\n # stat = statDict[var]\n\n # data = basinNorm(data, gageid=self.usgsId, toNorm=True)\n data = np.expand_dims(data, axis=2)\n C, ind1, ind2 = np.intersect1d(self.time, tLstobs, return_indices=True)\n data = data[:, ind2, :]\n if doNorm is True:\n data = transNorm(data, TempTarget, 
toNorm=True)\n if rmNan is True:\n data[np.where(np.isnan(data))] = 0\n # data[np.where(np.isnan(data))] = -99\n return data\n\n def getDataTs(self,forcing_path, attr_path, out, *, varLst=forcingLst, doNorm=True, rmNan=True):\n if type(varLst) is str:\n varLst = [varLst]\n # read ts forcing\n #rootDatabase = os.path.join(os.path.sep, absRoot, 'scratch', 'SNTemp')\n inputfiles = os.path.join(forcing_path) # forcing_350days_T_S_GAGESII\n dfMain = pd.read_feather(inputfiles)\n inputfiles = os.path.join(attr_path) # attr_350days_T_S_GAGESII\n dfC = pd.read_feather(inputfiles)\n nNodes = len(dfC['site_no'])\n x = np.empty([nNodes, ntobs, len(forcingLst)])\n id_order_dfMain = dfMain['site_no'].unique()\n seg_id = pd.DataFrame()\n dfC1 = pd.DataFrame()\n for i, ii in enumerate(id_order_dfMain): # to have the same order of seg_id_nat in both dfMain & dfC\n A = dfC.loc[dfC['site_no'] == ii]\n dfC1 = dfC1.append(A, ignore_index=True)\n dfC = dfC1\n seg_id['site_no'] = dfC['site_no']\n forcing = pd.DataFrame()\n for i , ii in enumerate(forcingLst):\n forcing[ii] = dfMain[ii]\n for i in range(nNodes):\n a = ntobs * i\n b = ntobs * (i + 1)\n data = forcing.iloc[a:b, :]\n kk = dfMain.columns.get_loc('site_no')\n id = dfMain.iloc[a:a+1, kk]\n val_mask = seg_id == id[a]\n k = val_mask.index[val_mask['site_no'] == True][0]\n\n x[k, :, :] = data\n\n\n data = x # readForcing(self.usgsId, varLst) # data:[gage*day*variable]\n C, ind1, ind2 = np.intersect1d(self.time, tLst, return_indices=True)\n data = data[:, ind2, :]\n if os.path.isdir(out):\n pass\n else:\n os.makedirs(out)\n np.save(os.path.join(out, 'x.npy'), data)\n if doNorm is True:\n data = transNorm(data, varLst, toNorm=True)\n if rmNan is True:\n data[np.where(np.isnan(data))] = 0\n return data\n\n def getDataConst(self, forcing_path, attr_path, *, varLst=attrLstSel, doNorm=True, rmNan=True):\n if type(varLst) is str:\n varLst = [varLst]\n inputfiles = os.path.join(forcing_path) # obs_18basins forcing_350days_T_S_GAGESII\n dfMain = pd.read_feather(inputfiles)\n inputfiles = os.path.join(attr_path) #attr_18basins attr_350days_T_S_GAGESII\n dfC = pd.read_feather(inputfiles)\n nNodes = len(dfC['site_no'])\n x = np.empty([nNodes, ntobs, len(forcingLst)])\n id_order_dfMain = dfMain['site_no'].unique()\n seg_id = pd.DataFrame()\n dfC1 = pd.DataFrame()\n for i, ii in enumerate(id_order_dfMain): # to have the same order of seg_id_nat in both dfMain & dfC\n A = dfC.loc[dfC['site_no'] == ii]\n dfC1 = dfC1.append(A, ignore_index=True)\n dfC = dfC1\n c = np.empty([nNodes, len(varLst)])\n df_constant = pd.DataFrame()\n for i, ii in enumerate(varLst):\n df_constant[ii] = dfC[ii]\n c[:, i] = df_constant.iloc[:, i]\n # data = readAttr(self.usgsId, varLst)\n data = c\n if doNorm is True:\n data = transNorm(data, varLst, toNorm=True)\n if rmNan is True:\n data[np.where(np.isnan(data))] = 0\n return data\n","sub_path":"hydroDL/data/camels.py","file_name":"camels.py","file_ext":"py","file_size_in_byte":23443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"618283663","text":"from flask import Flask, send_file, request\nfrom visualize_data import plot_var, do_plot\napp = Flask(__name__)\n\nvariable_units = [\n (\"temperature\", \"C\"),\n (\"pressure\", \"hPa\"),\n (\"humidity\", \"%\"),\n (\"light\", \"Lux\"),\n (\"oxidised\", \"kO\"),\n (\"reduced\", \"kO\"),\n (\"nh3\", \"kO\"),\n (\"pm1\", \"ug/m3\"),\n (\"pm25\", \"ug/m3\"),\n (\"pm10\", \"ug/m3\")\n]\nunits = {k: v for k, v in variable_units}\n\ndef 
print_urls():\n    output = []\n    for var, unit in variable_units:\n        output.append(f'<a href=\"/plot?var={var}&unit={unit}\">{var}</a>')\n    return '<br>'.join(output)\n\n
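# NOTE: the anchor markup in print_urls is an assumption; with it, the ('temperature', 'C') entry\n# renders as <a href=\"/plot?var=temperature&unit=C\">temperature</a>.\n\n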
@app.route('/')\ndef index():\n    return print_urls()\n\n\n@app.route('/plot', methods=['GET'])\ndef plot():\n\n    bytes_obj = do_plot(request.args['var'], request.args['unit'])\n\n    return send_file(bytes_obj,\n                     attachment_filename='plot.png',\n                     mimetype='image/png')\n\nif __name__ == '__main__':\n    app.debug = True\n    app.run(host='0.0.0.0')\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"403228107","text":"\"\"\"\r\n杨志朗 1700094803\r\nThis program uses selenium to obtain cookies and to simulate logging in to Zhihu.\r\nFirst enter my_account and my_password in the main block to log in to Zhihu once and obtain the cookies.\r\nThe cookies are saved to a .json file; once saved, get_cookies is not run again.\r\nSometimes a captcha or upside-down characters must be entered before login; that is awkward to handle\r\nhere, so deal with it manually and log in once. After that, login_simulation() simulates logging in to Zhihu.\r\n\r\n\"\"\"\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nimport selenium.common.exceptions as ex\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport time\r\nimport os\r\nimport json  # used to save the cookies\r\nfrom random import random  # used to generate random floats in [0, 1)\r\n\r\n\r\nclass Zhihu:\r\n\r\n    def __init__(self):\r\n        url = 'https://www.zhihu.com/'\r\n        self.url = url\r\n        options = webdriver.ChromeOptions()\r\n        options.add_experimental_option('excludeSwitches',\r\n                                        ['enable-automation'])  # Important: this keeps the big sites from detecting that Selenium is in use\r\n\r\n        self.browser = webdriver.Chrome(executable_path=chromedriver_path, options=options)\r\n        self.wait = WebDriverWait(self.browser, 10)  # timeout of 10s\r\n\r\n    # Check whether we are logged in to Zhihu\r\n    def is_login(self):\r\n        try:\r\n            return bool(\r\n                self.browser.find_element_by_css_selector(\".GlobalSideBar-navText\")\r\n            )\r\n        except ex.NoSuchElementException:\r\n            return False\r\n\r\n    # Obtain the cookies\r\n    def get_cookies(self):\r\n        self.browser.get(\"https://www.zhihu.com/signin\")\r\n        # First switch to account/password login\r\n        self.browser.find_element_by_xpath(\r\n            \"//*[@id=\\\"root\\\"]/div/main/div/div/div/div[1]/div/form/div[1]/div[2]\").click()\r\n        time.sleep(1 + random())\r\n        self.browser.find_element_by_css_selector(\".SignFlow-accountInput.Input-wrapper input\").send_keys(my_account)\r\n        time.sleep(1 + random())\r\n        self.browser.find_element_by_css_selector(\".SignFlow-password input\").send_keys(my_password)\r\n        time.sleep(1 + random())\r\n        self.browser.find_element_by_css_selector(\".Button.SignFlow-submitButton\").click()\r\n\r\n        # A captcha may appear here; handle it manually, then fetch the cookies once logged in\r\n        cnt = 0\r\n        while not self.is_login():\r\n            time.sleep(random())\r\n            cnt += 1\r\n            print(\"Login attempt %d on Zhihu\" % cnt)\r\n\r\n        # Leaving the loop above means we are logged in; start fetching the cookies\r\n        cookies = self.browser.get_cookies()  # Selenium provides get_cookies to fetch the login cookies\r\n        print(\"Cookies obtained!\")\r\n        self.browser.close()\r\n        with open('ZhihuCookies.json', 'w') as f:\r\n            f.write(json.dumps(cookies))\r\n\r\n    def login(self):\r\n        print(\"Starting simulated login with the saved cookies\")\r\n\r\n        # Load the saved cookies from the file\r\n        with open('ZhihuCookies.json', 'r', encoding='utf-8') as f:\r\n            listcookies = json.loads(f.read())  # read the cookies\r\n        # Convert the cookie list into a dict\r\n        cookies_dict = dict()\r\n        for cookie in listcookies:\r\n            # Only each cookie's name and value are needed; domain and the rest can be dropped\r\n            cookies_dict[cookie['name']] = cookie['value']\r\n\r\n        self.browser.get(\"https://www.zhihu.com\")\r\n        for item in cookies_dict:\r\n            self.browser.add_cookie({\r\n                \"domain\": \".zhihu.com\",\r\n                \"name\": item,\r\n                \"value\": cookies_dict[item],\r\n                \"path\": '/',\r\n                \"expires\": None\n            })\r\n
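        # Selenium only accepts a cookie for the domain currently loaded, so zhihu.com was opened above;\r\n        # the reload below applies the injected session.\r\n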
        self.browser.get(\"https://www.zhihu.com\")\r\n        print(\"Successfully logged in to Zhihu with cookies! Closing automatically in 10 seconds\")\r\n        time.sleep(10)\r\n        self.browser.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    # Fill in your account and password used to obtain the cookies, plus the chromedriver location\r\n    my_account = 'your account here'\r\n    my_password = 'your password here'\r\n    chromedriver_path = \"D:/ChromePython/chromedriver.exe\"  # change this to the full path of your chromedriver\r\n\r\n    zhihu = Zhihu()  # Zhihu is the class defined above\r\n\r\n    # Check whether a Zhihu cookies file exists in the current directory; fetch cookies only if it does not\r\n    file = 'ZhihuCookies.json'\r\n    if file not in os.listdir():\r\n        zhihu.get_cookies()\r\n        zhihu = Zhihu()\r\n    zhihu.login()\r\n","sub_path":"selenium_login.py","file_name":"selenium_login.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"163927443","text":"__author__ = 'huanpc'\n\nfrom docker import Client\nfrom io import BytesIO\n\ncli = Client(base_url='unix://var/run/docker.sock')\n\n\ndef get_docker_container(container_name=''):\n    if container_name == '':\n        return cli.containers()\n    else:\n        return cli.inspect_container(container_name)\n\n\ndef get_available_port(type='app'):\n    list_container = get_docker_container()\n    if type == 'app':\n        port_available = 8000\n        for container in list_container:\n            for port in container['Ports']:\n                if 'PublicPort' in port:\n                    if port['PublicPort'] > port_available:\n                        port_available = port['PublicPort']\n        return port_available+1\n    elif type == 'database':\n        port_available = 3000\n        for container in list_container:\n            for port in container['Ports']:\n                if 'PublicPort' in port:\n                    if port_available < port['PublicPort'] < 6000:\n                        port_available = port['PublicPort']\n        return port_available+1\n    else:\n        port_available = 6000\n        for container in list_container:\n            for port in container['Ports']:\n                if 'PublicPort' in port:\n                    if port_available < port['PublicPort'] < 8000:\n                        port_available = port['PublicPort']\n        return port_available+1\n\ndef build_docker_file(docker_file_path, image_tag):\n    # file = open(docker_file_path)\n    # f = BytesIO(file.encode('utf-8'))\n    return cli.build(path=docker_file_path, rm=True, tag=image_tag)","sub_path":"week_5/Docker_Utils.py","file_name":"Docker_Utils.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"268945892","text":"\"\"\"\nThis module contains SwagLabsProducts\nthe page object for the SwagLabs products page\n\"\"\"\n\nimport re\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\n\n\ndef strip_non_price_characters(s):\n    return re.sub(\"[^0-9^.]\", \"\", s)\n\n\nclass SwagLabsProducts:\n    # Locators\n    TITLE = (By.CLASS_NAME,\n             'title')\n    INVENTORY_ITEM_CARDS = (By.CLASS_NAME,\n                            'inventory_item')\n    INVENTORY_ITEM_NAME = (By.CLASS_NAME,\n                           'inventory_item_name')\n    INVENTORY_ITEM_PRICE = (By.CLASS_NAME,\n                            'inventory_item_price')\n    INVENTORY_ADD_TO_CART = (By.XPATH,\n                             './/button[contains(@id, \"add-to-cart-\")]')\n    INVENTORY_REMOVE_FROM_CART = (By.XPATH,\n                                  './/button[contains(@id, \"remove-\")]')\n    SHOPPING_CART_BADGE = (By.CLASS_NAME,\n                           'shopping_cart_badge')\n    SHOPPING_CART_LINK = (By.CLASS_NAME,\n                          'shopping_cart_link')\n\n    # Initializer\n    def __init__(self, browser):\n        self.browser = browser\n\n    # Interaction Methods\n    def get_product_names(self):\n        product_names = self.browser.find_elements(*self.INVENTORY_ITEM_NAME)\n        names = [product.text for product in product_names]\n        return names\n\n    def get_product_prices(self):\n        product_prices = 
self.browser.find_elements(*self.INVENTORY_ITEM_PRICE)\n prices = [price.text for price in product_prices]\n return prices\n\n def get_inventory_item(self, index):\n return self.browser.find_elements(*self.INVENTORY_ITEM_CARDS)[index]\n\n def add_inventory_item_to_cart(self, index):\n name_and_price = {}\n inventory_item = self.get_inventory_item(index)\n\n add_to_cart_button = inventory_item.find_element(\n *self.INVENTORY_ADD_TO_CART)\n add_to_cart_button.click()\n\n name_and_price[\n inventory_item.find_element(*self.INVENTORY_ITEM_NAME).text] = \\\n strip_non_price_characters(\n inventory_item.find_element(*self.INVENTORY_ITEM_PRICE).text\n )\n\n return name_and_price\n\n def get_inventory_item_add_to_cart_button_text(self, index):\n inventory_item = self.get_inventory_item(index)\n\n add_to_cart_button = inventory_item.find_element(\n *self.INVENTORY_ADD_TO_CART)\n return add_to_cart_button.text\n\n def get_inventory_item_remove_from_cart_button_text(self, index):\n inventory_item = self.get_inventory_item(index)\n\n remove_button = inventory_item.find_element(\n *self.INVENTORY_REMOVE_FROM_CART)\n return remove_button.text\n\n def remove_inventory_item_from_cart(self, index):\n inventory_item = self.get_inventory_item(index)\n\n remove_button = inventory_item.find_element(\n *self.INVENTORY_REMOVE_FROM_CART)\n remove_button.click()\n\n def open_shopping_cart(self):\n return self.browser.find_element(*self.SHOPPING_CART_LINK).click()\n\n def get_cart_badge_number(self):\n return self.browser.find_element(*self.SHOPPING_CART_BADGE).text\n\n def cart_badge_does_not_exist(self):\n try:\n self.browser.find_element(*self.SHOPPING_CART_BADGE)\n return False\n except NoSuchElementException:\n return True\n\n def get_title(self):\n title = self.browser.find_element(*self.TITLE)\n return title.text\n","sub_path":"python/pages/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"63862868","text":"#!/usr/bin/env python\nfrom constant import *\nINFO_FIELDS = set([INFO_ID,INFO_TYPE,COMPANY_NAME,COLLEGE_NAME,\n MEETING_TIME,MEETING_LOCATION,ORIGIN_URL,INFO_TEXT,\n RELEASE_DATE,CLICK_RATE,REMARKS,ORIGIN_WEBSITE_NAME,\n MEETING_GPS,CAREER_CENTRE,COMPANY_TYPE,COMPANY_INTRO,\n COMPANY_RECRUIT_SITE,COMPANY_WEIBO,COMPANY_WEIXIN,\n RECRUIT_TITLE,RECRUIT_EMAIL,RECRUIT_TEL,RECRUIT_CITIES,\n RECRUIT_URL,HAS_RESUME,HAS_EXAM,HAS_HUKOU,MAJOR_FIELD,\n JOB_TYPE,RESUME_START_DATE,RESUME_END_DATE,WORK_PLACE,\n INTERVIEW_TIME,EXAM_TIME,GROUP_ID,LAST_MOD_TIME\n ])\n","sub_path":"src/constants/table_fields.py","file_name":"table_fields.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"594535681","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 7/27/2019 4:05 PM\n# @Author  : HowsonLiu\n# @File    : myhttp.py\n\nimport requests\n\n\nclass MyHttp:\n retry_time = 3\n se = requests.session()\n headers = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/'\n 'signed-exchange;v=b3',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'\n '73.0.3683.86 Safari/537.36',\n 'referer': '',\n }\n\n def get_html_response(self, url):\n \"\"\" my http get interface \"\"\"\n 
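# Returns the Response only on HTTP 200; any exception or non-200 status yields None\n        # so that the get() wrapper below can retry up to retry_time times.\n        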
try:\n res = self.se.get(url, headers=self.headers)\n if res.status_code != 200:\n print('Error: The get status code of ' + url + ' is ' + str(res.status_code))\n return None\n except Exception:\n print('Error: GET ' + url)\n return None\n return res\n\n def post_data(self, url, data):\n \"\"\" my http post interface\"\"\"\n try:\n res = self.se.post(url, data=data, headers=self.headers)\n if res.status_code != 200:\n print('Error: The post status code of ' + url + ' is ' + str(res.status_code))\n return None\n except:\n print('Error: POST ' + url)\n return None\n return res\n\n def get(self, url):\n cur_time = 0\n res = self.get_html_response(url)\n while res is None and cur_time < self.retry_time:\n res = self.get_html_response(url)\n cur_time += 1\n return res\n\n def post(self, url, data):\n cur_time = 0\n res = self.post_data(url, data)\n while res is None and cur_time < self.retry_time:\n res = self.post_data(url, data)\n cur_time += 1\n return res\n","sub_path":"myhttp/myhttp.py","file_name":"myhttp.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"157250818","text":"# -*- coding: utf-8 -*-\nfrom PyQt4 import QtCore, QtGui\nimport sys\n\n\nclass MyRect(QtGui.QGraphicsRectItem):\n def __init__(self, r):\n QtGui.QGraphicsRectItem.__init__(self)\n self.setPen(QtGui.QPen(QtCore.Qt.black, 3))\n self.setBrush(QtGui.QBrush(QtCore.Qt.darkGreen))\n self.setRect(r)\n self.setFlag(QtGui.QGraphicsItem.ItemIsFocusable)\n\n def focusInEvent(self, e):\n self.setPen(QtGui.QPen(QtCore.Qt.red, 3))\n QtGui.QGraphicsRectItem.focusInEvent(self, e)\n\n def focusOutEvent(self, e):\n self.setPen(QtGui.QPen(QtCore.Qt.black, 3))\n QtGui.QGraphicsRectItem.focusOutEvent(self, e)\n\n def keyPressEvent(self, e):\n if e.key() == QtCore.Qt.Key_Up:\n self.moveBy(0, -5)\n elif e.key() == QtCore.Qt.Key_Down:\n self.moveBy(0, 5)\n elif e.key() == QtCore.Qt.Key_Left:\n self.moveBy(-5, 0)\n elif e.key() == QtCore.Qt.Key_Right:\n self.moveBy(5, 0)\n e.ignore()\n QtGui.QGraphicsRectItem.keyPressEvent(self, e)\n\n def keyReleaseEvent(self, e):\n QtGui.QGraphicsRectItem.keyReleaseEvent(self, e)\n\n\ndef on_clicked():\n view.setFocus()\n\n\napp = QtGui.QApplication(sys.argv)\nwindow = QtGui.QWidget()\nwindow.setWindowTitle(\"События клавиатуры\")\nwindow.resize(600, 400)\n\nscene = QtGui.QGraphicsScene(0.0, 0.0, 500.0, 335.0)\nscene.setBackgroundBrush(QtCore.Qt.white)\n\nrect = MyRect(QtCore.QRectF(0.0, 0.0, 400.0, 100.0))\nrect.setPos(QtCore.QPointF(50.0, 150.0))\nscene.addItem(rect)\n\nview = QtGui.QGraphicsView(scene)\n\nbox = QtGui.QVBoxLayout()\nbox.addWidget(view)\nwindow.setLayout(box)\n\nwindow.show()\nsys.exit(app.exec_())","sub_path":"140_gui/pyqt_pyside/examples/PyQt_PySide_book/007_Graphic scene/007_event Processing/648. keyPressEvent.py","file_name":"648. 
keyPressEvent.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"625018679","text":"import json\nfrom django.core.files import File\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import status, filters, viewsets\nfrom django.contrib.auth.models import User\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.settings import api_settings\nfrom api.models import Shopping, Product\nfrom api.serializers import ShoppingSerializer, ShoppingRegistroSerializer\n\n\nclass ShoppingViewSet(viewsets.ModelViewSet):\n queryset = Shopping.objects.filter(is_active=True)\n\n filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n filter_fields = (\"prod\", \"cant\")\n search_fields = (\"prod\", \"cant\")\n ordering_fields = (\"prod\", \"cant\")\n\n def get_queryset(self):\n return Shopping.objects.filter(is_active=True, prod__user=self.request.user)\n\n def get_serializer_class(self):\n \"\"\"Define serializer for API\"\"\"\n if self.action == 'list' or self.action == 'retrieve':\n return ShoppingSerializer\n else:\n return ShoppingRegistroSerializer\n\n def get_permissions(self):\n \"\"\"\" Define permisos para este recurso \"\"\"\n if self.action == \"create\" or self.action == \"token\":\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAuthenticated]\n return [permission() for permission in permission_classes]\n\n\n def create(self, request, *args, **kwargs):\n try:\n data = request.data\n producto = data.get('prod')\n prod = Product.objects.get(pk=producto)\n serializer = ShoppingRegistroSerializer(data=request.data)\n if(serializer.is_valid()): \n Shopping.objects.create(\n prod = prod,\n cant = data.get('cant'),\n \n )\n return Response(serializer.data, status=status.HTTP_201_CREATED) \n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n def update(self, request, *args, **kwargs):\n try:\n instancia = self.get_object()\n data = request.data\n producto = data.get('prod')\n prod = Product.objects.get(pk=producto)\n serializer = ShoppingRegistroSerializer(data=request.data)\n if(serializer.is_valid()): \n instancia.prod = prod, \n instancia.cant = data.get('cant') \n instancia.save() \n return Response(serializer.data, status=status.HTTP_201_CREATED) \n else:\n Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n def destroy(self, request, *args, **kwargs):\n try: \n instancia=self.get_object()\n instancia.is_active = False\n instancia.save()\n return Response({'': str(e)}, status=status.HTTP_208_OK)\n except Exception as e:\n return Response({'detail': str(e)}, status=status.HTTP_204_NO_CONTENT)","sub_path":"api/viewsets/shopping.py","file_name":"shopping.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"592341896","text":"valid_directions = [\"N\", \"E\", \"S\", \"W\"]\n\n\nclass MyRover:\n\n \"\"\"Create your very own Mars Rover 1. We salute you, Mars Rover 1. 
There's only one Mars Rover 1, until\n you make more of them, I guess.\n Define your Mars Rover 1 with:\n\n MyRover(\n starting_point = [list],\n\n starting_direction = string (\"N/E/S/W\")\n\n mars_grid = SurfaceOfMars\n\n )\"\"\"\n\n def __init__(self, starting_point, starting_direction, mars_grid):\n self.mars = mars_grid\n\n print(\"The chances of anything coming from Mars is a million to one, they said.\")\n print(\"So we calculated that Mars has a grid of: \" + str(self.mars.x) + \" on the x axis and \" +\n str(self.mars.y) + \" on the y axis.\")\n print(\"We await your safe return, Mars Rover 1, the only of its kind. Failure is not an option...\")\n\n # We want to check for if the first variable is a list. If it isn't, abort the mission, we've failed.\n self.coordinate = starting_point\n if not isinstance(self.coordinate, list):\n print(\"Negative starting points received. Aborting mission, over.\")\n MyRover.abort_mission(self)\n else:\n print(\"Mar Rover 1 starting co-ordinates are: \" + str(self.coordinate))\n\n self.direction = starting_direction\n if self.direction in valid_directions:\n print(\"\\nMars Rover 1 is facing \" + self.direction + \"\\n\")\n else:\n print(\"\\nDanger, danger Will Robinson! Aborting mission, over.\\n\")\n MyRover.abort_mission(self)\n\n # MyRover.await_commands(self)\n\n def get_commands(self):\n \"\"\"The get_commands function listens for user input and reacts based on user input until quit is entered.\n USAGE: MyRover.get_commands\"\"\"\n cmd = input(\"F = Forward, B = Backward, L = Turn Left, R = Turn Right. String multiple letters together for \"\n \"one large command (eg: ffflfrf goes forward three times, turns left, goes forward, turns right\"\n \"then goes forward one more time: \")\n cmd = cmd.upper()\n\n # TODO: Implement find and replace\n # fwd = \"FORWARD\"\n # back = \"BACK\"\n # left = \"LEFT\"\n # right = \"RIGHT\" -> These variables will be used to find the words in cmd and for each instance of it, replace\n # -> with the relevant letter (F/B/L/R)\n\n if cmd == \"FORWARD\":\n cmd = \"F\"\n if cmd == \"BACKWARD\" or cmd == \"BACK\":\n cmd = \"B\"\n if cmd == \"LEFT\":\n cmd = \"L\"\n if cmd == \"RIGHT\":\n cmd = \"R\"\n\n char = []\n for i in range(len(cmd)):\n char += cmd[i]\n\n for i in range(len(char)):\n if char[i] == \"F\":\n MyRover.move_x_axis(self, \"F\")\n elif char[i] == \"B\":\n MyRover.move_x_axis(self, \"B\")\n elif char[i] == \"L\":\n MyRover.turn_left(self)\n elif char[i] == \"R\":\n MyRover.turn_right(self)\n\n def move_x_axis(self, moving):\n \"\"\"\n :param moving: a string of f's/b's which is converted to a list.\n :return: +1 or -1 to x axis\n \"\"\"\n moving = moving.upper()\n if moving == \"FORWARD\":\n moving = \"F\"\n if moving == \"BACKWARD\" or moving == \"BACK\":\n moving = \"B\"\n\n char = []\n for i in range(len(moving)):\n char += moving[i]\n print(char)\n\n for i in range(len(char)):\n if char[i] == \"F\":\n print(\"Moving forward!\")\n if self.coordinate[0] == self.mars.x:\n self.coordinate[0] = 0\n else:\n self.coordinate[0] += 1\n print(\"New coordinates are: \" + str(self.coordinate))\n return self.coordinate[0]\n elif char[i] == \"B\":\n print(\"Moving backwards!\")\n if self.coordinate[0] == 0:\n self.coordinate[0] = self.mars.x\n else:\n self.coordinate[0] -= 1\n print(\"New coordinates are: \" + str(self.coordinate))\n # return self.coordinate[0]\n else:\n print(\"Mayday! Mayday! 
Incorrect movement command provided...\")\n return self.coordinate[0]\n\n def move_y_axis(self, moving):\n moving = moving.upper()\n if moving == \"RIGHT\" or moving == \"R\":\n print(\"Moving right!\")\n if self.coordinate[1] == 0:\n self.coordinate[1] = self.mars.y\n else:\n self.coordinate[1] += 1\n print(\"New coordinates are: \" + str(self.coordinate))\n return self.coordinate[0]\n elif moving == str(\"back\").upper() or moving == str(\"b\").upper():\n print(\"Moving backwards!\")\n self.coordinate[0] -= 1\n return self.coordinate[0]\n else:\n print(\"Mayday! Mayday! Incorrect movement command provided...\")\n return self.coordinate[0]\n\n def turn_left(self):\n if self.direction == \"N\":\n print(\"direction is now West (W)\")\n self.direction = \"W\"\n elif self.direction == \"E\":\n print(\"direction is now North (N)\")\n self.direction = \"N\"\n elif self.direction == \"S\":\n print(\"direction is now East (E)\")\n self.direction = \"E\"\n elif self.direction == \"W\":\n print(\"direction is now South (S)\")\n self.direction = \"S\"\n return self.direction\n\n def turn_right(self):\n if self.direction == \"N\":\n print(\"direction is now East (E)\")\n self.direction = \"E\"\n elif self.direction == \"E\":\n print(\"direction is now South (S)\")\n self.direction = \"S\"\n elif self.direction == \"S\":\n print(\"direction is now West (W)\")\n self.direction = \"W\"\n elif self.direction == \"W\":\n print(\"direction is now North (N)\")\n self.direction = \"N\"\n return self.direction\n\n def abort_mission(self):\n \"\"\"Call abort_mission when an unexpected value is predicted.\n The mission was a complete failure... Abort the mission, go home, you're drunk.\"\"\"\n console_status = -1\n return console_status\n\n\nclass SurfaceOfMars:\n \"\"\"SurfaceOfMars is called to define a grid for Mars. 
Just how big is Mars?!\"\"\"\n\n def __init__(self, x, y):\n print(\"BRB, preparing Mars!\")\n self.x = x\n self.y = y\n self.x_obstacle = 4\n self.y_obstacle = 7\n self.obstacle_list = [[], []]\n SurfaceOfMars.plant_obstacles(self)\n\n def plant_obstacles(self):\n i = 0\n i2 = 0\n while i < self.x:\n self.obstacle_list[0].append(0)\n i += 1\n while i2 < self.y:\n self.obstacle_list[1].append(0)\n i2 += 1\n\n print(\"obstacle_list[0] has: \" + str(self.obstacle_list[0]))\n print(\"obstacle_list[1] has: \" + str(self.obstacle_list[1]))\n\n for i in self.obstacle_list[0][::self.x_obstacle]:\n self.obstacle_list[0][::self.x_obstacle].insert(i, 1)\n\n print(\"This is a rock list: \" + str(self.obstacle_list[0]))\n\n def confirm_mars(self):\n print(\"We confirm that Mars is: \" + str(self.x) + \" on the x axis and \" + str(self.y) + \" on the y axis.\")\n return self.x, self.y\n","sub_path":"Mars Rover/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"211088513","text":"#!/usr/bin/python3\n\"\"\"\nPython script to fetch information from memberclicks using the REST API.\n\nAPI Documentation:\nhttps://classic.memberclicks.com/hc/en-us/articles/360016335371-User\n\nNote that we use \"classic\" memberclicks which does not provide the extensive\nAPI that the new version seems to provide.\n\nFile: util.py\n\nCopyright 2019 Ankur Sinha\nAuthor: Ankur Sinha \n\"\"\"\n\n\nimport requests\n\n\ndef get_token(apikey, username, password):\n \"\"\"\n Get authorization token from server.\n\n :apikey: API key to use\n :username: Username to use\n :password: password to use\n :returns: authorization token\n\n \"\"\"\n # End point for authorisation\n URL_auth = 'https://ocns.memberclicks.net/services/auth'\n\n # Initialise to nothing\n api_token = None\n\n # Case sensitive\n data = {\n 'apiKey': apikey,\n 'username': username,\n 'password': password,\n }\n # set the headers so that we get a json response instead of the default XML\n headers_auth = {\n 'Accept': 'application/json'\n }\n\n # Get the api_token\n r = requests.post(URL_auth, data=data, headers=headers_auth)\n # Check response code\n if r.status_code == 200:\n api_token = r.json()['token']\n else:\n print(\"Received status code {}\".format(r.status_code))\n print(\"Response: {}\".format(r.text))\n\n return api_token\n\n\ndef check_user_registration(api_token, user_search_terms, year):\n \"\"\"\n Check whether these user_search_terms are registered.\n\n :api_token: api_token\n :user_search_terms: list of users to check\n :year: registration year to check for\n :returns: nothing\n\n \"\"\"\n group_list = [\"OCNS Board\", \"Student Member\", \"Faculty Member\",\n \"Basic Contact\", \"Postdoc Member\"]\n\n headers = {\n 'Accept': 'application/json',\n 'Authorization': api_token\n }\n URL_user = 'https://ocns.memberclicks.net/services/user?searchText='\n for aterm in user_search_terms:\n url = URL_user + aterm + '#'\n\n # Make the request\n r = requests.get(url, headers=headers)\n if r.status_code == 200:\n results = r.json()\n # returns a list if more than one result, otherwise results a\n # single dict. So we always convert it to a list for simplcity.\n if not isinstance(results['user'], list):\n presults = [results['user']]\n else:\n presults = results['user']\n print(\"\\n** {} **\".format(aterm))\n print(\"{} accounts found. 
Fetching information.\".format(\n len(presults)))\n for userdata in presults:\n # check group\n URL_groups = (\n 'https://ocns.memberclicks.net/services/user/' +\n userdata['userId'] + '/attribute/453639'\n )\n p = requests.get(URL_groups, headers=headers)\n if p.status_code == 200:\n group_results = p.json()\n group = group_results['attData']\n if group in group_list:\n print(\"\\n{} is a valid member of {}.\".format(\n userdata['userName'], group\n ))\n else:\n print(\"Received status code {}\".format(r.status_code))\n print(\"Response: {}\".format(r.text))\n\n # check registration\n URL_registration = (\n 'https://ocns.memberclicks.net/services/user/' +\n userdata['userId'] + '/attribute/609323'\n )\n p = requests.get(URL_registration, headers=headers)\n if p.status_code == 200:\n reg_results = p.json()\n if 'attData' not in reg_results:\n print(\"{} does not contain attribute\".format(\n userdata['userName']))\n else:\n registration = reg_results['attData']\n if registration == year:\n print(\n \"{} is registered.\".format(\n userdata['userName']))\n if group == \"Basic Contact\":\n print(\"They can submit ONE abstract.\")\n else:\n print(\"They can submit TWO abstracts.\")\n else:\n print(\"{} has not yet registered for {}.\".format(\n userdata['userName'], year\n ))\n else:\n print(\"Received status code {}\".format(r.status_code))\n print(\"Response: {}\".format(r.text))\n elif r.status_code == 204:\n print(\"No users found with search term {}\".format(aterm))\n print(\"Received status code {}\".format(r.status_code))\n else:\n print(\"Received status code {}\".format(r.status_code))\n print(\"Response: {}\".format(r.text))\n\n\ndef get_registered_users(api_token, year):\n \"\"\"\n Get list of users registered for a particular conference year.\n\n We get the whole user list and count.\n\n TODO: incomplete. 
There does not seem to be a way of searching by an\n attribute value.\n\n\n :api_token: api_token\n :searchID: searchID of the search\n :returns: Nothing\n\n \"\"\"\n headers = {\n 'Accept': 'application/json',\n 'Authorization': api_token\n }\n # Does not work\n URL_profiles = (\n 'https://ocns.memberclicks.net/services/attribute/609323/user'\n )\n r = requests.get(URL_profiles, headers=headers)\n if r.status_code == 200:\n print(r.json())\n else:\n print(\"Received status code {}\".format(r.status_code))\n print(\"Response: {}\".format(r.text))\n\n\ndef get_attribute_list(api_token):\n \"\"\"\n Get list of attributes.\n\n :api_token: api_token\n :returns: nothing\n\n \"\"\"\n URL_attributes = 'https://ocns.memberclicks.net/services/attribute'\n headers = {\n 'Accept': 'application/json',\n 'Authorization': api_token\n }\n r = requests.get(URL_attributes, headers=headers)\n if r.status_code == 200:\n print(r.json())\n else:\n print(\"Received status code {}\".format(r.status_code))\n print(\"Response: {}\".format(r.text))\n\n\ndef get_user_info(api_token, user_search_terms, active):\n \"\"\"\n Get information from the database about a list of users.\n\n Currently, the API does not seem to return inactive users even when the\n parameter is given.\n\n :api_token: api_token\n :user_search_terms: list of usernames\n :active: whether or not user is active\n :returns: nothing\n\n \"\"\"\n if active == \"true\":\n print(\"Looking in active users\")\n else:\n print(\"Looking in inactive users\")\n\n URL_user = (\n 'https://ocns.memberclicks.net/services/user?pageSize=100&searchText='\n )\n headers = {\n 'Accept': 'application/json',\n 'Authorization': api_token\n }\n for aterm in user_search_terms:\n url = URL_user + aterm + '&active={}'.format(active) + '#'\n print(url)\n\n # Make the request\n r = requests.get(url, headers=headers)\n if r.status_code == 200:\n results = r.json()\n # returns a list if more than one result, otherwise results a\n # single dict. So we always convert it to a list for simplcity.\n if not isinstance(results['user'], list):\n presults = [results['user']]\n else:\n presults = results['user']\n print(\"\\n** {} **\".format(aterm))\n print(\"{} accounts found. 
Fetching information.\".format(\n len(presults)))\n for userdata in presults:\n # fetch all attributes\n URL_attributes = (\n 'https://ocns.memberclicks.net/services/user/' +\n userdata['userId'] + '?includeAtts=true'\n )\n p = requests.get(URL_attributes, headers=headers)\n if p.status_code == 200:\n profile_info = p.json()\n print(\"\\n{}\\n\".format(profile_info))\n else:\n print(\"Received status code {}\".format(r.status_code))\n print(\"Response: {}\".format(r.text))\n elif r.status_code == 204:\n print(\"No users found with search term {}\".format(aterm))\n print(\"Received status code {}\".format(r.status_code))\n else:\n print(\"Received status code {}\".format(r.status_code))\n print(\"Response: {}\".format(r.text))\n\n\nif __name__ == \"__main__\":\n # credentials\n api_key = \"\"\n username = \"\"\n password = ''\n year = \"2019\"\n\n # list of users to check\n users = ['a.sinha2@herts.ac.uk']\n\n # Do the work\n token = get_token(api_key, username, password)\n check_user_registration(token, users, year)\n # get_user_info(token, users)\n # get_attribute_list(token)\n # get_registered_users(token, year)\n","sub_path":"scripts/registration/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"257656355","text":"# Insert start and end points into lists\n# Maintain each list sorted\n# Check maximum overlap in book\nimport bisect\nclass MyCalendarThree(object):\n\n def __init__(self):\n self.start = []\n self.end = []\n\n def book(self, start, end):\n \"\"\"\n :type start: int\n :type end: int\n :rtype: int\n \"\"\"\n bisect.insort(self.start, start, 0, len(self.start))\n bisect.insort(self.end, end, 0, len(self.end))\n start_index = end_index = 0\n max_overlap = 0\n overlap = 0\n while(start_index < len(self.start) and end_index < len(self.end)):\n if(self.start[start_index] < self.end[end_index]):\n start_index += 1\n overlap += 1\n else:\n end_index += 1\n overlap -= 1\n max_overlap = max(max_overlap, overlap)\n return max_overlap\n\n\ncal = MyCalendarThree()\nprint(cal.book(30, 40))\nprint(cal.book(30, 40))\nprint(cal.book(30, 40))\n\n\n","sub_path":"Leetcode/MyCalenderThree.py","file_name":"MyCalenderThree.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"289951640","text":"#\n# How many Sundays fell on the first day of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?\n#\n# python sundays.py\n#\n\n# 30days: 4 6 9 11\n# 31days: 1 3 5 7 8 10 12\n# 28 or 29 days: 2\n#\n# leap year % 4 == 0\n# or a century if % 400 == 0\n#\n# year = 1901 - 2000\n\nday31 = [1, 3, 5, 7, 8, 10, 12]\nday30 = [4, 6, 9, 11]\n\ndef mon(year, num, empty):\n month = []\n daysInMonth = 0\n daysLastWeek = 0\n counter = 0\n\n if empty != 7:\n for a in range(0, empty):\n month.append(0)\n \n if num in day31:\n daysInMonth = 31\n for b in range(0, 31):\n month.append(b + 1)\n elif num in day30:\n daysInMonth = 30\n for b in range(0, 30):\n month.append(b + 1)\n else:\n if year % 4 == 0 or year % 400 == 0:\n daysInMonth = 29\n for b in range(0, 29):\n month.append(b + 1)\n else:\n daysInMonth = 28\n for b in range(0, 28):\n month.append(b + 1)\n \n if month[0] == 1:\n counter += 1\n\n realDaysFirstWeek = 7 - empty\n c = 1\n while 7 * c < daysInMonth - realDaysFirstWeek:\n c += 1\n else: \n daysLastWeek = daysInMonth - realDaysFirstWeek - (7 * (c - 1))\n\n return [daysLastWeek, 
counter]\n\n\n\ndef yea(num, emptyFirstMonth):\n empty = emptyFirstMonth\n counter = 0\n for a in range(0, 12):\n arr = mon(num, a + 1, empty)\n empty = arr[0]\n counter += arr[1]\n \n return [empty, counter]\n\n\n\ndef cen():\n theCounter = 0\n emptyForNextYear = 2\n for a in range(1901, 2001):\n arr = yea(a, emptyForNextYear)\n emptyForNextYear = arr[0]\n theCounter += arr[1]\n \n print(theCounter)\n \n\n\ncen()","sub_path":"sundays.py","file_name":"sundays.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"415958281","text":"#coding:utf-8\nimport requests\nimport json\n\nurl = \"http://api.common.updrv.com/json/mininews_status\"\n#payload ={\"appid\":\"1111\",\"unionid\":\"1111\",\"pcid\":\"abc123456\"}\nf = open(\"data\\\\test.json\",\"r\")\npayload = f.read()\nprint(payload)\n\n#data_json=json.dumps(payload)\n#r=requests.post('http://api.common.updrv.com/json/mininews_status',data=payload)\n\nr = requests.post(url,json=payload)\nprint(r.status_code)\nprint(r.text)","sub_path":"接口测试/testpost_josn.py","file_name":"testpost_josn.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"351853151","text":"\"\"\"\n This module implements a supervised task 1, subtask B model that trains general pattern\n recognition classifiers on scores produced by task 1, subtask A models (regressors).\n\"\"\"\n\nfrom joblib import Parallel, delayed\nimport logging\n\nimport numpy as np\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import StratifiedKFold, GridSearchCV\nfrom sklearn.feature_selection import RFECV, SelectKBest, chi2\n\nfrom .base import Model\nfrom dataset import Page, Screen, RANDOM_STATE\nfrom models.task1.subtaska import VGGS256, ROW_NUMBERS, HISTOGRAMS\n\nLOGGER = logging.getLogger(__name__)\n\nSCORING = \"roc_auc\"\nNUM_FOLDS = 3\nNUM_FEATURES = 100\n\nESTIMATORS = [LogisticRegression(random_state=RANDOM_STATE, class_weight=\"balanced\"),\n SVC(random_state=RANDOM_STATE, class_weight=\"balanced\")]\nENSEMBLES = [AdaBoostClassifier(DecisionTreeClassifier(max_depth=1, random_state=RANDOM_STATE,\n class_weight=\"balanced\")),\n RandomForestClassifier(random_state=RANDOM_STATE, class_weight=\"balanced\")]\nCLASSIFIERS = ESTIMATORS + ENSEMBLES\n\nPARAM_GRIDS = {\n SVC: [\n {\n \"kernel\": [\"linear\"],\n \"C\": [2**(2*(k-2)-1) for k in range(10)],\n }, {\n \"kernel\": [\"rbf\"],\n \"C\": [2**(2*(k-2)-1) for k in range(10)],\n \"gamma\": [2**(2*(k-7)-1) for k in range(10)],\n },\n ], AdaBoostClassifier: {\n \"n_estimators\": [2**(k+2) for k in range(7)],\n }, RandomForestClassifier: {\n \"n_estimators\": [2**(k+2) for k in range(7)],\n \"max_depth\": [None] + [2**k for k in range(7)],\n },\n}\n\nclass RegressorClassifier(Model):\n \"\"\"\n This class represents a task 1, subtask B model that trains pattern recognition classifiers\n on scores produced by task 1, subtask A models (regressors).\n \"\"\"\n\n def __init__(self, classifier, use_vgg256):\n \"\"\"Constructs a supervised task1, subtask B model based on task 1, subtask A models.\n\n Parameters:\n classifier The provided supervised classifier.\n\n use_vgg256 Whether the VGG256 features should be used.\n \"\"\"\n 
assert classifier in CLASSIFIERS\n self.classifier = classifier\n assert type(use_vgg256) is bool\n self.use_vgg256 = use_vgg256\n self.scaler = StandardScaler()\n# self.feature_preselector = SelectKBest(chi2, k=NUM_FEATURES)\n self.feature_selector = RFECV(self.classifier, scoring=SCORING,\n cv=StratifiedKFold(NUM_FOLDS, random_state=RANDOM_STATE),\n n_jobs=-1)\n\n def fit(self, videos):\n LOGGER.debug(\"Preparing training samples for %s ...\", self)\n observations = [(screen_video, page_video) \\\n for screen_video in videos for page_video in videos]\n X = self._get_regressor_predictions(observations)\n y = [1 if screen_video == page_video and screen.matching_pages else 0 \\\n for screen_video, page_video in observations \\\n for screen in screen_video.screens]\n LOGGER.debug(\"Done preparing training samples for %s.\", self)\n\n# LOGGER.debug(\"Fitting the feature preselector (%d samples, %d features) ...\", len(X), len(X[0]))\n# self.feature_preselector.fit(X, y)\n# X = self.feature_preselector.transform(X)\n# LOGGER.debug(\"Done fitting the feature preselector (%d features).\", X.shape[1])\n\n LOGGER.debug(\"Fitting the feature scaler ...\")\n self.scaler.fit(X)\n X = self.scaler.transform(X)\n LOGGER.debug(\"Done fitting the feature scaler.\")\n\n if self.classifier.__class__ != SVC:\n LOGGER.debug(\"Fitting the feature selector (%d samples, %d features) ...\", *X.shape)\n self.feature_selector.fit(X, y)\n X = self.feature_selector.transform(X)\n LOGGER.debug(\"Done fitting the feature selector. (%d features)\", X.shape[1])\n\n if self.classifier.__class__ in PARAM_GRIDS and self.classifier.__class__ != SVC:\n LOGGER.debug(\"Optimizing the classifier parameters and fitting the classifier ...\")\n param_grid = PARAM_GRIDS[self.classifier.__class__]\n optimizer = GridSearchCV(self.classifier, param_grid, scoring=SCORING, refit=True,\n cv=StratifiedKFold(NUM_FOLDS, random_state=RANDOM_STATE))\n optimizer.fit(X, y)\n self.classifier = optimizer.best_estimator_\n LOGGER.debug(\"Done optimizing the classifier parameters and fitting the classifier.\")\n else:\n LOGGER.debug(\"Fitting the classifier ...\")\n self.classifier.fit(X, y)\n LOGGER.debug(\"Done fitting the classifier.\")\n\n def predict(self, observations):\n X = self._get_regressor_predictions(observations)\n# X = self.feature_preselector.transform(X)\n X = self.scaler.transform(X)\n if self.classifier.__class__ != SVC:\n X = self.feature_selector.transform(X)\n y = self.classifier.predict(X)\n return y\n\n def _get_regressor_predictions_worker(self, regressor_num, regressor, regressors, observations):\n LOGGER.info(\"Retrieving rankings from regressor number %d / %d (%s) ...\",\n regressor_num + 1, len(regressors), regressor)\n rankings = regressor.predict(observations)\n scores = [ranking[0] for ranking in rankings]\n LOGGER.info(\"Done retrieving rankings from regressor number %d / %d.\",\n regressor_num + 1, len(regressors))\n return scores\n\n def _get_regressor_predictions(self, observations):\n \"\"\"Predicts the vector of scores assigned to the top-ranking document page in a provided\n video for all screens in another provided video using an ensemble of task 1, subtask A\n models (regressors) and returns the vectors as a list.\n\n Parameters:\n observations The provided list of 2-tuples, where each tuple consists of a video\n containing screens and a video containing document pages.\n \"\"\"\n LOGGER.debug(\"Predicting the vector of scores for %s ...\", self)\n regressors = VGGS256 + ROW_NUMBERS + HISTOGRAMS if 
self.use_vgg256 \\\n else ROW_NUMBERS + HISTOGRAMS\n predictions = Parallel(n_jobs=-1)(delayed(self._get_regressor_predictions_worker)\\\n (regressor_num, regressor, regressors,\n observations) \\\n for regressor_num, regressor in enumerate(regressors))\n# predictions = []\n# for regressor_num, regressor in enumerate(regressors):\n# LOGGER.info(\"Retrieving rankings from regressor number %d / %d (%s) ...\",\n# regressor_num + 1, len(regressors), regressor)\n# rankings = regressor.predict(observations)\n# scores = [ranking[0] for ranking in rankings]\n# predictions.append(scores)\n# LOGGER.info(\"Done retrieving rankings from regressor number %d / %d.\",\n# regressor_num + 1, len(regressors))\n LOGGER.debug(\"Done predicting the vector of scores for %s.\", self)\n return list(zip(*predictions))\n\n def _filename(self):\n return \"%s.%s-%s-%s\" % (__name__, self.__class__.__name__, self.use_vgg256,\n self.classifier.__class__.__name__)\n\n def __repr__(self):\n return \"Regressor classifier (with%s VGG256, %s)\" % (\"\" if self.use_vgg256 else \"out\",\n self.classifier.__class__.__name__)\n\nREGRESSOR_CLASSIFIERS = [RegressorClassifier(classifier, use_vgg256) \\\n for use_vgg256 in (True, False) \\\n for classifier in CLASSIFIERS]\n","sub_path":"models/task1/subtaskb/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":8142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"171559541","text":"import io\nimport os\nimport sys\nimport requests\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport scheduled_tasks.reddit.stocks.fast_yahoo as fast_yahoo\n\ncurrent_date = datetime.utcnow().date()\n\n\ndef get_30d_data_finra():\n \"\"\"\n Get short volume data from https://cdn.finra.org/ in the last 30 days\n \"\"\"\n last_date = datetime.utcnow().date() - timedelta(days=30)\n combined_df = pd.DataFrame(columns=[\"Date\", \"Symbol\", \"ShortVolume\", \"ShortExemptVolume\", \"TotalVolume\", \"%Shorted\"])\n while current_date >= last_date:\n print(\"Looking at \" + str(last_date))\n url = r\"https://cdn.finra.org/equity/regsho/daily/CNMSshvol{}.txt\".format(str(last_date).replace(\"-\", \"\"))\n s = requests.get(url).content\n df = pd.read_csv(io.StringIO(s.decode('utf-8')), delimiter=\"|\")\n\n if len(df) == 1:\n print(\"No data for \" + str(last_date) + \"\\n\")\n else:\n del df[\"Market\"]\n df[\"%Shorted\"] = 100 * (df[\"ShortVolume\"] / df[\"TotalVolume\"])\n df[\"%Shorted\"] = df[\"%Shorted\"].round(2)\n combined_df = combined_df.append(df)\n last_date = last_date + timedelta(days=1)\n\n combined_df[\"Date\"] = combined_df[\"Date\"].astype(str)\n combined_df[\"Date\"] = combined_df[\"Date\"].apply(lambda x: x[0:4] + \"-\" + x[4:6] + \"-\" + x[6:])\n combined_df.columns = [\"Date\", \"ticker\", \"short_vol\", \"short_exempt_vol\", \"total_vol\", \"percent\"]\n combined_df.to_csv(\"database/short_volume.csv\", index=False)\n\n\ndef get_daily_data_finra(date_to_process: datetime.date = datetime.utcnow().date()-timedelta(days=0)):\n \"\"\"\n Get short volume data from https://cdn.finra.org/\n \"\"\"\n\n original_df = pd.read_csv(\"database/short_volume.csv\")\n\n url = r\"https://cdn.finra.org/equity/regsho/daily/CNMSshvol{}.txt\".format(str(date_to_process).replace(\"-\", \"\"))\n print(url)\n s = requests.get(url).content\n df = pd.read_csv(io.StringIO(s.decode('utf-8')), delimiter=\"|\")\n if len(df) == 1:\n 
print(\"No data for \" + str(date_to_process) + \"\\n\")\n else:\n df[\"Date\"] = df[\"Date\"].astype(str).apply(lambda x: x[0:4] + \"-\" + x[4:6] + \"-\" + x[6:])\n df[\"%Shorted\"] = 100 * (df[\"ShortVolume\"] / df[\"TotalVolume\"])\n df[\"%Shorted\"] = df[\"%Shorted\"].round(2)\n\n del df[\"Market\"]\n df_copy = df.copy()\n df_copy.columns = [\"Date\", \"ticker\", \"short_vol\", \"short_exempt_vol\", \"total_vol\", \"percent\"]\n original_df = original_df.append(df_copy)\n original_df.drop_duplicates(keep=\"first\", inplace=True)\n original_df.to_csv(\"database/short_volume.csv\", index=False)\n\n highest_shorted = df[df[\"ShortVolume\"] >= 3000000].nlargest(50, \"%Shorted\")\n del highest_shorted[\"Date\"]\n highest_shorted.index = np.arange(1, len(highest_shorted) + 1)\n highest_shorted.reset_index(inplace=True)\n highest_shorted.rename(columns={\"index\": \"Rank\",\n \"ShortVolume\": \"Short Volume\",\n \"ShortExemptVolume\": \"Short Exempt Vol\",\n \"TotalVolume\": \"Total Volume\",\n \"%Shorted\": \"% Shorted\"}, inplace=True)\n\n quick_stats = {'regularMarketPreviousClose': 'Previous Close',\n 'regularMarketChangePercent': '1 Day Change %',\n 'marketCap': 'Market Cap'}\n stats_df = fast_yahoo.download_quick_stats(highest_shorted[\"Symbol\"].to_list(), quick_stats)\n\n highest_shorted = pd.merge(highest_shorted, stats_df, on=\"Symbol\")\n highest_shorted.replace(np.nan, \"N/A\", inplace=True)\n highest_shorted.to_csv(\"database/highest_short_volume.csv\", index=False)\n\n\ndef main():\n get_30d_data_finra()\n get_daily_data_finra()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scheduled_tasks/get_short_volume.py","file_name":"get_short_volume.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"97490307","text":"import os\nimport re\n\nPATH = os.path.abspath('.')\n\nres_files = [os.path.join(base, file) for base, _, files in os.walk(PATH) if 'build' not in base and 'src' in base for\n file in files if file.endswith('.java') or file.endswith('.xml')]\n\n# pprint(res_files)\n\ndrawable_files = {file[:-4]: os.path.join(base, file) for base, _, files in os.walk(PATH)\n if 'build' not in base and 'src' in base and 'drawable' in base\n for file in files if file.endswith('.png') and not file.endswith('.9.png')}\n\nrefs = set()\nfor file in res_files:\n with open(file, encoding='utf-8') as f:\n for line in f:\n\n if line.lstrip().startswith('#'): continue\n\n match = re.findall(r'R\\.drawable\\.(\\w+)\\b', line)\n if match:\n refs.update(set(match))\n\n match = re.findall(r'@drawable/(\\w+)\\b', line)\n if match:\n refs.update(set(match))\n\nunused = drawable_files.keys() - refs\nfrom pprint import pprint\n\nfor _un_use_img in unused:\n print(f'deleting {_un_use_img}')\n os.remove(drawable_files[_un_use_img])","sub_path":"wheels/script/remove_unused_drawable.py","file_name":"remove_unused_drawable.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"641223065","text":"import torch\nimport pandas as pd\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom itertools import count\nfrom torchvision import datasets, transforms\nimport plot\n\n\nclass CNN(torch.nn.Module):\n def __init__(self, shallow):\n super(CNN, self).__init__()\n self.cnn = 
nn.Sequential(\n nn.Conv2d(1, 16, 5),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(16, 32, 5),\n nn.ReLU(),\n nn.MaxPool2d(2)\n )\n if shallow:\n self.dnn = nn.Sequential(\n nn.Linear(32*4*4, 10)\n )\n else:\n self.dnn = nn.Sequential(\n nn.Linear(16*4*4, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, 8),\n nn.ReLU(),\n nn.Linear(8, 10)\n )\n\n def forward(self, x):\n x = self.cnn(x)\n x = x.view(x.size(0), -1)\n out = self.dnn(x)\n return out\n\ndef get_data(batch_size):\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=batch_size, shuffle=True)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=batch_size, shuffle=True)\n return train_loader, test_loader\n\ndef to_var(x):\n x = torch.autograd.Variable(x)\n if torch.cuda.is_available():\n x = x.cuda()\n return x\n\ndef train(batchsize=256, lr=0.01):\n model = CNN(True)\n print(model)\n optimizer = optim.SGD(model.parameters(), lr=lr)\n loss_func = nn.CrossEntropyLoss()\n train_loader, test_loader = get_data(batchsize)\n\n for epoch in range(10):\n losses = []\n for sample in train_loader:\n x = to_var(sample[0])\n y = to_var(sample[1])\n prediction = model(x)\n loss = loss_func(prediction, y) # cross entropy loss\n optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n optimizer.step()\n losses.append(loss.data[0])\n print('epoch={}, loss={:.4f}'.format(epoch + 1, np.average(losses)))\n correct = 0\n total = 0\n for data in test_loader:\n images, labels = data\n outputs = model(Variable(images))\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n print('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n return model\n\n\ndef main():\n \n #model64 = train(64)\n #model1024 = train(1024)\n #torch.save(model64.state_dict(), 'model/hw1-3-3-2-64.pt')\n #torch.save(model1024.state_dict(), 'model/hw1-3-3-2-1024.pt')\n model001 = train(lr=0.01)\n model0001 = train(lr=0.001)\n torch.save(model001.state_dict(), 'model/hw1-3-3-2-001.pt')\n torch.save(model0001.state_dict(), 'model/hw1-3-3-2-0001.pt')\n\n \n model64 = CNN(True)\n model1024 = CNN(True)\n model64.load_state_dict(torch.load('model/hw1-3-3-2-001.pt'))\n model1024.load_state_dict(torch.load('model/hw1-3-3-2-0001.pt'))\n cnn_weight_64_0 = model64.cnn[0].weight.data.numpy()\n cnn_weight_1024_0 = model1024.cnn[0].weight.data.numpy()\n cnn_weight_64_1 = model64.cnn[3].weight.data.numpy()\n cnn_weight_1024_1 = model1024.cnn[3].weight.data.numpy()\n dnn_weight_64 = model64.dnn[0].weight.data.numpy()\n dnn_weight_1024 = model1024.dnn[0].weight.data.numpy()\n loss_func = nn.CrossEntropyLoss()\n alphas = []\n train_losses = []\n train_accuracy = []\n test_losses = []\n test_accuracy = []\n for a in range(-20, 41):\n alpha = a / 20\n alphas.append(alpha)\n print(alpha)\n model = CNN(True)\n model.cnn[0].weight.data = torch.from_numpy((1 - alpha) * cnn_weight_64_0 + alpha * cnn_weight_1024_0)\n model.cnn[3].weight.data = torch.from_numpy((1 - 
alpha) * cnn_weight_64_1 + alpha * cnn_weight_1024_1)\n        model.dnn[0].weight.data = torch.from_numpy((1 - alpha) * dnn_weight_64 + alpha * dnn_weight_1024)\n        #model1024.cnn[0].weight.data = torch.from_numpy(np.zeros(model1024.cnn[0].weight.data.numpy().shape))\n        #print(model1024.cnn[0].weight.data.numpy())\n\n        train_loader, test_loader = get_data(256)\n        correct = 0\n        total = 0\n        losses = []\n        for data in test_loader:\n            images, labels = data\n            outputs = model(Variable(images))\n            loss = loss_func(outputs, Variable(labels))\n            losses.append(loss.data[0])\n            _, predicted = torch.max(outputs.data, 1)\n            total += labels.size(0)\n            correct += (predicted == labels).sum()\n        print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))\n        test_accuracy.append((100 * correct / total))\n        test_losses.append(np.average(losses))\n        correct = 0\n        total = 0\n        losses = []\n        for data in train_loader:\n            images, labels = data\n            outputs = model(Variable(images))\n            loss = loss_func(outputs, Variable(labels))\n            losses.append(loss.data[0])\n            _, predicted = torch.max(outputs.data, 1)\n            total += labels.size(0)\n            correct += (predicted == labels).sum()\n        train_accuracy.append((100 * correct / total))\n        train_losses.append(np.average(losses))\n    fout = open('hw1-3-3-1-lr.csv', 'w')\n    fout.write('alpha,train_accuracy,train_loss,test_accuracy,test_lost\\n')\n    for i in range(len(alphas)):\n        fout.write(str(alphas[i])+','+str(train_accuracy[i])+','+\n                   str(train_losses[i])+','+str(test_accuracy[i])+','+\n                   str(test_losses[i])+'\\n')\n    fout.close()\n\n\nif __name__ == "__main__":\n    main()\n","sub_path":"hw1/code/hw1-3-3-1.py","file_name":"hw1-3-3-1.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"442837075","text":"from django.urls import path\nfrom . import views\napp_name = 'twe'\nurlpatterns =[\n    path('list',views.tweet_list_view,name ='list'),\n    path('list/',views.tweet_detail_view,name='list_detail'),\n    path('class/',views.Detail.as_view(),name='detail_class'),\n    path('class/list',views.ListModelView.as_view(),name='list_class'),\n    path('class/create',views.CreateForm.as_view(),name='create_class'),\n    path('class/create/view',views.CreateFormView.as_view(),name='create_class'),\n    path('class/update/',views.UpdateForm.as_view(),name='update_class'),\n    path('class/delete/',views.DeleteFormView.as_view(),name='delete_class'),\n]","sub_path":"src/tweets/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"87577428","text":"import cv2\nimport numpy as np\n\nMAX_FEATURES = 5000\nGOOD_MATCH_PERCENT = 0.45\n\n\ndef alignImages(im1, im2):\n    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)\n\n    orb = cv2.ORB_create(MAX_FEATURES)\n\n    kp1 = orb.detect(im1Gray)\n    kp1, des1 = orb.compute(im1Gray, kp1)\n    kp2 = orb.detect(im2Gray)\n    kp2, des2 = orb.compute(im2Gray, kp2)\n\n    matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)\n    matches = matcher.match(des1, des2)\n\n    matches = sorted(matches, key=lambda x: x.distance)\n    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\n    matches = matches[numGoodMatches:-1]\n    # img3 = cv2.drawMatches(im1Gray,kp1,im2Gray,kp2,matches[:10],None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n    # plotImages(img3)\n\n    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches])\n    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches])\n\n    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 0.5)\n\n    h, w = im2Gray.shape\n    im1Reg = cv2.warpPerspective(im1, M, (w, h))\n    # plotImages(im2, im1Reg, im1, titles=['base', 'reg', 'flaw'])\n\n    return im1Reg\n\n\ndef diffAndMask(imReg, imReference):\n    preframe = cv2.cvtColor(imReference, cv2.COLOR_BGR2GRAY)\n\n    curframe = cv2.cvtColor(imReg, cv2.COLOR_BGR2GRAY)\n    curframe = cv2.absdiff(curframe, preframe)\n    ret, curframe = cv2.threshold(curframe, 120, 255.0, cv2.THRESH_BINARY)\n\n    kernel = np.ones((5, 5), np.uint8)\n    curframe = cv2.erode(curframe, kernel)\n    curframe = cv2.dilate(curframe, kernel)\n\n    contours, hierarchy = cv2.findContours(curframe, cv2.RETR_EXTERNAL,\n                                           cv2.CHAIN_APPROX_NONE)\n\n    x, y, w, h = cv2.boundingRect(\n        max(contours, key=lambda x: cv2.contourArea(x)))\n    imCrop = imReg.copy()\n    imCrop = imCrop[y:y+h, x:x+w]\n    cv2.rectangle(imReg, (x, y), (x + w, y + h), [0, 0, 255], 3)\n\n    return imReg, imCrop","sub_path":"convention.py","file_name":"convention.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"380363465","text":"import sys\nimport os\nimport numpy as np \nimport json\nfrom subprocess import call\n\n# usage: python split_validation.py    \n\nl = os.listdir(sys.argv[1])\nos.mkdir(sys.argv[2])\nd = dict()\npatient_to_labels = json.load(open(sys.argv[4]))\nfor i in l:\n\tif patient_to_labels[i] not in d:\n\t\td[patient_to_labels[i]] = []\n\td[patient_to_labels[i]].append(i)\n\nres = []\n\nfor i in d:\n\tnum = min(1, int(len(d[i]) * float(sys.argv[3])))\n\tif num < 1:\n\t\tprint('In class %d, epected number of patient in validation is %f' % (i, len(d[i]) * float(sys.argv[3])))\n\tidx = np.random.choice(int(len(d[i])), size=num)\n\tfor patient in 
idx:\n\t\tprint(d[i][patient])\n\t\tres.append(d[i][patient])\n\nfor i in res:\n\tcall('mv %s %s' % (os.path.join(sys.argv[1], str(i)), os.path.join(sys.argv[2], str(i))), shell=True)","sub_path":"split_validation.py","file_name":"split_validation.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"27690496","text":"from typing import List\n\n\nclass Solution:\n    def camelMatch(self, queries: List[str], pattern: str) -> List[bool]:\n        \"\"\"\n        https://leetcode.com/problems/camelcase-matching/\n        Time Complexity - O(N(L+M))\n        'L' is the length of pattern\n        'M' is the avg length of query\n        'N' is the number of queries\n        Space Complexity - O(1)\n        \"\"\"\n        result = []\n        for query in queries:\n            # pattern string\n            i = 0\n            flag = False\n            for j in range(len(query)):\n                # char at query string\n                ch = query[j]\n                if i < len(pattern) and ch == pattern[i]:\n                    i += 1\n                    # we are done with pattern string\n                    if i == len(pattern):\n                        flag = True\n                # the ch is not a match with pattern string\n                # and is upper case\n                elif ch.isupper():\n                    flag = False\n                    break\n            result.append(flag)\n        return result","sub_path":"1023_camel_case_matching.py","file_name":"1023_camel_case_matching.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"71272138","text":"#Ejercicio 1\r\nn = int(input(\"Ingrese el tamaño del arreglo: \")) #Solicitamos al usuario el tamaño, o la cantidad de valores que tendrá el arreglo\r\nm = int(input(\"Ingrese un múltiplo: \")) #Solicitamos al usuario el número sobre el cual se calcularan los múltiplos del arreglo\r\na = [] #El arreglo inicia vacío\r\nfor i in range (1,n+1):\r\n    a.append(i*m) #Se llena al arreglo\r\nprint (a) #Imprimimos por pantalla el arreglo final\r\n\r\n\r\n\r\n#Ejercicio 2\r\nA = int(input(\"Ingresa el tamaño de los arreglos: \")) #Solicitamos al usuario que ingrese el tamaño de los arreglos\r\nB = [] #El arreglo B inicia vacío\r\nC = [] #El arreglo C inicia vacío\r\nfor i in range (0,A):\r\n    B.append(input(\"Ingresa el nombre de las personas: \")) #Segun el rango de i, el arreglo B, se está llenando con los nombres que ingresa el usuario\r\nprint (B) #Se imprime el arreglo B\r\nfor j in range (0,A): #Segun el rango de j, se llena el arreglo C, con la longitud de los nombres ingresados en el arreglo B\r\n    C.append(len(B[j]))\r\nprint (C) #Se imprime el arreglo C\r\n\r\n\r\n#Ejercicio 3\r\nsubjects = [\"Matemáticas\", \"Física\", \"Química\", \"Historia\", \"Lenguaje\"] #Se crea una lista con las diferentes materias\r\nscores = [] #El arreglo esta vacío\r\nfor subject in subjects:\r\n    score = input(\"¿Qué nota has sacado en \" + subject + \"?: \") #Le solicitamos al usuario que ingrese la nota de cada materia, segun el orden de la lista\r\n    scores.append(score) #Se llena el arreglo scores con el valor ingresado por el usuario\r\nfor i in range(len(subjects)):\r\n    print(\"En \" + subjects[i] + \" has sacado: \" + scores[i]) #Segun el rango de i, en la longitud de la lista subjects, se imprime el nombre de curso y su nota correspondiente\r\n\r\n#Palindromo\r\ndef palindromo(palabra):\r\n    if palabra == palabra[::-1]:\r\n        print(\"La palabra ingresada es un palíndromo\")\r\n    else:\r\n        print(\"La palabra ingresada NO es un palíndromo\")\r\n\r\nword = input(\"Ingresa una palabra: \")\r\npalindromo(word)\r\n\r\n\r\n#Abecedario\r\nlista = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ñ\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\r\n#Se toma \"a\" como la posición 1\r\nfor i in range(len(lista), 1,-1):\r\n    if i%3 == 0:\r\n        lista.pop(i-1) #Se elimina un un elemento de la lista\r\nprint (lista)\r\n\r\n\r\n#Funcion len()\r\ndef longitud(palabra):\r\n    print(\"La longitud de la frase ingresada es: \", len(palabra))\r\n\r\nword = input(\"Ingresa una frase: \")\r\nlongitud(word)\r\n\r\n\r\n","sub_path":"Quiz 2.4 Arreglos.py","file_name":"Quiz 2.4 Arreglos.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"114420316","text":"from django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.db.models import Q\nfrom search.forms import *\nfrom video.models import *\nfrom blog.models import *\nfrom headlines.models import *\nfrom shows.models import *\nfrom django.db.models import Count\nimport itertools\n\ndef results(request):\n\n    shows = Show.objects.filter(isactive=True)\n    entries = {}\n    videos = {}\n    shows_results = {}\n    search_result_total = 0\n\n    if request.method == 'GET':\n        return HttpResponseRedirect(\"/\")\n\n    search_string = request.POST.get(\"searchbox\")\n    if search_string:\n        entries = Entry.objects.select_related().filter(\n            Q(title__icontains=search_string) |\n            Q(body_markdown__icontains=search_string),status=1)\n\n        videos = Video.objects.select_related().filter(\n            Q(title__icontains=search_string) |\n            Q(details__icontains=search_string),isactive=1)\n\n        shows_results = Show.objects.select_related().filter(\n            Q(title__icontains=search_string) |\n            Q(body_markdown__icontains=search_string),isactive=1)\n\n        search_result_total = entries.count() + videos.count() + shows_results.count()\n\n        search_results = itertools.chain(entries, videos, shows_results)\n\n    headlines = Headline.objects.filter(isactive=1)\n    variables = RequestContext(request, {\n        'entries':entries,\n        'videos':videos,\n        'shows':shows,\n        'search_result_total': search_result_total,\n        'shows_results':shows_results,\n        'search_results':search_results,\n        'search_term':search_string,\n        'headlines':headlines,\n        #'tweets':tweets,\n        })\n    return render_to_response('search/results.html', variables)\n","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"168680978","text":"#Reconoce el rostro usando la cámara de la laptop\nimport cv2\n\ncap = cv2.VideoCapture(0)\nclasificador = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')\nwhile True:\n    ret, frame = cap.read()\n    imagenGris = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    rostros = clasificador.detectMultiScale(imagenGris, 1.3, 8)\n    for(x,y,w,h) in rostros:\n        cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)\n    cv2.imshow('frame', frame)\n\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Deteccion/Deteccion/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"172431726","text":"from keras.models import Sequential\nfrom keras.layers import Embedding, Dense\nfrom keras.layers import LSTM, Bidirectional\nfrom keras.callbacks import EarlyStopping\n\nfrom lecce.feature.representation.word_embeddings import\\\n    FastTextEmbedder, Word2VecEmbedder\nfrom definitions import ROOT_DIR\n\nft_bible = FastTextEmbedder(model_name=\"ft_bible.bin\", directory=rf\"{ROOT_DIR}/embeddings\")\nft_europarl = FastTextEmbedder(model_name=\"ft_europarl.bin\", directory=rf\"{ROOT_DIR}/embeddings\")\nft_pubmed = FastTextEmbedder(model_name=\"ft_pubmed.bin\", directory=rf\"{ROOT_DIR}/embeddings\")\n\nw2v_bible = Word2VecEmbedder(model_name=\"w2v_bible.bin\", directory=rf\"{ROOT_DIR}/embeddings\")\nw2v_europarl = Word2VecEmbedder(model_name=\"w2v_europarl.bin\", directory=rf\"{ROOT_DIR}/embeddings\")\nw2v_pubmed = Word2VecEmbedder(model_name=\"w2v_pubmed.bin\", directory=rf\"{ROOT_DIR}/embeddings\")\n\n\ndef get_embedding(token, corpus, paradigm=\"ft\"):\n    \"\"\"\n\n    Parameters\n    ----------\n    token\n    corpus\n    paradigm\n\n    Returns\n    -------\n\n    \"\"\"\n    token = token.lower()\n    if paradigm.lower() == \"ft\":\n        if corpus == 'bible':\n            return ft_bible.transform(token)\n        if corpus == 'pubmed':\n            return ft_pubmed.transform(token)\n        if corpus == 'europarl':\n            return ft_europarl.transform(token)\n\n    if paradigm.lower() == \"w2v\":\n        if corpus == 'bible':\n            return w2v_bible.transform(token)\n        if corpus == 'pubmed':\n            return w2v_pubmed.transform(token)\n        if corpus == 'europarl':\n            return w2v_europarl.transform(token)\n\n#get data\nX = []\nY = []\ncorpus = []\nwith open('lcp_single_train.tsv', 'r', encoding='utf-8') as f:\n    for line in f:\n        split = line.strip().split('\\t')\n        X.append(get_embedding(split[3], split[1]))\n        Y.append(split[4])\n        corpus.append(split[1])\n\nsplit_point = int(0.75*len(X))\nX_train = X[:split_point]\nY_train = Y[:split_point]\nX_test = X[split_point:]\nY_test = Y[split_point:] \ntrain_corpus = corpus[:split_point]\ntest_corpus = corpus[split_point:]\n\nprint(train_corpus)\n\n\n#build the model\n# model = Sequential()\n# model.add(Embedding(input_dim=100, output_dim=100, input_length = ?))\n# model.add(Bidirectional(LSTM(64)))\n# model.add(Dense(input_dim = ?, units = 500, activation = 'adam'))\n# model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])\n# es = EarlyStopping(monitor='val_loss', min_delta = 0, patience=10, mode='auto', verbose=1)\n# model.fit(X_train, Y_train, batch_size = 32, epochs = 100, callbacks=[es])\n#\n# predictions = model.predict(X_test, batch_size = 5)\n","sub_path":"lecce/estimator/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"289742339","text":"import pickle\n\nfrom matplotlib import pyplot as plt\n\nfrom config import VOC_CLASSES\n\n# 历史记录名\nhistory_filename = r'fcn32_CrossEntropy_null_null_1e4'\n\n# 读取历史记录\nwith open(f'E:\keras-segmentation\logs\{history_filename}.history', 'rb') as history_file:\n    history = pickle.load(history_file)\n\n# 将数据整理为列表\nclass_acc = [history[f'val_acc_of_clazz{i}'] for i in range(21)] + [history['val_categorical_accuracy']]\nclass_name = [VOC_CLASSES[i] for i in range(21)] + ['mean_acc']\n\n# 绘图\nfor class_acc, class_name in zip(class_acc, class_name):\n    plt.plot(class_acc)\n    plt.legend(class_name)\n    plt.show()\nplt.title(f'accuracy process|{history_filename}')\nplt.show()\n","sub_path":"test/draw_accuracy_process.py","file_name":"draw_accuracy_process.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"142653814","text":"# -*- coding: utf-8 -*-\nimport datetime\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render\n\nfrom ossus.app.backup.forms import JobFoldersForm, JobForm, JobSQLsForm\nfrom ossus.app.backup.models import Job\n\n\n@login_required()\ndef new(request, server_id):\n return form(request, server_id)\n\n\n@login_required()\ndef edit(request, server_id, id):\n return form(request, server_id, id)\n\n\n@login_required()\ndef form(request, server_id, id=False):\n job = Job()\n\n server = request.user.profile.get_server_or_change_team(id=server_id)\n title = \"New job\"\n\n def next_from_date():\n next = datetime.datetime.now() + datetime.timedelta(days=1)\n return next.strftime(\"%Y-%m-%d\") + \" 02:00:00\"\n\n initial_data = {'from_date': next_from_date()}\n\n if id:\n job = request.user.profile.get_jobs().get(id=id)\n title = \"Job %s \" % job.name\n initial_data = {}\n\n form = JobForm(instance=job, initial=initial_data, user=request.user)\n form_folders = JobFoldersForm(instance=job, prefix=\"folders\")\n form_sql = JobSQLsForm(instance=job, prefix=\"sql\")\n\n if request.method == \"POST\":\n form = JobForm(request.POST, instance=job, initial=initial_data,\n user=request.user)\n form_folders = JobFoldersForm(request.POST, prefix=\"folders\", instance=job)\n form_sql = JobSQLsForm(request.POST, prefix=\"sql\", instance=job)\n\n if form.is_valid() and form_folders.is_valid() and form_sql.is_valid():\n job = form.save(commit=False)\n job.server = server\n\n job.save()\n form_folders.save()\n form_sql.save()\n\n if server.template:\n return redirect(\"templates.view\", server.id)\n else:\n return redirect(\"servers.view\", server.id)\n\n return render(request, 'backup/form.html', {'form': form,\n 'server': server,\n 'form_folders': form_folders,\n 'form_sql': form_sql,\n 'title': title})\n","sub_path":"ossus/app/backup/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"37965927","text":"import unittest\r\n\r\nfrom classified_email_program import sanitize_user_words, censor_email_text, create_menu_with_options, process_user_menu_input\r\n\r\n\r\nclass ClassifiedEmailTest(unittest.TestCase):\r\n def test_sanitize_user_words_splits_string_and_strips_leading_and_trailing_whitespace(self):\r\n self.assertEqual(sanitize_user_words(\"score # four# set forth \"), ['score','four','set forth'], \"Should be ['score','four','set forth']\")\r\n\r\n def test_censor_email_text_replace_words_with_asterisks(self):\r\n email_text = \"These words.\"\r\n classified_words = ['words']\r\n self.assertEqual(censor_email_text(email_text, classified_words)['email'], \"These *****.\",\r\n \"Should be 'These *****.'\")\r\n\r\n def test_censor_email_text_replace_no_words_with_asterisks(self):\r\n email_text = \"These words.\"\r\n classified_words = ['skywalker']\r\n self.assertEqual(censor_email_text(email_text, classified_words)['email'], \"These words.\",\r\n \"Should be 'These words.'\")\r\n\r\n def test_censor_flag_true(self):\r\n email_text = \"These words.\"\r\n classified_words = ['words']\r\n self.assertEqual(censor_email_text(email_text, classified_words)['flag'],True, \r\n \"Should be True\")\r\n\r\n def test_censor_flag_false(self):\r\n email_text = \"These words.\"\r\n classified_words = ['skywalker']\r\n self.assertEqual(censor_email_text(email_text, classified_words)['flag'],False, \r\n \"Should be 
False\")\r\n\r\n    def test_menu_options_size(self):\r\n        menu = create_menu_with_options()\r\n        self.assertEqual(len(menu), 4,\r\n                         \"Should be 4.\")\r\n\r\n    def test_menu_options_has_correct_keys(self):\r\n        menu = create_menu_with_options()\r\n        keys = list(menu.keys())\r\n        target_keys = ['1','2','3','4']\r\n        self.assertEqual(keys, target_keys,\r\n                         \"Should be ['1','2','3','4']\")\r\n\r\n    def test_menu_has_correct_values(self):\r\n        menu = create_menu_with_options()\r\n        values = list(menu.values())\r\n        target_values = [\" Display preloaded sample text.\",\r\n                         \" Run censor function with preloaded sample text and new user provided classified words.\",\r\n                         \" Run censor function with new user provided text and new user provided classified words.\",\r\n                         \" Exit program.\"]\r\n        self.assertEqual(values, target_values,\r\n                         (\"Should be \", target_values))\r\n    \r\n    def test_process_user_input_for_value_4(self):\r\n        ans = '4'\r\n        self.assertEqual(process_user_menu_input(ans), False,\r\n                         \"Should be False.\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"364874699","text":"\nimport cmd\n\nfrom prajna.rengu.cmd import auto_help\n\n\nclass RenguFuzzCmd(cmd.Cmd):\n\n    prompt = \"fuzz >\"\n\n    @auto_help\n    def do_quit(self, args):\n        return True\n\n    do_EOF = do_quit\n\n    @auto_help\n    def do_source(self, args):\n        from prajna.rengu.source import Source\n\n        try:\n            for s in Source.find(args):\n                print(s.to_json())\n        except SyntaxError as e:\n            print(e)\n\n    @auto_help\n    def do_author(self, args):\n        import shlex\n        from prajna.rengu.author import Author\n\n        author_map = Author.author_map()\n\n        for name in shlex.split(args):\n            for f, pk, match_name, real_name in Author.fuzz(name, match_ratio=80, authors=author_map):\n                print(\"{0: 3g} {1:50} = {2:25} [{3}]\".format(\n                    f, name + \"~\" + match_name, real_name, pk))\n","sub_path":"lib/prajna/rengu/cmd/fuzz.py","file_name":"fuzz.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"515351865","text":"#-*- coding:utf-8 -*-\n\n# 1、有如下变量:tu ,请实现要求的功能\ntu = ('alex',[11,22,{'k1':'v1','k2':['age','name'],'k3':[11,22,33]},44])\n# a、讲述元组的特性\n# b、请问tu变量的第一个元素alex是否可以被修改,\n# c、请问tu变量的k2对应的值数据类型为,是否可以被修改,如果可以请在其中添加一个元素’Seven‘\n# d、请问tu变量的k3对应的值数据类型为,是否可以被修改,如果可以请在其中添加一个元素’Seven‘\n\n# a、元组本身是只读列表,不可增加,删除、修改,只能够进行查找,但是如何元组里面包含了可变数据类型(列表、字典、集合)则可以修改、增加、删除\n# b、不可修改\n# c、\n\nprint(type(tu[1][2]['k2']))\ntu[1][2]['k2'].append('Seven')\nprint(tu)\n\nprint(type(tu[1][2]['k3']))\ntu[1][2]['k3'].append('Seven')\nprint(tu)\n\n# 2、字典,dic dic = {'k1':'v1','k2':'v2','k3':[11,22,33]}\n# a、请循环输出所有的key\n# b、请循环输出所有的value\n# c、请循环输出所有的key和value\n# d、请在字典中添加一个键值对,k4:v4,并输出添加后的字典\n# e、请修改字典中的k1值为‘alex’并输出修改后的字典\n# f、请在k3对应的值中添加一个元素44,并输出修改后的字典\n# g、请在k3对应的值中的第一个位置,添加一个元素18,并输出修改后的字典\ndic = {'k1':'v1','k2':'v2','k3':[11,22,33]}\n# a\nfor i in dic.keys():\n    print(i)\n# b\nfor i in dic.values():\n    print(i)\n# c\nfor i in dic.items():\n    print(i)\n\n# d\n# 方法1:\ndic['k4'] = 'v4'\nprint(dic)\n\n# 方法2:\ndic.setdefault('k4','v4')\nprint(dic)\n\n# e、\n# 方法1\ndic['k1'] = 'alex'\nprint(dic)\n\n# 方法2\ndic1 = {'k1':'alex'}\ndic.update(dic1)\nprint(dic)\n# f、\ndic['k3'].append(44)\nprint(dic)\n\n# g、\ndic['k3'].insert(0,18)\nprint(dic)\n\n\n# 3、元素分类,有如下值li = [11,22,33,44,55,66,77,88,99,90],\n# 将所有的大于66的值,保存到字典的第一个key下面,将小于66值,保存至第二个,key值中\n#即:{'k1':'大于66值的所有列表','k2':'小于66的所有值列表'}\ndic = {}\nnew1_li =[]\nnew2_li =[]\nli =[11,22,33,44,55,66,77,88,99,90]\nfor i in li:\n    if i > 66:\n        new1_li.append(i)\n    if i < 66:\n        new2_li.append(i)\ndic['k1'] = new1_li\ndic['k2'] = new2_li\nprint(dic)\n\n# 4、输出商品列表,用户输入序号,显示用户输入的商品,\n# li = ['手机','电脑','鼠标垫','游艇']\n# 要求:\n# 1、页面显示序号+商品名称,如:\n# 1 手机\n# 2 电脑\n# ...\n# 2、用户输入选择的商品序号,然后打印商品名称。\n# 3、如果用户输入的商品序号有错误,则提示输入有误,请重新输入。\n# 4、用户输入q或者Q退出程序。\n\nli = ['手机', '电脑', '鼠标垫', '游艇']\ncart = []\nwhile 1:\n    for i,f in enumerate(li):\n        print(i+1,f)\n    shopping = input('请输入商品序号/输入q或者Q退出循环:')\n    if shopping.isdigit():\n        if 0 < int(shopping) and int(shopping) <= len(li):\n            cart.append(li[int(shopping) - 1])\n            print(cart)\n        else:\n            print('你输入的商品序号不存在,请重新输入')\n    elif shopping.upper() == 'Q':\n        break\n    else:\n        print('你输入的商品序号不存在,请重新输入')\n","sub_path":"Python基础&并发编程/day5/day5作业.py","file_name":"day5作业.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"148957837","text":"import numpy as np\nimport random\n\ndef boost(x, y, num):\n\tn = x.shape[0]\n\tm = x.shape[1]\n\talpha, classifier = [], []\n\t# initial weights follow the uniform distribution\n\tweight = [[1/n for i in range(n)]]\n\n\tfor i in range(1,num+1):\n\t\tmin_error, ind, pos, h = 1, 0, 0, None\n\n\t\t# find the weak classifier learner with the highest accuracy\n\t\tfor j in range(m):\n\t\t\th1, h2 = [], []\n\t\t\terror1, error2 = 0, 0\n\t\t\t# labels predicted by positive classifier at this feature\n\t\t\th1 = [1 if x[k][j] == 1 else -1 for k in range(n)]\n\t\t\t# labels predicted by negative classifier at this feature\n\t\t\th2 = [1 if x[k][j] == 0 else -1 for k in range(n)]\n\t\t\t# compute the errors\n\t\t\terror1 = sum(weight[i-1][k]*(y[k]!=h1[k]) for k in range(n))\n\t\t\terror2 = sum(weight[i-1][k]*(y[k]!=h2[k]) for k in range(n))\n\n\t\t\t# maintain the local minimum error\n\t\t\tif error1 < 0.5:\n\t\t\t\tif error2 < error1 and error2 < min_error:\n\t\t\t\t\tmin_error, ind, pos, h = error2, j, -1, h2\n\t\t\t\telif error1 < min_error:\n\t\t\t\t\tmin_error, ind, pos, h = error1, j, 1, h1\n\t\t\telif error2 < 0.5 and error2 < min_error:\n\t\t\t\tmin_error, ind, pos, h = error2, j, -1, h2\n\n\t\t# each classifier is represented by the threshold feature and an\n\t\t# indicator of its positivity/negativity\n\t\tclassifier.append((ind,pos))\n\n\t\t# compute alpha of this round\n\t\ta_t = 0.5*np.log((1-min_error)/min_error)\n\t\talpha.append(a_t)\n\t\ttemp = [weight[i-1][k]*np.exp(-a_t*y[k]*h[k]) for k in range(n)]\n\t\ts = sum(temp)\n\t\t# the normalized weights after this round\n\t\tD = [temp[k]/s for k in range(n)]\n\t\tweight.append(D)\n\n\treturn alpha, classifier\n\n\ndef classify(x, y, alpha, C, num):\n\tn = x.shape[0]\n\tcount = 0\n\n\tfor i in range(n):\n\t\t# predict the label of the testing example by all trained classifiers\n\t\tH = [C[j][1] if x[i][C[j][0]] == 1 else -C[j][1] for j in range(num)]\n\t\tpredict = np.sign(sum(alpha[j]*H[j] for j in range(num)))\n\t\tif predict == 0: predict = random.choice([1,-1])\n\t\tif predict != y[i]: count+=1\n\n\t# return the error ratio\n\treturn count/n\n","sub_path":"boosting.py","file_name":"boosting.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"278690404","text":"import openpyxl as px\nfrom .cell_for_openpyxl import cell\nfrom .getClassroom import 
GetClassroom\nimport time\nfrom datetime import datetime\n\nimport os\nfrom os import path\nos.chdir(path.dirname(path.abspath(__file__)))\n\n\ndef importclassroom(filename, debug_mode=False):\n dt = datetime.today()\n gc = GetClassroom(dt)\n try:\n wb = px.load_workbook(filename)\n ws1 = wb['ForPDF']\n\n cnobox = [[None for i0 in range(8)] for j0 in range(5)]\n for i1 in range(5): # read data from the file\n for j1 in range(8):\n cnobox[i1][j1] =\\\n ws1[cell(c=chr(i1 + 66), i=3 * (j1 + 2) - 1)].value\n\n cnolist = []\n for i in range(5): # table->list\n for j in range(8):\n inlist = False\n for k in cnolist:\n if cnobox[i][j] == k \\\n or not cnobox[i][j] or cnobox[i][j] == \"-\":\n inlist = True\n break\n if len(cnolist) == 0 and \\\n (not cnobox[i][j] or cnobox[i][j] == \"-\"):\n pass\n elif inlist is False:\n cnolist.append(cnobox[i][j])\n\n croomlist = []\n iii = len(cnolist)\n print(\"Downloading...\")\n for ii, cnol in enumerate(cnolist): # import html\n print(\"{} of {}\".format(ii + 1, iii))\n croomlist.append(gc.getClassroom(cnol, debug_mode))\n time.sleep(5)\n\n for i in range(5): # list->box\n for j in range(8):\n for m, cnol in enumerate(cnolist):\n if cnobox[i][j] and cnobox[i][j] == cnol:\n cnobox[i][j] = croomlist[m]\n break\n\n for i1 in range(5): # write data to the file\n for j1 in range(8):\n if cnobox[i1][j1]:\n ws1[cell(c=chr(i1 + 66), i=3 * (j1 + 2))].value = \"\"\n ws1[cell(c=chr(i1 + 66), i=3 * (j1 + 2))].value =\\\n cnobox[i1][j1]\n\n wb.save(filename)\n print(\"Successfully completed\")\n return True\n\n except PermissionError:\n print(\"Please close the file.\")\n return False\n\n\nif __name__ == \"__main__\":\n importclassroom(\"../files_for_debug/a.xlsx\", True)\n","sub_path":"src/importClassroom.py","file_name":"importClassroom.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"414284935","text":"import requests\nimport os\n\n# url = \"https://api.mailgun.net/vN/domainAbcdefg.mailgun.org\"\n\n\ndef send_simple_message(amount, name):\n return requests.post(\n \"https://api.mailgun.net/v3/sandbox8305a785c9cf45bdaf8457fa68577b8f.mailgun.org/messages\",\n auth=(\"api\", os.getenv(\"MAILGUN_API_KEY\")),\n data={\"from\": \"Excited User mailgun@sandbox8305a785c9cf45bdaf8457fa68577b8f.mailgun.org\",\n \"to\": [\"haz.faizul@gmail.com\"],\n \"subject\": f\"Hello, ${amount} was just donated by {name}\",\n \"text\": \"Testing some Mailgun awesomness!\"})\n","sub_path":"instagram_web/util/mailhelper.py","file_name":"mailhelper.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"639641857","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 12 07:48:25 2019\n\n@author: Ruben\n\"\"\"\n\nfrom datetime import datetime\nimport pandas as pd\nimport pickle\nimport analisis_estadistico.analisis_tot_data_filtrada as ana\n\nmarcos = ['15M_c','60M_c','mediodia','1D_c','1D-15M_c']\nmarcos_x_lim = [.75, 1.25, 1.75, 2, 2]\n\n#los filtros de tipo gap se aplicarian por encima de los filtros normales\n#para ser un primer filtrado de que si directamente la accion abre en la apertura\n#a un precio que no nos interesa pues directamente descartamos la operacion\nfiltros_gap =[['sin filt_gap', lambda d: d['stock']!='']] #ej: lambda d: abs(d['gap'])<=lim\n\nfiltros = [['sin filtro', lambda d: d['stock']!=''],\n ['1D_d=3,2', lambda d: abs(d['1D_direccion']).isin([3,2])],\n ['1D_d=3', lambda d: 
abs(d['1D_direccion']).isin([3])],\n ['1D_d=2', lambda d: abs(d['1D_direccion']).isin([2])],\n ['1D_d=1', lambda d: abs(d['1D_direccion']).isin([1])],\n ['1D_d=0', lambda d: abs(d['1D_direccion']).isin([0])],\n ['1D_d=1,0', lambda d: abs(d['1D_direccion']).isin([1,0])],\n \n ['1H_d=3,2', lambda d: abs(d['1H_direccion']).isin([3,2])],\n ['1H_d=3', lambda d: abs(d['1H_direccion']).isin([3])],\n ['1H_d=2', lambda d: abs(d['1H_direccion']).isin([2])],\n ['1H_d=1', lambda d: abs(d['1H_direccion']).isin([1])],\n ['1H_d=0', lambda d: abs(d['1H_direccion']).isin([0])],\n ['1H_d=1,0', lambda d: abs(d['1H_direccion']).isin([1,0])],\n \n ['sum_d=6,5', lambda d: abs(d['suma_direcciones']).isin([6,5])],\n ['sum_d=6', lambda d: abs(d['suma_direcciones']).isin([6])],\n ['sum_d=5', lambda d: abs(d['suma_direcciones']).isin([5])],\n ['sum_d=4', lambda d: abs(d['suma_direcciones']).isin([4])],\n ['sum_d=3', lambda d: abs(d['suma_direcciones']).isin([3])],\n ['sum_d=2', lambda d: abs(d['suma_direcciones']).isin([2])],\n ['sum_d=1', lambda d: abs(d['suma_direcciones']).isin([1])],\n \n ['1D_t=P', lambda d: d['1D_tipo'].isin(['P'])],\n ['1D_t=N', lambda d: d['1D_tipo'].isin(['N'])],\n ['1D_t=A', lambda d: d['1D_tipo'].isin(['A'])],\n \n ['1H_t=P', lambda d: d['1H_tipo'].isin(['P'])],\n ['1H_t=N', lambda d: d['1H_tipo'].isin(['N'])],\n ['1H_t=A', lambda d: d['1H_tipo'].isin(['A'])],\n \n ['1D&1H_t=P', lambda d: d['1D_tipo'].isin(['P']) & d['1H_tipo'].isin(['P'])],\n ['1D&1H_t=A', lambda d: d['1D_tipo'].isin(['A']) & d['1H_tipo'].isin(['A'])]]\n\ndef ejecutar_analisis(data_org):\n res = []\n for f_gap in filtros_gap:\n data_f_gap = data_org.loc[f_gap[1](data_org)]\n for f in filtros: \n data_filt = data_f_gap.loc[f[1](data_f_gap), ['stock','color']+marcos]\n res_conj_d = ana.obtener_resultados_globales(data_filt, False)\n res.append((f_gap[0]+' & '+f[0], res_conj_d))\n return res\n\ndef save_results(res, nombre_extra=\"\"):\n res_sort = sorted(res, key = lambda e: e[1].loc['Total'][:-1].sum(), reverse=True)\n \n for e in res_sort:\n print(e[0])\n \n fecha = datetime.now().strftime(\"%Y%m%d_%H%M%S\") + nombre_extra\n \n with open('../Analisis resultados/'+fecha+'_object_res.txt', 'wb') as f:\n pickle.dump(res, f)\n with open('../Analisis resultados/'+fecha+'_excel.txt', 'w') as f:\n for e in res_sort:\n f.write(e[0]+'\\t'+\n e[1].loc[['Total']].to_string(header=False).replace('.',',')+' '+\n e[1].loc[['area verde pos']].to_string(header=False).replace('.',',')+' '+\n e[1].loc[['area roja neg']].to_string(header=False).replace('.',',')+' '+\n e[1].loc[['suma alfas areas']].to_string(header=False).replace('.',',')+' '+\n e[1].loc[['dif area pos-neg']].to_string(header=False).replace('.',',')+'\\n')\n with open('../Analisis resultados/'+fecha+'.txt', 'w') as f:\n for e in res:\n f.write(e[0]+'\\n')\n f.write(e[1].iloc[1:].to_string()+'\\n\\n')\n with open('../Analisis resultados/'+fecha+'_sorted.txt', 'w') as f:\n for e in res_sort:\n f.write(e[0]+'\\n')\n f.write(e[1].iloc[1:].to_string()+'\\n\\n')\n \ndef main():\n path = \"../data/tradingsim_todo_normalizadoPorVolatilidad.csv\"\n data_org = pd.read_csv(path, sep=\";\")\n data_org['suma_direcciones'] = data_org['1D_direccion'] + data_org['1H_direccion']\n \n res = ejecutar_analisis(data_org)\n \n save_results(res)\n \nif __name__ == \"__main__\":\n main()\n \n \ndef read_res_object():\n nombre_fichero = '20190712_095528_object_res'\n with open('Analisis resultados/'+nombre_fichero+'.txt', 'rb') as f:\n res = pickle.load(f)\n \n res_sort = sorted(res, key = lambda 
e: e[1].loc['Total'][:-1].sum(), reverse=True)\n \n with open('Analisis resultados/'+nombre_fichero+'_excel.txt', 'w') as f:\n for e in res_sort:\n f.write(e[0]+'\\t'+\n e[1].loc[['Total']].to_string(header=False).replace('.',',')+' '+\n e[1].loc[['area verde pos']].to_string(header=False).replace('.',',')+' '+\n e[1].loc[['area roja neg']].to_string(header=False).replace('.',',')+' '+\n e[1].loc[['suma alfas areas']].to_string(header=False).replace('.',',')+' '+\n e[1].loc[['dif area pos-neg']].to_string(header=False).replace('.',',')+'\\n')\n\n","sub_path":"pred_diarias_python_analysis/analisis_estadistico/analisis_todos_los_filtros.py","file_name":"analisis_todos_los_filtros.py","file_ext":"py","file_size_in_byte":5530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"155712846","text":"import sqlite3\nimport time\nimport datetime\nimport random\nimport Database\n\nconnection = sqlite3.connect(\"2LC.db\")\nc = connection.cursor()\n\ndef create_table():\n\tc.execute(\"CREATE TABLE IF NOT EXISTS betting(playerID TEXT, playerName TEXT, selectedChoice TEXT, betAmount INT)\")\n\ndef data_entry():\n\tc.execute(\"INSERT INTO betting VALUES('id', 'name', 'A', 0)\")\n\tconnection.commit()\t\n\ndef dynamic_data_entry(playerID, playerName, selectedChoice, betAmount):\n\tc.execute(\"INSERT INTO betting (playerID, playerName, selectedChoice, betAmount) VALUES(?, ?, ?, ?)\", (playerID, playerName, selectedChoice, betAmount))\n\tconnection.commit()\n\t\ndef read_from_db(command, id = 0, win = \"\"):\n\tc.execute(\"SELECT * FROM betting\")\n\tif command == \"scan\":\n\t\tfound = 0\n\t\tfor row in c.fetchall(): \n\t\t\tif row[0] == id:\n\t\t\t\tfound += 1\n\t\tif found < 1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\tif command == \"retrieve\":\n\t\tfor row in c.fetchall(): \n\t\t\tif row[0] == id:\n\t\t\t\treturn row[2]\n\tif command == \"check\":\n\t\tfor row in c.fetchall(): \n\t\t\tif row[0] == id:\n\t\t\t\treturn \"You have betted \" + str(row[3]) + \" Dice on \\\"\" + row[2] + \"\\\"\"\n\tif command == \"wonbet\":\n\t\twinners = \"\"\n\t\tfor row in c.fetchall():\n\t\t\tif row[2] == win:\n\t\t\t\tDatabase.del_and_update(\"add\", row[0], row[1]+\"#\", row[3]*2)\n\t\t\t\twinners += \"\\n\" + row[1]\n\t\treturn winners + \"\\n\\n\" + del_and_update(\"end\")\n\t\t\n\tif command == \"startCheck\":\n\t\tcount = c.fetchall()\n\t\tif len(count) > 0:\n\t\t\treturn \"\\nBetting Available\"\n\t\telse:\n\t\t\treturn \"\\nCurrently, there is no betting session\"\"\"\n\ndef del_and_update(command):\n\tif command == \"end\":\n\t\tc.execute(\"DELETE FROM betting\")\n\t\tconnection.commit()\n\t\treturn \"Contest is over\"\n\t\ndef convert(n):\n\tname = \"\"\n\tmax = str(n).index(\"#\")\n\tname = str(n)[: max]\n\treturn name\n#create_table()\n#data_entry()","sub_path":"2LCData/BetSave.py","file_name":"BetSave.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"255149363","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 23 15:53:17 2019\r\n\r\n@author: Chandrasekar Sivaraman\r\n\"\"\"\r\n\r\n#Creating a function to do the zero padding operation\r\nimport numpy as np\r\n\r\ndef my_zero_pad(X, pad_value):\r\n # Assuming that the first dimension of the input matrix is the number of samples in the data set\r\n #2nd and 3rd dimension of the array are our pixel dimensions and 4th dimension is the number of channels\r\n #We are padding zeros only to 
the 2nd and 3rd dimensions and not to the channels\r\n \r\n padded_mat = np.pad(X, ((0, 0), (pad_value,pad_value), (pad_value, pad_value), (0, 0)), 'constant', constant_values=0)\r\n\r\n return padded_mat\r\n\r\n\r\ndef my_conv(input_layer, filters, bias, pad, stride):\r\n \r\n #m = number of samples in the set\r\n #n_h_inp = height of input layer\r\n #n_w_inp = width of input layer\r\n #n_c_inp = channels of input layer\r\n (m, n_h_inp, n_w_inp, n_c_inp) = input_layer.shape\r\n \r\n #f = fitler height and width \r\n #note that n_c_inp should match since the convolution is performed across the channels\r\n #n_f = number of filters\r\n (f,f,n_c_inp,n_f) = filters.shape\r\n \r\n \r\n #Number of values after convolution\r\n #Height and Width are given by ((n-f+2p)/s) + 1\r\n \r\n n_h = int(((n_h_inp - f + 2*pad)/stride)) + 1 \r\n n_w = int(((n_w_inp - f + 2*pad)/stride)) + 1 \r\n \r\n #The feature matrix after the convolution\r\n #it will have a 3d volume of height n_h, width n_w and depth equal to the number of filters used n_f\r\n #since we have m examples we have m of such 3d volumes\r\n feature_mat = np.zeros((m,n_h,n_w,n_f))\r\n \r\n #Zero pad the input before convolving to preserve the dimensions\r\n input_layer_padded = my_zero_pad(input_layer,pad)\r\n\r\n #Lets begin the convolution process\r\n \r\n #Loop over all the samples in input space\r\n for l in range(m): \r\n #Select the current sample to convolve \r\n current_inp = input_layer_padded[l] \r\n \r\n #Nested for loops to cover the 3D space of the output feature matrix for one sample (n_h, n_w, n_f)\r\n for i in range(n_h):\r\n for j in range(n_w):\r\n for k in range(n_f):\r\n \r\n #pick out the current volume slice that you want to convolve\r\n current_slice = current_inp[(i*stride):(i*stride)+f,(j*stride):(j*stride)+f, : ]\r\n current_filter = filters[:,:,:,k]\r\n current_bias = bias[:,:,:,k]\r\n current_conv = (np.multiply(current_slice,current_filter)) + current_bias\r\n \r\n feature_mat[l,i,j,k] = np.sum(current_conv)\r\n \r\n \r\n \r\n cache = (input_layer, filters, bias, stride, pad)\r\n \r\n return feature_mat, cache\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef my_maxpool(input_layer,f,stride):\r\n \r\n #m = number of samples in the set\r\n #n_h_inp = height of input layer\r\n #n_w_inp = width of input layer\r\n #n_c_inp = channels of input layer\r\n\r\n (m, n_h_inp, n_w_inp, n_c_inp) = input_layer.shape\r\n \r\n n_h = int(((n_h_inp - f)/stride)) + 1 \r\n n_w = int(((n_w_inp - f)/stride)) + 1 \r\n n_c = n_c_inp\r\n \r\n output_mat = np.zeros((m,n_h,n_w,n_c))\r\n \r\n for l in range(m): \r\n \r\n #Nested for loops to cover the 3D space of the output feature matrix for one sample (n_h, n_w, n_f)\r\n for i in range(n_h):\r\n for j in range(n_w):\r\n for k in range(n_c):\r\n \r\n current_slice = input_layer[l,(i*stride):(i*stride)+f,(j*stride):(j*stride)+f, k]\r\n \r\n output_mat[l,i,j,k] = np.max(current_slice)\r\n \r\n cache = (input_layer,f,stride)\r\n \r\n return output_mat,cache\r\n \r\n \r\n\r\n\r\n\r\n\r\n \r\ndef my_backprop(dfeature_mat,cache):\r\n \r\n #Get your required information from the cached values\r\n \r\n (input_layer, filters, bias, stride, pad) = cache\r\n \r\n #m = number of samples in the set\r\n #n_h_inp = height of input layer\r\n #n_w_inp = width of input layer\r\n #n_c_inp = channels of input layer\r\n (m, n_h_inp, n_w_inp, n_c_inp) = input_layer.shape\r\n \r\n #f = fitler height and width \r\n #note that n_c_inp should match since the convolution is performed across the channels\r\n #n_f = number of 
filters\r\n (f,f,n_c_inp,n_f) = filters.shape\r\n \r\n #Determine the shape values of the gradient matrix which is input to the function\r\n #dfeature_mat is the gradient of the loss w.r.t feature_mat\r\n (m,n_h,n_w,n_c) = dfeature_mat.shape\r\n \r\n #Note that n_c will be equal to n_f since the output channels is equal to number of filters during convoltution\r\n \r\n \r\n #dinput_layer is the gradient of the loss w.r.t input_layer\r\n #dfilters is the gradient of the loss w.r.t filters\r\n #dbias is the gradient of the loss w.r.t bias\r\n dinput_layer = np.zeros((m, n_h_inp, n_w_inp, n_c_inp))\r\n dfilters = np.zeros((f,f,n_c_inp,n_f))\r\n dbias = np.zeros((1,1,1,n_c))\r\n \r\n #pad the input_layer and dinput_layer\r\n input_layer_padded = my_zero_pad(input_layer,pad)\r\n dinput_layer_padded = my_zero_pad(dinput_layer,pad)\r\n \r\n \r\n for l in range(m):\r\n \r\n current_input_layer_padded = input_layer_padded[l]\r\n current_dinput_layer_padded = dinput_layer_padded[l]\r\n \r\n for i in range(n_h):\r\n for j in range(n_w):\r\n \r\n for k in range(n_c): \r\n current_input_slice = current_input_layer_padded[(i*stride):(i*stride)+f,(j*stride):(j*stride)+f, :]\r\n current_dinput_slice = current_dinput_layer_padded[(i*stride):(i*stride)+f,(j*stride):(j*stride)+f, :]\r\n \r\n # feature = input * filter\r\n # if error depends on f and its gradients wrt to feature map is df\r\n # then the gradient of error wrt input is df*filters by chain rule\r\n # and the gradient of error wrt filters is df*input by chain rule\r\n \r\n current_dinput_slice += filters[:,:,:,k]*dfeature_mat[l,i,j,k]\r\n dfilters[:,:,:,k] += current_input_slice*dfeature_mat[l,i,j,k]\r\n dbias[:,:,:,k] += dfeature_mat[l,i,j,k]\r\n \r\n \r\n dinput_layer[l,:,:,:] = current_dinput_layer_padded[pad:-pad, pad:-pad, :]\r\n \r\n \r\n \r\n return dinput_layer, dfilters, dbias\r\n\r\n\r\ndef my_maxpool_tracker(x):\r\n \r\n maxpool_tracker = (x==np.max(x))\r\n \r\n return maxpool_tracker\r\n\r\n\r\ndef my_maxpool_backprop(doutput_mat,cache):\r\n \r\n (input_layer,f,stride) = cache\r\n \r\n (m, n_h_inp, n_w_inp, n_c_inp) = input_layer.shape\r\n \r\n (m, n_h, n_w, n_c) = doutput_mat.shape\r\n \r\n dinput_layer = np.zeros(input_layer.shape)\r\n\r\n \r\n for l in range(m):\r\n \r\n current_inp = input_layer[l]\r\n \r\n for i in range(n_h):\r\n for j in range(n_w):\r\n for k in range(n_c):\r\n \r\n current_input_slice = current_inp[(i*stride):(i*stride)+f,(j*stride):(j*stride)+f, k]\r\n \r\n #track the positions of the maximum values extracted by maxpool\r\n tracker = my_maxpool_tracker(current_input_slice)\r\n \r\n #assign the gradients only to the values that contributed to the maximum value \r\n dinput_layer[l,(i*stride):(i*stride)+f,(j*stride):(j*stride)+f, k] += np.multiply(tracker,doutput_mat[l,i,j,k])\r\n \r\n \r\n return dinput_layer\r\n\r\n\r\n\r\n\r\n\r\nimport cv2\r\n\r\nangry_cat = cv2.imread(\"angry cat.jpg\")\r\nangry_cat = cv2.resize(angry_cat, (256,256))\r\n\r\nangry_cat = np.reshape(angry_cat,[1,256,256,3])\r\n\r\ncat_img = angry_cat[0,:,:,:]\r\n\r\ncv2.imshow(\"resized cat img\",cat_img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\nrandom_filter = 2*np.random.rand(3,3,3) - 1\r\n\r\nrandom_filter = np.reshape(random_filter,[1,3,3,3])\r\nW = np.random.randn(2, 2, 3, 8)\r\nb = np.random.randn(1, 1, 1, 8)\r\n\r\nz, cache = my_conv(angry_cat, W, b, 0,1)\r\n\r\nfor i in range(8):\r\n cv2.imshow(\"resized cat img\",z[0,:,:,i])\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n\r\n 
\r\n \r\n \r\n","sub_path":"my_full_cnn.py","file_name":"my_full_cnn.py","file_ext":"py","file_size_in_byte":8457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"256928402","text":"#! /usr/bin/env python\n## -*- encoding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport subprocess\nfrom setuptools import setup\nfrom setuptools import Extension\nfrom setuptools.command.test import test as TestCommand # for tests\nfrom Cython.Build import cythonize\nfrom Cython.Compiler.Errors import CompileError\nfrom codecs import open # To open the README file with proper encoding\nfrom sage.env import sage_include_directories\n\n# For the tests\nclass SageTest(TestCommand):\n def run_tests(self):\n errno = os.system(\"PYTHONPATH=`pwd` sage check_license.py\")\n if errno >> 8 == 42:\n print(\"The module seems to work but the license is missing. Skipping the test suite.\",\n file=sys.stderr)\n sys.exit(0)\n elif errno != 0:\n print(\"check_license.py returned error {}\".format(errno))\n sys.exit(1)\n self._actually_run_tests()\n\n def _actually_run_tests(self):\n # Passing optional=sage avoids using sage.misc.package.list_packages,\n # which gives an error on Debian unstable as of 2019-12-27:\n # FileNotFoundError: [Errno 2] No such file or directory: '/usr/share/sagemath/build/pkgs'\n errno = os.system(\"PYTHONPATH=`pwd` sage -t --force-lib --optional=sage,gurobi sage_numerical_backends_gurobi\")\n if errno != 0:\n sys.exit(1)\n\nclass SageTestSage(SageTest):\n def _actually_run_tests(self):\n errno = os.system(\"PYTHONPATH=`pwd` sage -c 'load(\\\"check_sage_testsuite.py\\\")'\")\n if errno != 0:\n sys.exit(1)\n\n# Get information from separate files (README, VERSION)\ndef readfile(filename):\n with open(filename, encoding='utf-8') as f:\n return f.read()\n\ngurobi_include_directories = []\ngurobi_lib_directories = []\ngurobi_lib_files = []\ngurobi_libs = []\ngurobi_home = os.getenv(\"GUROBI_HOME\")\n\nif not gurobi_home:\n # gurobi.sh might be in PATH. As of Gurobi 9.0 (on macOS), it is\n # a shell script that sets (but does not export) GUROBI_HOME\n # and then invokes a Python interpreter.\n # Gurobi 8.0.1 and 8.1.1 (on macOS) do not set GUROBI_HOME\n # but set PYTHONPATH.\n try:\n gurobi_home = subprocess.check_output(\n '. gurobi.sh -c \"\" '\n '&& if [ -n \"$GUROBI_HOME\" ]; then echo $GUROBI_HOME; else dirname $PYTHONPATH; fi',\n shell=True).decode(\"UTF-8\").strip()\n print(\"Using GUROBI_HOME obtained from gurobi.sh: {}\".format(gurobi_home),\n file=sys.stderr)\n except subprocess.CalledProcessError:\n pass\n\nexts = ['so']\nif sys.platform == 'darwin':\n exts.insert(0, 'dylib')\n\nuse_rpath = True\n\nif gurobi_home:\n gurobi_include_directories.append(gurobi_home + \"/include\")\n libdir = gurobi_home + \"/lib\"\n gurobi_lib_directories.append(libdir)\n from fnmatch import fnmatch\n for file in os.listdir(libdir):\n # Avoid libgurobi81_light.dylib, which causes runtime\n if any(fnmatch(file, 'libgurobi*.' + ext) for ext in exts) and not fnmatch(file, 'libgurobi*light*.*'):\n gurobi_libs = [os.path.splitext(file)[0][3:]]\n gurobi_lib_files = [os.path.join(libdir, file)]\n break\n\nif not gurobi_libs:\n print(\"GUROBI_HOME is not set, or it does not point to a directory with a \"\n \"Gurobi installation. 
Trying to link against -lgurobi\", file=sys.stderr)\n gurobi_libs = ['gurobi']\nelse:\n print(\"Using gurobi_include_directories={}, libraries={}, library_dirs={}\".format(\n gurobi_include_directories, gurobi_libs, gurobi_lib_directories), file=sys.stderr)\n\nif use_rpath:\n lib_args = dict(libraries=gurobi_libs,\n library_dirs=gurobi_lib_directories,\n runtime_library_dirs=gurobi_lib_directories)\nelse:\n lib_args = dict(extra_link_args=gurobi_lib_files)\n\n # Cython modules\next_modules = [Extension('sage_numerical_backends_gurobi.gurobi_backend',\n sources = [os.path.join('sage_numerical_backends_gurobi',\n 'gurobi_backend.pyx')],\n include_dirs=sage_include_directories() + gurobi_include_directories,\n **lib_args)\n ]\n\n\n## SageMath 8.1 (included in Ubuntu bionic 18.04 LTS) does not have sage.cpython.string;\n## it was introduced in 8.2.\ncompile_time_env = {'HAVE_SAGE_CPYTHON_STRING': False,\n 'HAVE_ADD_COL_UNTYPED_ARGS': False}\n\nprint(\"Checking whether HAVE_SAGE_CPYTHON_STRING...\", file=sys.stderr)\ntry:\n import sage.cpython.string\n compile_time_env['HAVE_SAGE_CPYTHON_STRING'] = True\nexcept ImportError:\n pass\n\n## SageMath 8.7 changed the signature of add_col.\nprint(\"Checking whether HAVE_ADD_COL_UNTYPED_ARGS...\", file=sys.stderr)\ntry:\n cythonize(Extension('check_add_col_untyped_args',\n sources=['check_add_col_untyped_args.pyx'],\n include_dirs=sage_include_directories()),\n quiet=True,\n include_path=sys.path)\n compile_time_env['HAVE_ADD_COL_UNTYPED_ARGS'] = True\nexcept CompileError:\n pass\n\nprint(\"Using compile_time_env: {}\".format(compile_time_env), file=sys.stderr)\n\nsetup(\n name=\"sage_numerical_backends_gurobi\",\n version=readfile(\"VERSION\").strip(),\n description=\"Gurobi backend for Sage MixedIntegerLinearProgram\",\n long_description = readfile(\"README.md\"), # get the long description from the README\n long_description_content_type='text/markdown', # https://packaging.python.org/guides/making-a-pypi-friendly-readme/\n url=\"https://github.com/mkoeppe/sage-numerical-backends-gurobi\",\n # Author list obtained by running the following command on sage 9.0.beta9:\n # for f in gurobi_backend.p*; do git blame -w -M -C -C --line-porcelain \"$f\" | grep -I '^author '; done | sort -f | uniq -ic | sort -n\n # cut off at < 10 lines of attribution.\n author='Nathann Cohen, Martin Albrecht, Matthias Koeppe, John Perry, David Coudert, Jori Mäntysalo, Jeroen Demeyer, Erik M. Bray, Emil R. 
Vaughan, and others',\n author_email='mkoeppe@math.ucdavis.edu',\n license='GPLv2+', # This should be consistent with the LICENCE file\n classifiers=['Development Status :: 5 - Production/Stable',\n \"Intended Audience :: Science/Research\",\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n ext_modules = cythonize(ext_modules, include_path=sys.path,\n compile_time_env=compile_time_env),\n cmdclass = {'test': SageTest, 'check_sage_testsuite': SageTestSage}, # adding a special setup command for tests\n keywords=['milp', 'linear-programming', 'optimization'],\n packages=['sage_numerical_backends_gurobi'],\n package_dir={'sage_numerical_backends_gurobi': 'sage_numerical_backends_gurobi'},\n package_data={'sage_numerical_backends_gurobi': ['*.pxd']},\n install_requires = [# 'sage>=8', ### On too many distributions, sage is actually not known as a pip package\n 'sphinx'],\n)\n","sub_path":"pypi_install_script/sage_numerical_backends_gurobi-9.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":7511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"399996531","text":"#!/usr/bin/env python3\n\n\ndef parse(ss_file):\n\n \"\"\"Reads secondary structure file, ss2 or ss3\n @param ss_file, ss2 or ss3 fasta-formated ss-file\n @return secondary structure string.\n \"\"\"\n\n result = ''\n for line in ss_file:\n if line.startswith('>'):\n continue\n result = line.strip()\n return result\n","sub_path":"contactvis/parsing/parse_ss.py","file_name":"parse_ss.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"339124159","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 29 04:32:02 2020\r\n\r\n@author: sinha\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport os\r\nfrom io import TextIOWrapper\r\n\r\n# pd.options.mode.chained_assignment = None \r\n\r\ndef merge_Segments(seg_df, delay_or_group):\r\n d_segments = seg_df['Segment'].unique()\r\n n_seg = 0\r\n for seg in d_segments:\r\n n_seg += 1\r\n if delay_or_group == 0:\r\n seg_df.loc[seg_df['Segment'] == seg, 'Delay'] = seg_df['Group'] + '-' + str(n_seg)\r\n # seg_df['Delay'][seg_df['Segment'] == seg] = seg_df['Group'] + '-' + str(n_seg)\r\n # seg_df['Parts'][seg_df['Segment'] == seg] = n_seg\r\n elif delay_or_group == 1:\r\n seg_df.loc[seg_df['Segment'] == seg, 'Delay'] = seg_df['Delay'] + '-' + str(n_seg)\r\n # seg_df['Delay'][seg_df['Segment'] == seg] = seg_df['Delay'] + '-' + str(n_seg)\r\n # seg_df['Parts'][seg_df['Segment'] == seg] = n_seg\r\n return pd.DataFrame(seg_df)\r\n\r\ndef delete_columns(df, cols):\r\n df = df.drop(cols, axis = 1)\r\n if 'Group' in df:\r\n df = df.drop(['Group'], axis = 1)\r\n if 'Video Type' in df:\r\n df = df.drop(['Video Type'], axis = 1)\r\n \r\n return pd.DataFrame(df)\r\n\r\ndef filter_on_delay(csv):\r\n delays = csv['Delay'].unique()\r\n df = pd.DataFrame() \r\n for d in delays:\r\n d_hscores = csv.loc[csv['Delay'] == d] \r\n merged = merge_Segments(d_hscores, 1)\r\n df = df.append(merged)\r\n \r\n # print (df)\r\n # updated_delay = df.pop('Delay')\r\n cols = 
['Unnamed: 0','start', 'end', 'Segment']\r\n \r\n df = delete_columns(df, cols)\r\n # df.insert(2, 'Delay', updated_delay)\r\n return df\r\n\r\ndef filter_on_group(csv):\r\n delays = csv['Group'].unique()\r\n df = pd.DataFrame() \r\n for d in delays:\r\n d_hscores = csv.loc[csv['Group'] == d] \r\n d_hscores = merge_Segments(d_hscores, 0)\r\n df = df.append(d_hscores)\r\n \r\n # updated_delay = df.pop('Delay')\r\n cols = ['Unnamed: 0','start', 'end', 'Segment']\r\n \r\n df = delete_columns(df, cols)\r\n # df.insert(2, 'Delay', updated_delay)\r\n return df\r\n\r\ndef compute_statistics(dfg):\r\n stats = dfg.groupby('Delay').mean()\r\n df_std = dfg.groupby('Delay').std()['h_score']\r\n df_median = dfg.groupby('Delay').median()['h_score']\r\n \r\n stats['mean h_score'] = stats['h_score']\r\n stats['std h_score'] = df_std\r\n stats['median h_score'] = df_median\r\n stats = stats.drop(['h_score'], axis = 1)\r\n \r\n stats.to_csv('temp.csv')\r\n return stats\r\n \r\n\r\nsession_path = \"../Sessions/\"\r\nsession_id = \"IM175_1\"\r\n\r\ndef main():\r\n home_path = os.getcwd()\r\n print (home_path)\r\n session_names = os.listdir(session_path)\r\n for session_id in session_names:\r\n try:\r\n print ('\\n\\n------------- Processing files of ' + session_id + ' for statistics -----------')\r\n processed_file_path = session_path + session_id + \"/processed_data/\"\r\n file_suffix = '_video_analysis_filtered.csv'\r\n \r\n csv = pd.read_csv(processed_file_path + session_id + file_suffix)\r\n print('read csv')\r\n pid = csv['PId'][0]\r\n eft_ert_type = csv['Type'][0] \r\n \r\n filtered_csv = pd.DataFrame()\r\n \r\n if 'Group' not in csv:\r\n filtered_csv = filter_on_delay(csv)\r\n print('Filtered perfectly')\r\n else:\r\n filtered_csv = filter_on_group(csv)\r\n print('Filtered perfectly')\r\n stats = compute_statistics(filtered_csv)\r\n print ('Statistics done')\r\n stats.reset_index(level=0, inplace = True)\r\n print('reset done')\r\n stats.insert(0, 'PId', pid)\r\n stats.insert(1, 'Type', eft_ert_type)\r\n print('insert done')\r\n print('Reached before writing')\r\n stats.set_index('PId')\r\n output = pd.DataFrame(stats)\r\n output.set_index('PId')\r\n print (output)\r\n # os.chdir(processed_file_path)\r\n # print(os.getcwd())\r\n output_file = session_id + '_stats.csv'\r\n print ('writing')\r\n # print (output_file)\r\n # with open(processed_file_path + output_file, mode='w', newline='\\n') as f:\r\n # stats.to_csv(f, sep=self.delimiter, index=False)\r\n output.to_csv(processed_file_path + session_id + '_stats.csv')\r\n print ('Written successfully\\n')\r\n # os.chdir(home_path)\r\n except:\r\n pass\r\n \r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"scripts/just.py","file_name":"just.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"414594785","text":"\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n\r\nCreated on Tue Jan 05 13:09:38 2016\r\n\r\n@file crd.py\r\n\r\n@author: Apurva Pathak\r\n\r\n@brief Program to scrape data from WebOfScience.\r\n\r\n\"\"\"\r\n#%%\r\nimport time\r\n\r\nimport urllib.request as urllib2\r\n\r\nimport pickle\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n\r\nBASE_URL='https://apps.webofknowledge.com/full_record.do?product=UA&search_mode=GeneralSearch&qid=15&SID=4AfNuKSCQ7W8LDwH8m9&page=1&doc='\r\n\r\nall_records=[]\r\n\r\npage_no=1\r\n\r\nmax_page=2071\r\ntries=0\r\nmax_try=5\r\n\r\nwhile page_no<=max_page:\r\n\r\n print('Page No: %d'%(page_no))\r\n\r\n 
try:\r\n fname = urllib2.urlopen(BASE_URL + str(page_no))\r\n soup = BeautifulSoup(fname,\"html.parser\") \r\n except:\r\n page_no += 1 \r\n continue\r\n\r\n tags_journal=soup.find_all(\"p\",{\"class\":\"sourceTitle\"})\r\n\r\n tags_fields=soup.find_all(\"p\",{\"class\":\"FR_field\"})\r\n\r\n tags_address=soup.find_all(\"td\",{\"class\":\"fr_address_row2\"})\r\n\r\n record=dict()\r\n\r\n address=tags_address[0].get_text()\r\n\r\n #record['journal']=str(tags_journal[0].find('value').get_text())\r\n\r\n #record['address']=str(address.split('\\n')[0]) \r\n\r\n add_list=[]\r\n \r\n for tag_a in tags_address:\r\n try:\r\n add = tag_a.get_text()\r\n br_idx = add.find(']')\r\n tmp_add = (add[br_idx+1:].strip())\r\n \r\n org_address=tmp_add.split('Organization-Enhanced Name(s)')[1:]\r\n ##print('code reached refinement')\r\n refined_add = re.sub(r'[^\\x00-\\x7f]',r' ',org_address[0]).strip()\r\n refined_list = re.sub(r'[\\n\\t' ']+',r'',refined_add).split(' ')\r\n refined_orgs = \"\"\r\n for el in refined_list:\r\n refined_orgs=refined_orgs+el\r\n if not el=='':\r\n refined_orgs=refined_orgs+','\r\n \r\n add_list.append(refined_orgs[:-1])\r\n \r\n except:\r\n continue \r\n record['organization']=add_list; \r\n \r\n \r\n# \r\n \r\n for tags_field in tags_fields:\r\n \r\n address1 = tags_field.find('p',{'class','FR_field'})\r\n acc_no = tags_field.find('span',{'class','FR_label'}) \r\n \r\n field_name = tags_field.find('span',{'class','FR_label'})\r\n \r\n if field_name:\r\n\r\n field_name=str(field_name.get_text())\r\n\r\n #if('By:' in field_name):\r\n \r\n # value=str(tags_field.get_text())\r\n \r\n # record['author']=value[value.find('(')+1:value.find(')')]\r\n \r\n # if('Published:' in field_name):\r\n \r\n # record['year']=int(tags_field.find('value').get_text()[-4:])\r\n \r\n #if('DOI:' in field_name):\r\n # ##print('found')\r\n # value = str(tags_field.find('value').get_text())\r\n # record['DOI']=value\r\n \r\n if('Accession Number:' in field_name):\r\n print('found')\r\n value = str(tags_field.find('value').get_text())\r\n record['ID']=value[4:] \r\n \r\n ##print('code reached here') \r\n all_records.append(record)\r\n\r\n page_no+=1\r\n tries=0\r\n \r\n\r\n \r\npickle.dump(all_records,open('wos_2001_2500_soc_data','wb'))\r\n\r\n\r\n","sub_path":"socg290/pro3/ScrapeWOS.py","file_name":"ScrapeWOS.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"301363785","text":"# Binary search is used, so lst must be sorted.\nlst = [1, 5, 4, 10, 6, 27, 17]\nlst.sort()\n\n\ndef lowerBound(lst, target):\n start = 0\n end = len(lst)-1\n\n \"\"\"\n If target is greater than every element of lst:\n \n if lst[-1] < target:\n return -1\n \"\"\"\n\n while start < end:\n mid = (start + end) // 2\n\n if lst[mid] < target:\n start = mid + 1\n else:\n end = mid\n\n return end\n\n# Alternatively, the lower bound can be obtained with: from bisect import bisect_left\n\nfrom bisect import bisect_left\n\nlst = [1,5,7,10,27,2,16,30]\ntarget = int(input())\nlst.sort()\n\nidx = bisect_left(lst,target)\n\n# Find the smallest index of an element in lst that is greater than or equal to target\n","sub_path":"알고리즘 정리/이진 탐색/LowerBound.py","file_name":"LowerBound.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"580811131","text":"import matplotlib.pyplot as plt\n\nimport sys\n\n# line 1 points\nx1 = sys.argv[1].split(',')\ny1 = sys.argv[2].split(',')\n\nx1 = [float(i) for i in x1]\ny1 = [float(i) for i in y1]\n\nprint(x1)\nprint(y1)\n\n# setting the axes at the centre\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.spines['left'].set_position('center')\nax.spines['bottom'].set_position('center')\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('left')\n\n# plotting the line 1 points\nplt.plot(x1, y1, label=\"line 1\")\n\n# function to show the plot\nplt.show()\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"543542177","text":"print(\"Hello World\")\n\n# Concatenating strings with numbers\nprint(\"This costs \" + str(4) + \" Dollars\")\n\n# Converting a string into an array\nprint(\"Hello:Nick:World\".split(\":\"))\n\n\nprint(\"Hello:Nick:World\".split(\":\")[1]) # Accessing an index of the array\n\n# Comparing boolean values\nprint(5 == 5) # true\nprint(5 is 5) # true\nprint(5 is not 5) # false\nprint(\"This\" is \"This\") # true\nprint(\"True\" is True) # false - because it evaluates the type too\n\n# Lists (Arrays)\n['Movies', 'Games', 'Python']\n['Movies', 6, 'Python']\n\n# Dictionaries (JSON-like, key value format)\n{\"name\": \"Nick\", \"age\": 27, \"hobby\": \"code\"}\n\n# Variables\ngreeting = \"Hello World\"\ngreeting = greeting.split(\" \")[0] # splits the string on the given separator, turning it into an array so we can take the part we want\nnumber = 1\nsecondnumber = 2\nprint( number * secondnumber + secondnumber * number)\n\n# basic (built-in) functions///////////////////////////////////\n\nprint(\"Print\")\nstr(5) # converts to string\nstr(\"True\")\nint(\"4\") # converts to int\nfloat(\"5.4\") # converts to float\nbool(\"True\") # converts to boolean\n\nlen(\"Hello\") # determines the length of strings and arrays\nlen([1, 2, 3, 4])\nlen([\"Hello\", \"John\"])\n\nsorted([16, 3, 8, 6, 9, 133, 435, 21, 823, 45]) # sorts numerically\n\nsorted([\"B\", \"R\", \"a\", \"N\"]) # sorts alphabetically (numbers first, then capital letters, then lowercase)\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"105098996","text":"from django.conf.urls import url\nfrom django.urls import path,re_path\nfrom Hackathon import views\n\nHackathon_url_patterns = [\n path('', views.hackathon_home, name = 'hackathon_home'),\n path('register_page', views.hackathon_register, name = 'hackathon_register'),\n path('shedule', views.shedule , name = 'shedule'),\n path('problem_statements', views.ps, name = 'ps'),\n path('countdown', views.countdown , name = 'countdown'),\n path('faq', views.faq , name = 'faq'),\n path('sponsors', views.sponsors , name = 'sponsors'),\n path('paralax', views.paralax , name = 'paralax'),\n path('first', views.first , name = 'first'),\n]","sub_path":"Hackathon/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"292019134","text":"import pygame, math\nfrom random import randrange\n\nclass StarField:\n def __init__(self, num_stars, max_depth):\n pygame.init()\n self.screen = pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"3D starfield\")\n self.clock = pygame.time.Clock()\n self.num_stars = num_stars\n self.max_depth = max_depth\n self.init_stars()\n\n def init_stars(self):\n self.stars = []\n for i in range(self.num_stars):\n star = [randrange(-25, 25), randrange(-25, 25), randrange(1, self.max_depth)]\n self.stars.append(star)\n \n def move_and_draw_stars(self):\n origin_x = self.screen.get_width() / 2\n origin_y = self.screen.get_height() / 2\n for star in self.stars:\n star[2] -= 0.05\n if star[2] <= 0:\n star[0] = randrange(-25, 25)\n star[1] = randrange(-25, 25)\n star[2] = self.max_depth\n k = 128.0 / star[2] # 128 is the focal length of the perspective projection: screen offset = 128 * world coordinate / depth\n x = int(star[0] * k + origin_x)\n y = int(star[1] * k + origin_y)\n if 0 <= x < self.screen.get_width() and 0 <= y < self.screen.get_height():\n size = (1 - float(star[2]) / self.max_depth) * 5\n shade = (1 - float(star[2]) / self.max_depth) * 255\n self.screen.fill((shade, shade, shade), (x, y, size, size))\n \n def run(self):\n \"\"\" Main Loop \"\"\"\n while 1:\n # Lock the framerate at 50 FPS.\n self.clock.tick(50)\n \n # Handle events.\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n return\n \n self.screen.fill((0,0,0))\n self.move_and_draw_stars()\n pygame.display.flip()\n\nif __name__ == \"__main__\":\n StarField(512, 32).run()\n","sub_path":"pygames/game_starfield.py","file_name":"game_starfield.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"322419820","text":"import numpy as np\nfrom astropy import units\n\n\n__all__ = [\n 'camb',\n]\n\n\ndef camb(wavenumber, redshift, cosmology, A_s, n_s):\n r'''CAMB linear matter power spectrum.\n Compute the linear matter power spectrum on a two dimensional grid of\n redshift and wavenumber using CAMB [1]_.\n\n Parameters\n ----------\n wavenumber : (nk,) array_like\n Array of wavenumbers in units of Mpc^-1 at which to\n evaluate the linear matter power spectrum.\n redshift : (nz,) array_like\n Array of redshifts at which to evaluate the linear matter power\n spectrum.\n cosmology : astropy.cosmology.Cosmology\n Cosmology object providing omega_matter, omega_baryon, Hubble\n parameter and CMB temperature in the present day\n A_s : float\n Cosmology parameter, amplitude normalisation of curvature perturbation\n power spectrum\n n_s : float\n Cosmology parameter, spectral index of scalar perturbation power\n spectrum\n\n Returns\n -------\n power_spectrum : (nz, nk) array_like\n Array of values for the linear matter power spectrum in Mpc^3\n evaluated at the input wavenumbers for the given primordial power\n spectrum parameters, cosmology. For nz redshifts and nk wavenumbers\n the returned array will have shape (nz, nk).\n\n Examples\n --------\n >>> import numpy as np\n >>> from astropy.cosmology import default_cosmology\n >>> cosmology = default_cosmology.get()\n >>> redshift = np.array([0, 1])\n >>> wavenumber = np.array([1.e-2, 1.e-1, 1e0])\n >>> A_s = 2.e-9\n >>> n_s = 0.965\n >>> power_spectrum = camb(wavenumber, redshift, cosmology, A_s, n_s) # doctest: +SKIP\n\n References\n ----------\n .. [1] Lewis, A. and Challinor, A. and Lasenby, A. 
(2000),\n doi : 10.1086/309179.\n\n '''\n\n try:\n from camb import CAMBparams, model, get_matter_power_interpolator\n except ImportError:\n raise Exception(\"camb is required to use skypy.power_spectrum.camb\")\n\n return_shape = (*np.shape(redshift), *np.shape(wavenumber))\n redshift = np.atleast_1d(redshift)\n\n h2 = cosmology.h * cosmology.h\n\n pars = CAMBparams()\n pars.set_cosmology(H0=cosmology.H0.value,\n ombh2=cosmology.Ob0 * h2,\n omch2=cosmology.Odm0 * h2,\n omk=cosmology.Ok0,\n TCMB=cosmology.Tcmb0.value,\n mnu=np.sum(cosmology.m_nu.to_value(units.eV)),\n standard_neutrino_neff=cosmology.Neff\n )\n\n # camb interpolator requires redshifts to be in increasing order\n redshift_order = np.argsort(redshift)\n wavenumber_order = np.argsort(wavenumber)\n\n pars.InitPower.ns = n_s\n pars.InitPower.As = A_s\n\n pars.NonLinear = model.NonLinear_none\n\n pk_interp = get_matter_power_interpolator(pars,\n nonlinear=False,\n hubble_units=False, k_hunit=False,\n kmax=np.max(wavenumber),\n zmax=np.max(redshift))\n\n pzk = pk_interp.P(redshift[redshift_order], wavenumber[wavenumber_order])\n\n return pzk[redshift_order].reshape(return_shape)\n","sub_path":"skypy/power_spectrum/_camb.py","file_name":"_camb.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"449645883","text":"\nfrom Square import *\nfrom Coordinate import *\n\nfrom ChessColour import *\nfrom ChessPieces import *\n\nimport Piece\nfrom Queen import *\nfrom Rook import *\nfrom Pawn import *\nfrom King import *\nfrom Bishop import *\nfrom Knight import * \n\nclass ChessBoard:\n\n\t# WHITE is the first move\n\t__activeColour__ = ChessColour.WHITE\n\t# Move count starts at 1\n\t# Increment after black has replied with their move\n\t__fullMove__ = 1\t \n\n\tdef __init__(self):\n\t\tself.reset()\n\n\tdef initilizeFromPieces(self, positions, pieces):\n\t\tpass\n\n\t# Sets the default state of the chess board\n\tdef reset(self):\n\n\t\t# Instantiate the chessboard \n\t\tself.chessboard = [[0,0,0,0,0,0,0,0],\n\t\t\t\t\t\t [0,0,0,0,0,0,0,0],\n\t\t\t\t\t\t [0,0,0,0,0,0,0,0],\n\t\t\t\t\t\t [0,0,0,0,0,0,0,0],\n\t\t\t\t\t\t [0,0,0,0,0,0,0,0],\n\t\t\t\t\t\t [0,0,0,0,0,0,0,0],\n\t\t\t\t\t\t [0,0,0,0,0,0,0,0],\n\t\t\t\t\t\t [0,0,0,0,0,0,0,0]]\n\n\t\t# Set the board pieces\n\t\t# White Main Pieces\n\t\tself.chessboard[0][0] = Square(Coordinate(\"a1\"), Rook(ChessColour.WHITE))\n\t\tself.chessboard[1][0] = Square(Coordinate(\"b1\"), Knight(ChessColour.WHITE))\n\t\tself.chessboard[2][0] = Square(Coordinate(\"c1\"), Bishop(ChessColour.WHITE))\n\t\tself.chessboard[3][0] = Square(Coordinate(\"d1\"), Queen(ChessColour.WHITE))\n\t\tself.chessboard[4][0] = Square(Coordinate(\"e1\"), King(ChessColour.WHITE))\n\t\tself.chessboard[5][0] = Square(Coordinate(\"f1\"), Bishop(ChessColour.WHITE))\n\t\tself.chessboard[6][0] = Square(Coordinate(\"g1\"), Knight(ChessColour.WHITE))\n\t\tself.chessboard[7][0] = Square(Coordinate(\"h1\"), Rook(ChessColour.WHITE))\n\n\t\t# White Pawns\n\t\tself.chessboard[0][1] = Square(Coordinate(\"a2\"), Pawn(ChessColour.WHITE))\n\t\tself.chessboard[1][1] = Square(Coordinate(\"b2\"), Pawn(ChessColour.WHITE))\n\t\tself.chessboard[2][1] = Square(Coordinate(\"c2\"), Pawn(ChessColour.WHITE))\n\t\tself.chessboard[3][1] = Square(Coordinate(\"d2\"), Pawn(ChessColour.WHITE))\n\t\tself.chessboard[4][1] = Square(Coordinate(\"e2\"), Pawn(ChessColour.WHITE))\n\t\tself.chessboard[5][1] = Square(Coordinate(\"f2\"), 
Pawn(ChessColour.WHITE))\n\t\tself.chessboard[6][1] = Square(Coordinate(\"g2\"), Pawn(ChessColour.WHITE))\n\t\tself.chessboard[7][1] = Square(Coordinate(\"h2\"), Pawn(ChessColour.WHITE))\n\n\t\t# The blank squares\n\t\tfor j in range(0, 8):\n\t\t\tfor i in range(2, 6):\n\t\t\t\tself.chessboard[j][i] = Square(Coordinate(\"{}{}\".format(chr(j+97),i+1)), None)\t\t\n\n\t\t# Black Main Pieces\n\t\tself.chessboard[0][7] = Square(Coordinate(\"a8\"), Rook(ChessColour.BLACK))\n\t\tself.chessboard[1][7] = Square(Coordinate(\"b8\"), Knight(ChessColour.BLACK))\n\t\tself.chessboard[2][7] = Square(Coordinate(\"c8\"), Bishop(ChessColour.BLACK))\n\t\tself.chessboard[3][7] = Square(Coordinate(\"d8\"), Queen(ChessColour.BLACK))\n\t\tself.chessboard[4][7] = Square(Coordinate(\"e8\"), King(ChessColour.BLACK))\n\t\tself.chessboard[5][7] = Square(Coordinate(\"f8\"), Bishop(ChessColour.BLACK))\n\t\tself.chessboard[6][7] = Square(Coordinate(\"g8\"), Knight(ChessColour.BLACK))\n\t\tself.chessboard[7][7] = Square(Coordinate(\"h8\"), Rook(ChessColour.BLACK))\n\n\t\t# Black Pawns\n\t\tself.chessboard[0][6] = Square(Coordinate(\"a7\"), Pawn(ChessColour.BLACK))\n\t\tself.chessboard[1][6] = Square(Coordinate(\"b7\"), Pawn(ChessColour.BLACK))\n\t\tself.chessboard[2][6]= Square(Coordinate(\"c7\"), Pawn(ChessColour.BLACK))\n\t\tself.chessboard[3][6] = Square(Coordinate(\"d7\"), Pawn(ChessColour.BLACK))\n\t\tself.chessboard[4][6] = Square(Coordinate(\"e7\"), Pawn(ChessColour.BLACK))\n\t\tself.chessboard[5][6] = Square(Coordinate(\"f7\"), Pawn(ChessColour.BLACK))\n\t\tself.chessboard[6][6] = Square(Coordinate(\"g7\"), Pawn(ChessColour.BLACK))\n\t\tself.chessboard[7][6] = Square(Coordinate(\"h7\"), Pawn(ChessColour.BLACK))\n\n\t# Return the square based on the passed coordinate \n\tdef getSquare(self, coordinate):\n\t\t# return the square\n\t\treturn self.chessboard[coordinate.getColumnNumber()][coordinate.getRowNumber()]\n\n\t# Move method\n\tdef move(self, source, destination):\n\t\t# The source square coordinate\n\t\tsource_column = source.getColumnNumber()\n\t\tsource_row = source.getRowNumber()\n\n\t\t# The destination square coordinate\n\t\tdestination_column = destination.getColumnNumber()\n\t\tdestination_row = destination.getRowNumber()\n\n\t\t# If the source square is empty, return False\n\t\tif(self.getSquare(source).getPiece() is None): \n\t\t\treturn False\n\n\t\t# If the piece on the destination square is the same colour as the active piece colour, return FALSE\n\t\tif(self.getSquare(destination).isOccupied() and self.getSquare(destination).getPiece().getColour() is ChessBoard.__activeColour__):\n\t\t\treturn False\n\n\t\t# If the move on the source piece is a legal move\n\t\tif(self.getSquare(source).getPiece().isLegalMove(self,source, destination)):\n\t\t\t# If the move is not on the active colour's turn return False\n\t\t\tif(self.getSquare(source).getPiece().getColour() is not ChessBoard.__activeColour__):\n\t\t\t\treturn False\n\n\t\t\t# Do the move\n\t\t\t# change the active colour\n\t\t\t# increment the move number\n\t\t\tself.getSquare(destination).addPiece(self.getSquare(source).deletePiece())\n\n\t\t\t# change the active colour \n\t\t\tif(ChessBoard.__activeColour__ is ChessColour.WHITE):\n\t\t\t\tChessBoard.__activeColour__ = ChessColour.BLACK\n\t\t\telse:\n\t\t\t\tChessBoard.__activeColour__ = ChessColour.WHITE\n\t\t\t\tChessBoard.__fullMove__ = ChessBoard.__fullMove__ + 1\n\t\t\t# RETURN TRUE (THE MOVE WAS DONE)\n\t\t\treturn True\n\t\t# RETURN FALSE (THE MOVE COULD NOT BE 
DONE)\n\t\telse:\n\t\t\treturn False\n\n\t# String function \n\t# Print the board in BOARD mode\n\t# White pieces at the bottom\n\tdef __str__(self):\n\t\tout = \"Board:Board\\n\"\n\t\tfor i in range(7, -1, -1):\n\t\t\tfor j in range (0,8):\n\t\t\t\tif self.chessboard[j][i].getPiece() is None:\n\t\t\t\t\tif j == 7:\n\t\t\t\t\t\tout += \" \"\n\t\t\t\t\telse:\n\t\t\t\t\t\tout += \" _\"\n\t\t\t\telse:\n\t\t\t\t\tif j == 7:\n\t\t\t\t\t\tout += \"{}\".format(self.chessboard[j][i].getPiece().getShortName())\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tout += \"{}_\".format(self.chessboard[j][i].getPiece().getShortName())\n\t\t\tout += \"\\n\"\n\t\treturn out\n\n\t# Print the board in FEN mode\n\t# White Pieces on the left\n\tdef toFEN(self):\n\t\tpass\n","sub_path":"ChessBoard.py","file_name":"ChessBoard.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"199515907","text":"\"\"\"Median of two sorted arrays\n\nGiven two sorted arrays nums1 and nums2 of sizes m and n,\nfind the median of the two sorted arrays. The required time complexity is O(log (m+n)).\nYou may assume that nums1 and nums2 are both non-empty.\n\nExample 1:\nnums1 = [1, 3]\nnums2 = [2]\nThe median is 2.0\n\nExample 2:\nnums1 = [1, 2]\nnums2 = [3, 4]\nThe median is (2 + 3)/2 = 2.5\n\"\"\"\n\nclass Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n pointer1 = 0\n pointer2 = 0\n length = len(nums1) + len(nums2)\n\n nums3 = []\n for index in range((length//2)+1):\n if pointer1 >= len(nums1):\n nums3.append(nums2[pointer2])\n pointer2 += 1\n elif pointer2 >= len(nums2):\n nums3.append(nums1[pointer1])\n pointer1 += 1\n elif nums1[pointer1] <= nums2[pointer2]:\n nums3.append(nums1[pointer1])\n pointer1 += 1\n elif nums1[pointer1] > nums2[pointer2]:\n nums3.append(nums2[pointer2])\n pointer2 += 1\n\n if length % 2:\n return nums3[-1]\n else:\n return (nums3[-2] + nums3[-1]) / 2\n\n\nimport unittest\n\n\nclass TestSolution(unittest.TestCase):\n def setUp(self):\n self.solution = Solution()\n\n def test_case_1(self):\n result = self.solution.findMedianSortedArrays([1, 2], [3, 4])\n self.assertEqual(result, 2.5)\n\n def test_case_2(self):\n result = self.solution.findMedianSortedArrays([3, 4], [1, 2])\n self.assertEqual(result, 2.5)\n \n def test_case_3(self):\n result = self.solution.findMedianSortedArrays([1, 3], [2])\n self.assertEqual(result, 2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/4_findMedianSortedArrays.py","file_name":"4_findMedianSortedArrays.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"87114038","text":"from router import Router\n\nimport unittest\n\nclass Handler:\n def GET(self):\n pass\n\n\ntest_routes = {\n '/': {\n '': 'test.test_router.Handler'\n }\n}\n\n\nclass TestRouter(unittest.TestCase):\n \n\n def test_urls(self):\n r = Router(test_routes)\n urls = r.urls()\n self.assertEqual(len(urls), 2)\n\n\n def test_class_paths(self):\n r = Router(test_routes)\n paths = r.class_paths()\n self.assertTrue(isinstance(paths, dict))\n self.assertEqual(len(paths), 1)\n","sub_path":"test/test_router.py","file_name":"test_router.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"297582145","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pprint\n\nres = requests.get('https://news.ycombinator.com/newest')\nsoup = BeautifulSoup(res.text, 'html.parser')\nlinks = soup.select('.storylink')\nsubtext = soup.select('.subtext')\n\ndef sorted_hn(hnlist) :\n return sorted(hnlist, key = lambda k : k['points'], reverse=True)\n\ndef create_custom_hn(links, subtext) :\n hn = []\n for idx, items in enumerate(links) :\n title = links[idx].getText()\n href = links[idx].get('href', 'none')\n votes = subtext[idx].select('.score')\n if len(votes) :\n points = votes[0].getText().split(' ')\n\n point = points[0]\n hn.append({'Title': title, 'link': href, 'points': point})\n return sorted_hn(hn)\n\npprint.pprint(create_custom_hn(links,subtext))","sub_path":"hackernews_project.py","file_name":"hackernews_project.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"490542666","text":"import tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import datasets, layers, optimizers, Sequential, metrics\r\n\r\ndef preprocess(x, y):\r\n\r\n\tx = tf.cast(x,dtype=tf.float32)/255.0\r\n\ty = tf.cast(y,dtype=tf.int32)\r\n\treturn x,y\r\n\r\n(x_train,y_train), (x_test,y_test) = datasets.fashion_mnist.load_data()\r\n\r\nprint(x_train.shape,y_train.shape)\r\n\r\ndb = tf.data.Dataset.from_tensor_slices((x_train,y_train))\r\ndb = db.map(preprocess).batch(128)#.shuffle(10000)\r\n\r\n#print(db.shape)\r\n\r\ndb_test = tf.data.Dataset.from_tensor_slices((x_test,y_test))\r\ndb_test = db_test.map(preprocess).batch(128)\r\n\r\n#db_iter = iter(db)\r\n#sample = next(db_iter)\r\n\r\n# 6-layer dense neural network\r\nmodel = Sequential([\r\n\tlayers.Flatten(input_shape=(28, 28)),\r\n\tlayers.Dense(392, activation=tf.nn.relu),\r\n\tlayers.Dense(196, activation=tf.nn.relu),\r\n\tlayers.Dense(98, activation=tf.nn.relu),\r\n\tlayers.Dense(49, activation=tf.nn.relu),\r\n\tlayers.Dense(24, activation=tf.nn.relu),\r\n\tlayers.Dense(10)\r\n])\r\n\r\nmodel.build(input_shape = [None, 28*28])\r\nmodel.summary()\r\noptimizer = optimizers.Adam(lr=0.0005)\r\n\r\n\r\n\r\ndef main():\r\n\r\n # Training\r\n\tfor epoch in range(100):\r\n\t\tfor step, (x,y) in enumerate(db):\r\n\t\t\t#x = tf.reshape(x, [-1, 28*28])\r\n\t\t\twith tf.GradientTape() as tape:\r\n\t\t\t\tlogits = model(x)\r\n\t\t\t\ty_onehot = tf.one_hot(y, depth = 10)\r\n\t\t\t\tloss = tf.reduce_mean(tf.losses.MSE(y_onehot,logits))\r\n\t\t\t\tloss2 = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits = True)\r\n\t\t\t\tloss2 = tf.reduce_mean(loss2)\r\n\t\t\t\t\r\n\t\t\tgrads = tape.gradient(loss2, model.trainable_variables)\r\n\t\t\toptimizer.apply_gradients(zip(grads,model.trainable_variables))\r\n\t\t\t\r\n\t\t\tif step%100 == 0:\r\n\t\t\t\tprint(epoch, step, 'loss: ', float(loss2), float(loss))\r\n\t\t\t\t\r\n\t# Testing\r\n\t\ttotal_correct = 0\r\n\t\ttotal_sum = 0\r\n\t\tfor x,y in db_test:\r\n\t\t\t#x = tf.reshape(x, [-1, 28*28])\r\n\t\t\tlogits = model(x)\r\n\t\t\tprob = tf.nn.softmax(logits, axis = 1)\r\n\t\t\tpred = tf.argmax(prob, axis = 1)\r\n\t\t\tpred = tf.cast(pred, dtype = tf.int32)\r\n\t\t\tcorrect = tf.equal(pred, y)\r\n\t\t\tcorrect = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))\r\n\t\t\ttotal_correct += int(correct)\r\n\t\t\ttotal_sum += x.shape[0]\r\n\t\t\r\n\t\tratio = total_correct/total_sum\r\n\t\tprint(epoch, 'accuracy: ', ratio)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"fashion_mnist_dense.py","file_name":"fashion_mnist_dense.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"337078749","text":"# fab is written in Python 2, so the easiest way to invoke it is via a subprocess\nfrom os import path\nimport subprocess\nTHIS_FOLDER = path.dirname(path.abspath(__file__))\n\n\ndef reset_database(host):\n subprocess.check_call(['fab', 'reset_database', '--host={}'.format(host)],\n cwd=THIS_FOLDER,)\n\ndef create_session_on_server(host, email):\n return subprocess.check_output(['fab',\n 'create_session_on_server:email={}'.format(email),\n '--host={}'.format(host),\n '--hide=everything,status', ],\n cwd=THIS_FOLDER,\n ).decode().strip()\n","sub_path":"functional_tests/server_tools.py","file_name":"server_tools.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"468862663","text":"#encoding: utf-8\nfrom django import forms\n\nfrom django.utils.translation import ugettext_lazy as _, ugettext\nfrom django.utils.text import capfirst, get_text_list\nfrom crispy_forms.helper import FormHelper, Layout\nfrom crispy_forms.layout import Field, Div, Row, HTML\nfrom crispy_forms.bootstrap import FormActions, TabHolder, Tab, \\\n PrependedAppendedText, PrependedText, InlineRadios\n\nfrom django.utils.timezone import get_current_timezone\nfrom datetime import datetime\n\nfrom apps.utils.forms import smtSave, btnCancel, btnReset\n\nfrom ..models.mascota import Mascota, BOOL_GENERO, TIPO_MASCOTA, CONDICION\n\nclass MascotaForm(forms.ModelForm):\n \"\"\"Mascota Form.\"\"\"\n class Meta:\n \"\"\"Meta.\"\"\"\n model = Mascota\n exclude = ()\n fields = ['nombre','dueño','fecha_nacimiento','genero','especie','raza','color','esterelizado','descripcion',]\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n self.object = kwargs.pop('object', None)\n\n super(MascotaForm, self).__init__(*args, **kwargs)\n\n self.fields['nombre'] = forms.CharField(\n label=capfirst(_(u'nombre')),\n required=True,\n help_text=u' %s' % _(\n u' '),\n )\n self.fields['fecha_nacimiento'] = forms.DateTimeField(\n label=_(u'Fecha Nacimiento'), required=False,\n initial=datetime.now().replace(tzinfo=get_current_timezone()),\n widget=forms.DateTimeInput(format='%Y-%m-%d %H:%M:%S',),\n input_formats=(\n '%d/%m/%Y', '%d/%m/%y', '%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d',\n '%Y-%m-%d %H:%M:%S'),\n help_text=u' %s' % _(\n u''),\n )\n self.fields['genero'] = forms.ChoiceField(\n label=capfirst(_(u'genero*:')), required=False,\n choices=BOOL_GENERO,\n widget=forms.RadioSelect(attrs={'default':'macho',}),\n help_text=u' %s' % _(\n u' '),\n )\n self.fields['especie'] = forms.ChoiceField(\n label=capfirst(_(u'tipo Mascota')), required=False,\n choices=TIPO_MASCOTA,\n help_text=u' %s' % _(\n u' '),\n )\n self.fields['raza'] = forms.CharField(\n label=capfirst(_(u'raza')), required=True,\n help_text=u' %s' % _(\n u' '),\n )\n self.fields['color'] = forms.CharField(\n label=capfirst(_(u'color')), required=True,\n help_text=u' %s' % _(\n u' '),\n )\n self.fields['cond_corporal'] = forms.ChoiceField(\n label=capfirst(_(u'C. 
Corporal')), required=False,\n choices=CONDICION,\n help_text=u' %s' % _(\n u' '),\n )\n self.fields['esterelizado'] = forms.BooleanField(\n label=capfirst(_(u'¿Esterelizado?')), required=False,\n help_text=u' %s' % _(\n u' '),\n )\n\n self.fields['descripcion'] = forms.CharField(\n label=capfirst(_(u'Descripcion')), required=False,\n widget=forms.Textarea(attrs = {'rows': 4, }),\n\n help_text=u' %s' % _(\n u' '),\n )\n\n self.helper = FormHelper()\n\n self.helper.form_method = 'post'\n self.helper.form_class = 'js-validate form-vertical form-mascota'\n self.helper.layout = Layout(\n Row(\n Div(\n Field('nombre', placeholder=\"Introdusca el nombre de la mascota\", css_class='input-required'),\n css_class=\"col-md-4\"),\n Div(\n Field('dueño', css_class=\"chosen-select select-medica\", tabindex=\"6\"),\n css_class='col-md-4'),\n Div(\n Field('fecha_nacimiento', css_class='input-datex'),\n css_class=\"col-md-4\"),\n ),\n Row(\n Div(\n Field('especie',),\n css_class=\"col-md-3\"),\n Div(\n Field('raza',placeholder=\"Introdusca la raza\", css_class='input-required' ),\n css_class=\"col-md-3\"),\n Div(\n Field('color', placeholder=\"Introdusca el color\", css_class='input-required'),\n css_class=\"col-md-3\"),\n Div(\n Field('cond_corporal', ),\n css_class=\"col-md-3\"),\n ),\n Row(\n Div(\n Div(\n InlineRadios('genero', default=\"macho\"),\n Field('esterelizado',),\n css_class='mascota-opcion'),\n css_class='col-md-3'),\n Div(\n Field('descripcion', placeholder=\"Introdusca una cualidad de la mascota\"),\n css_class='col-md-9'),\n ),\n Row(\n Div(\n FormActions(\n smtSave(),\n btnCancel(),\n btnReset(),\n ),\n css_class=\"col-md-12 btn-controls\"),\n ),\n )\n","sub_path":"apps/clinica/forms/mascotaform.py","file_name":"mascotaform.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"554318028","text":"import shutil\nimport os\nimport io\nimport srctools\nimport contextlib\nfrom zipfile import ZipFile, ZipInfo\n\n\nico_path = os.path.realpath(os.path.join(os.getcwd(), \"../bee2.ico\"))\n\n\n# src -> build subfolder.\ndata_files = [\n ('../BEE2.ico', '.'),\n ('../BEE2.fgd', '.'),\n ('../images/BEE2/*.png', 'images/BEE2/'),\n ('../images/icons/*.png', 'images/icons/'),\n ('../images/splash_screen/*.jpg', 'images/splash_screen/'),\n ('../palettes/*.bee2_palette', 'palettes/'),\n\n # Add the FGD data for us.\n (os.path.join(srctools.__path__[0], 'fgd.lzma'), 'srctools'),\n (os.path.join(srctools.__path__[0], 'srctools.fgd'), 'srctools'),\n\n]\n\ndef get_localisation(key):\n \"\"\"Get localisation files from Loco.\"\"\"\n import requests\n\n # Make the directories.\n os.makedirs('../i18n/', exist_ok=True)\n\n print('Reading translations... 
', end='', flush=True)\n zip_request = requests.get(\n 'https://localise.biz/api/export/archive/mo.zip',\n headers={\n 'Authorization': 'Loco ' + key,\n },\n params={\n 'path': '{%lang}{_%region}.{%ext}',\n },\n )\n zip_file = ZipFile(io.BytesIO(zip_request.content))\n print('Done!')\n\n print('Translations: ')\n\n for file in zip_file.infolist(): # type: ZipInfo\n if 'README.txt' in file.filename:\n continue\n filename = os.path.basename(file.filename)\n print(filename)\n # Copy to the dev and output directory.\n with zip_file.open(file) as src, open('../i18n/' + filename, 'wb') as dest:\n shutil.copyfileobj(src, dest)\n data_files.append((dest.name, 'i18n'))\n\nget_localisation('kV-oMlhZPJEJoYPI5EQ6HaqeAc1zQ73G')\n\n\n# Exclude bits of modules we don't need, to decrease package size.\nEXCLUDES = [\n # We just use idlelib.WidgetRedirector\n 'idlelib.ClassBrowser',\n 'idlelib.ColorDelegator',\n 'idlelib.Debugger',\n 'idlelib.Delegator',\n 'idlelib.EditorWindow',\n 'idlelib.FileList',\n 'idlelib.GrepDialog',\n 'idlelib.IOBinding',\n 'idlelib.IdleHistory',\n 'idlelib.MultiCall',\n 'idlelib.MultiStatusBar',\n 'idlelib.ObjectBrowser',\n 'idlelib.OutputWindow',\n 'idlelib.PathBrowser',\n 'idlelib.Percolator',\n 'idlelib.PyParse',\n 'idlelib.PyShell',\n 'idlelib.RemoteDebugger',\n 'idlelib.RemoteObjectBrowser',\n 'idlelib.ReplaceDialog',\n 'idlelib.ScrolledList',\n 'idlelib.SearchDialog',\n 'idlelib.SearchDialogBase',\n 'idlelib.SearchEngine',\n 'idlelib.StackViewer',\n 'idlelib.TreeWidget',\n 'idlelib.UndoDelegator',\n 'idlelib.WindowList',\n 'idlelib.ZoomHeight',\n 'idlelib.aboutDialog',\n 'idlelib.configDialog',\n 'idlelib.configHandler',\n 'idlelib.configHelpSourceEdit',\n 'idlelib.configSectionNameDialog',\n 'idlelib.dynOptionMenuWidget',\n 'idlelib.idle_test.htest',\n 'idlelib.idlever',\n 'idlelib.keybindingDialog',\n 'idlelib.macosxSupport',\n 'idlelib.rpc',\n 'idlelib.tabbedpages',\n 'idlelib.textView',\n\n 'bz2', # We aren't using this compression format (shutil, zipfile etc handle ImportError)..\n\n 'sqlite3', # Imported from aenum, but we don't use that enum subclass.\n\n # Imported by logging handlers which we don't use..\n 'win32evtlog',\n 'win32evtlogutil',\n 'smtplib',\n\n 'unittest', # Imported in __name__==__main__..\n 'doctest',\n 'optparse',\n 'argparse',\n]\n\nblock_cipher = None\n\n\n# AVbin is needed to read OGG files.\nINCLUDE_PATHS = [\n 'C:/Windows/system32/avbin.dll', # Win 32 bit\n 'C:/Windows/sysWOW64/avbin64.dll', # Win 64 bit\n '/usr/local/lib/libavbin.dylib', # OS X\n '/usr/lib/libavbin.so', # Linux\n]\n\n# Filter out files for other platforms\nINCLUDE_LIBS = [\n (path, '.') for path in INCLUDE_PATHS\n if os.path.exists(path)\n]\n\nbee_version = input('BEE2 Version: ')\n\n# Write this to the temp folder, so it's picked up and included.\n# Don't write it out though if it's the same, so PyInstaller doesn't reparse.\nversion_val = 'BEE_VERSION=' + repr(bee_version)\nversion_filename = os.path.join(workpath, 'BUILD_CONSTANTS.py')\n\nwith contextlib.suppress(FileNotFoundError), open(version_filename) as f:\n if f.read().strip() == version_val:\n version_val = ''\n\nif version_val:\n with open(version_filename, 'w') as f:\n f.write(version_val)\n\nfor snd in os.listdir('../sounds/'):\n if snd == 'music_samp':\n continue\n data_files.append(('../sounds/' + snd, 'sounds'))\n\n\n# We need to include this version data.\ntry:\n import importlib_resources\n data_files.append(\n (\n os.path.join(importlib_resources.__path__[0], 'version.txt'),\n 'importlib_resources',\n )\n 
)\nexcept ImportError:\n pass\n\n# Finally, run the PyInstaller analysis process.\n\nbee2_a = Analysis(\n ['BEE2_launch.pyw'],\n pathex=[workpath, os.path.dirname(srctools.__path__[0])],\n binaries=INCLUDE_LIBS,\n datas=data_files,\n hiddenimports=[\n 'PIL._tkinter_finder',\n ],\n hookspath=[],\n runtime_hooks=[],\n excludes=EXCLUDES,\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False\n)\n\n# Need to add this manually, so it can have a different name.\nbee2_a.datas.append((\n 'README.txt',\n os.path.join(os.getcwd(), '../INSTALL_GUIDE.txt'),\n 'DATA',\n))\n\npyz = PYZ(\n bee2_a.pure,\n bee2_a.zipped_data,\n cipher=block_cipher\n)\n\nexe = EXE(\n pyz,\n bee2_a.scripts,\n [],\n exclude_binaries=True,\n name='BEE2',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n console=False,\n windowed=True,\n icon='../BEE2.ico'\n)\n\ncoll = COLLECT(\n exe,\n bee2_a.binaries,\n bee2_a.zipfiles,\n bee2_a.datas,\n strip=False,\n upx=True,\n name='BEE2',\n)\n","sub_path":"src/BEE2.spec","file_name":"BEE2.spec","file_ext":"spec","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"327546928","text":"# Лабораторная работа №5 Вариант 2.\r\nfrom math import tan, acosh\r\nprint('Здравствуйте')\r\n\r\n# Ввод значений\r\na = int(input('Введите a:'))\r\nxMin = int(input('Введите минимальное значение x: '))\r\nxMax = int(input('Введите максимальное значение x: '))\r\nstepCount = int(input('Введите количество шагов для вычисления функции: '))\r\nif stepCount <= 0:\r\n print ('Ошибка: Количество шагов не может быть меньше или равно нулю')\r\n exit()\r\n\r\n#Выбор Функции\r\nfunc = int(input('Выберите Функцию для вычиления: '\r\n '\\n Для расчета функции G: Введите 1'\r\n '\\n Для расчета функции F: Введите 2'\r\n '\\n Для расчета функции Y: Введите 3'\r\n '\\nНомер Функции: '))\r\nif (func > 3) or (func < 1):\r\n print('Ошибка: Такой функции не существует')\r\n\r\n# Функции\r\nX = []\r\nR = []\r\n\r\ndef calc(a, x):\r\n if func == 1:\r\n try:\r\n X.append(x)\r\n R.append((7 * ((-15 * a**2) + (22 * a * x) + (5 * x**2))) / ((4 * a**2) + (7 * a * x) + (3 * x**2)))\r\n except ZeroDivisionError:\r\n print ('Ошибка: Деление на Ноль')\r\n\r\n\r\n elif func == 2:\r\n F = -tan((4 * a**2) - (3 * a * x) - (7 * x**2))\r\n X.append(x)\r\n R.append(F)\r\n\r\n\r\n elif func == 3:\r\n try:\r\n X.append(x)\r\n R.append(acosh((-7 * a**2) + (20 * a * x) + (3 * x**2) + 1))\r\n except ValueError:\r\n print('Ошибка: Значение выходит за область определения функции')\r\n\r\n# Цикл\r\ncount = 0\r\nwhile count < stepCount:\r\n x = xMin + count\r\n if xMin < xMax:\r\n calc(a, x)\r\n count +=1\r\n else:\r\n print('Ошибка: Максимальное значение меньше или равно минимальному')\r\n break\r\n\r\n# Массив значений\r\nprint('Массив значений: \\n' + str(R))\r\n\r\n# Поиск значений\r\nneedFind = float(input('Введите число которое необходимо найти: '))\r\nprint('Совпадений найдено: ' + str(R.count(needFind)))\r\n","sub_path":"Lab5.py","file_name":"Lab5.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"148835353","text":"from __future__ import division, print_function\nfrom multiprocessing import Pool\nimport argparse\nimport sys\n\nimport numpy as np\nfrom cooler.io import CoolerAggregator\nimport cooler.ice\nimport cooler\nimport h5py\n\n\nFACTOR = 2\nTILESIZE = 256\nN_CPU = 8\n\n\ndef 
set_postmortem_hook():\n import sys, traceback, ipdb\n def _excepthook(exc_type, value, tb):\n traceback.print_exception(exc_type, value, tb)\n print()\n ipdb.pm()\n sys.excepthook = _excepthook\n\nset_postmortem_hook()\n\n\ndef main(infile, outfile, chunksize):\n c = cooler.Cooler(infile)\n binsize = c.info['bin-size']\n chromtable = c.chroms()[:]\n chromsizes = chromtable.set_index('name')['length']\n chroms = chromtable['name'].values\n lengths = chromtable['length'].values\n total_length = np.sum(chromsizes.values)\n n_tiles = total_length / binsize / TILESIZE\n n_zooms = int(np.ceil(np.log2(n_tiles)))\n\n print(\n \"Copying base matrix to level {0} and producing {0} zoom levels starting from 0...\".format(n_zooms),\n file=sys.stderr\n )\n\n # transfer base matrix\n with h5py.File(outfile, 'w') as dest, \\\n h5py.File(infile, 'r') as src:\n\n zoomLevel = str(n_zooms)\n src.copy('/', dest, zoomLevel)\n\n print(zoomLevel, file=sys.stderr)\n\n\n # produce aggregations\n with h5py.File(outfile, 'r+') as f:\n grp = f[str(n_zooms)]\n c = cooler.Cooler(grp)\n binsize = cooler.info(grp)['bin-size']\n\n for i in range(n_zooms - 1, -1, -1):\n zoomLevel = str(i)\n\n # aggregate\n new_binsize = binsize * FACTOR\n new_bins = cooler.util.binnify(chromsizes, new_binsize)\n \n reader = CoolerAggregator(c, new_bins, chunksize)\n \n grp = f.create_group(zoomLevel)\n f.attrs[zoomLevel] = new_binsize\n cooler.io.create(grp, chroms, lengths, new_bins, reader)\n\n # balance\n with Pool(N_CPU) as pool:\n too_close = 20000 # for HindIII\n ignore_diags = max(int(np.ceil(too_close / new_binsize)), 3)\n\n bias = cooler.ice.iterative_correction(\n f, zoomLevel,\n chunksize=chunksize,\n min_nnz=10,\n mad_max=3,\n ignore_diags=ignore_diags,\n map=pool.map)\n h5opts = dict(compression='gzip', compression_opts=6)\n grp['bins'].create_dataset('weight', data=bias, **h5opts)\n\n print(zoomLevel, file=sys.stderr)\n\n c = cooler.Cooler(grp)\n binsize = new_binsize\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"Recursively aggregate a single resolution cooler file into a multi-resolution file.\")\n parser.add_argument(\n \"cooler_file\",\n help=\"Cooler file\",\n metavar=\"COOLER_PATH\")\n parser.add_argument(\n \"--out\", \"-o\",\n help=\"Output text file\")\n args = vars(parser.parse_args())\n\n\n infile = args['cooler_file']\n if args['out'] is None:\n outfile = infile.replace('.cool', '.multires.cool')\n else:\n outfile = args['out']\n\n chunksize = int(1e6)\n main(infile, outfile, chunksize)\n\n","sub_path":"api/coolers/recursive_agg_onefile.py","file_name":"recursive_agg_onefile.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"650354196","text":"\"\"\"\nSolver for 8 queens problem\nProblem statement:\non an 8x8 board put 8 queens such that they do not attack each other\nSolution: A classical constraint propagation algorithm\n\"\"\"\nfrom collections import deque\nfrom copy import deepcopy\n\nROWS = \"12345678\"\nCOLS = \"ABCDEFGH\"\n\n\ndef cross(xs: str, ys: str) -> list:\n return [x+y for x in xs for y in ys]\n\n\ndef get_peers(val: str) -> set:\n peers = set()\n [peers.add(x) for x in cross(val[0], ROWS)]\n [peers.add(x) for x in cross(COLS, val[1])]\n\n ind_col = COLS.index(val[0])\n ind_row = ROWS.index(val[1])\n\n cols_left = COLS[:ind_col]\n cols_right = COLS[(ind_col + 1):]\n rows_up = ROWS[(ind_row + 1):]\n rows_down = ROWS[:ind_row]\n\n [peers.add(tp[0] + tp[1]) for tp in 
zip(cols_right, rows_up)]\n [peers.add(tp[0] + tp[1]) for tp in zip(cols_right, reversed(rows_down))]\n [peers.add(tp[0] + tp[1]) for tp in zip(reversed(cols_left), reversed(rows_down))]\n [peers.add(tp[0] + tp[1]) for tp in zip(reversed(cols_left), rows_up)]\n\n return peers\n\n\ndef is_present(solutions: list, solution: set) -> bool:\n for x in solutions:\n if len(solution - x) == 0:\n return True\n return False\n\n\nif __name__ == \"__main__\":\n\n search = deque()\n cells = cross(COLS, ROWS)\n all_states = set() # for tracking states and not including existing ones\n\n peers = {}\n for x in cells:\n peers[x] = get_peers(x)\n\n for x in cells:\n search.append(([x], set(cells) - peers[x]))\n\n solutions = []\n count = 0\n\n while len(search) != 0:\n state = search.pop()\n if len(state[0]) == 8:\n solution_tmp = set(state[0])\n if not is_present(solutions, solution_tmp):\n solutions.append(solution_tmp)\n count += 1\n print(\"Solution ID:\" + str(count) + \" \" + str(state[0]))\n\n if len(state[1]) != 0:\n for x in state[1]:\n state_tmp = deepcopy(state)\n val = state_tmp[0]\n val.append(x)\n visited_state = frozenset(val)\n if visited_state not in all_states:\n search.append((val, state_tmp[1] - peers[x]))\n all_states.add(visited_state)\n","sub_path":"eight_queens/8queens.py","file_name":"8queens.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"291808940","text":"# -*- coding:utf-8 -*- \n\nimport os.path\nimport re\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport cifar10\n\nbatch_size = 128\nmax_steps = 1000000\nnum_gpus = 4\ndata_dir = '../data/CIFAR10_data'\n#每个GPU生成独立的网络,网络结构完全一致,且共享参数\ndef tower_loss(scope):\n images, labels = cifar10.distorted_inputs()\n logits = cifar10.inference(images)\n _ = cifar10.loss(logits, labels)\n losses = tf.get_collection('losses', scope)\n total_loss = tf.add_n(losses, name='total_loss')\n return total_loss\n\n'''\n外层是不同GPU计算的梯度,内层是某个GPU对应的不同var的值\ntower_grads = \n[[(grad0_gpu0, var0_gpu0), (grad1_gpu0, var1_gpu0),...],\n [(grad0_gpu1, var0_gpu1), (grad1_gpu1, var1_gpu1),...]]\nzip(*tower_grads)= 相当于转置了\n[[(grad0_gpu0, var0_gpu0), (grad0_gpu1, var0, gpu1),...],\n [(grad1_gpu0, var1_gpu0), (grad1_gpu1, var1_gpu1),...]]\n'''\n#将不同GPU计算的梯度进行合成\ndef average_gradients(tower_grads):\n average_grads = []\n #*为解包裹,解出来作为多个参数\n for grad_and_vars in zip(*tower_grads):\n grads = []\n for g, _ in grad_and_vars:\n #增加一个维度 1x2\n expanded_g = tf.expand_dims(g, 0)\n #获取所有GPU的梯度 Nx2\n grads.append(expanded_g)\n grads = tf.concat(grads, 0)\n #1x2\n grads = tf.reduce_mean(grads, 0)\n v = grad_and_vars[0][1]\n grad_and_vars = (grads, v)\n average_grads.append(grad_and_vars)\n return average_grads\n\ndef train():\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)\n num_batches_per_epoch = cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size\n #学习率衰减所需步数\n decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)\n lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n cifar10.LEARNING_RATE_DECAY_FACTOR,\n staircase=True)\n opt = tf.train.GradientDescentOptimizer(lr)\n tower_grads = []\n for i in range(num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:\n loss = tower_loss(scope)\n #opt的参数已经被更新,重用大家一起算出来的参数\n 
tf.get_variable_scope().reuse_variables()\n grads = opt.compute_gradients(loss)\n tower_grads.append(grads)\n grads = average_gradients(tower_grads)\n #更新参数,所有gpu共享同一个opt,所以opt跟新的权重会得到共享\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n saver = tf.train.Saver(tf.all_variables())\n init = tf.global_variables_initializer()\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n sess.run(init)\n tf.train.start_queue_runners(sess=sess)\n for step in range(max_steps):\n start_time = time.time()\n _, loss_value = sess.run([apply_gradient_op, loss])\n duration = time.time() - start_time\n\n if step % 10 == 0:\n num_example_per_step = batch_size * num_gpus\n examples_per_sec = num_example_per_step / duration\n sec_per_batch = duration / num_gpus\n\n format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')\n print(format_str % (step, loss_value, examples_per_sec, sec_per_batch))\n if step % 1000 == 0 or (step + 1) == max_steps:\n saver.save(sess, '../data/CIFAR10_data/models/model.ckpt', global_step=step)\ncifar10.maybe_download_and_extract()\ntrain()","sub_path":"TensorFlow/GPU_parallel_cifar10.py","file_name":"GPU_parallel_cifar10.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"366415243","text":"import datetime\r\nfrom freezegun import freeze_time\r\nfrom math import pi\r\nfrom time import sleep\r\n\r\nclass Bubble:\r\n\r\n inst_id = 0 # setting the instance id value to zero\r\n creation_log = {} # empty log dictionary\r\n\r\n # creating logger method for the class in order to make records to creation_log\r\n # when new instance of Bubble class is created\r\n @classmethod\r\n def creation_logger(cls, creation_time):\r\n cls.creation_log[cls.inst_id] = creation_time\r\n cls.inst_id += 1\r\n return cls.creation_log\r\n\r\n def __init__(self, radius=None, capacity=None):\r\n if radius is None:\r\n self.capacity = capacity\r\n self.radius = ((3*self.capacity)/(4*pi))**(1/3)\r\n self.id = Bubble.inst_id\r\n Bubble.creation_logger(self._get_date())\r\n else:\r\n self.radius = radius\r\n self.capacity = (4*pi*(self.radius**3))/3\r\n self.id = Bubble.inst_id\r\n Bubble.creation_logger(self._get_date())\r\n\r\n @staticmethod \r\n def _get_date(): # static method for getting current time\r\n return datetime.datetime.now()\r\n\r\n @property\r\n def square(self):\r\n square = 4*pi*(self.radius**2)\r\n return square\r\n\r\n #setting the rules for addition and subtraction of\r\n #two spheres and returning new sphere object as a result\r\n\r\n def __add__(self, other):\r\n return Bubble(self.radius + other.radius)\r\n\r\n def __sub__(self, other):\r\n if self.radius >= other.radius:\r\n return Bubble(self.radius - other.radius)\r\n else:\r\n raise ValueError(\"Cannot set negative value for a radius\")\r\n\r\n# Printing creation log for 4 instances of the class\r\n# a = Bubble(None, 1)\r\n# sleep(0.5)\r\n# b = Bubble(None, 4)\r\n# sleep(0.5)\r\n# c = a+b\r\n# sleep(0.5)\r\n# d = b-a\r\n# print(Bubble.creation_log)\r\n\r\n# Tests for time value in the creation log\r\nwith freeze_time('2019-01-20 06:06:06'):\r\n a = Bubble(None, 1)\r\n assert Bubble.creation_log[0] == datetime.datetime.now()\r\n b = Bubble(None, 4)\r\n assert Bubble.creation_log[1] == datetime.datetime.now()\r\n c = a+b\r\n assert Bubble.creation_log[2] == datetime.datetime.now()\r\n assert (c.id - b.id) == 1 # this is for checking if creation_logger() method was called only once\r\n d = 
b-a\r\n assert Bubble.creation_log[3] == datetime.datetime.now()\r\n","sub_path":"homework/jenya_s/bubble_test.py","file_name":"bubble_test.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"359010552","text":"from my_array import Array\n\nclass _MapEntry : \n def __init__(self, key, value):\n self.key = key\n self.value = value \n\nUNUSED = None # slot never used \nEMPTY = _MapEntry(None, None) # key removed\n\nclass HashTable_LinearProbing : \n def __init__(self, size):\n self._table = Array(size) \n self._table.clear(UNUSED)\n self._size = size\n self._count = 0\n self.slotsAccessed = 0\n \n def __len__(self):\n return self._count\n\n def __contains__(self, key):\n (found, slot) = self._findSlot(key)\n return found\n \n # insert (if not already in table)\n # return True/False is key inserted/not\n def insert(self, key, value):\n (found, slot) = self._findSlot(key)\n if not found :\n self._table[slot] = _MapEntry(key, value)\n self._count += 1\n return not found\n \n # remove (key, value) (if in the table)\n # return True/False is key removed/not\n def remove(self, key):\n (found, slot) = self._findSlot(key)\n if found :\n self._table[slot] = EMPTY\n self._count -= 1\n return found\n\n # find the slot where a key is or should be inserted\n # return (True/False, slot) if key was found/not\n def _findSlot(self, key):\n home = self._hash1(key)\n i = 0\n \n # Iterate through it once\n while (i <= self._size):\n slot = (home + i) % self._size\n self.slotsAccessed += 1\n # If we come across an unused slot, it means that its\n # Never been added before, so we should add it here\n if self._table[slot] == UNUSED:\n return (False, slot)\n elif self._table[slot] != EMPTY and self._table[slot].key == key:\n return (True, slot)\n i += 1\n # Iterate once more\n i = 0\n if (self._table[home] == EMPTY):\n self.slotsAccessed += 1\n return (False, slot)\n while (i < self._size):\n slot = (home + i) % self._size\n self.slotsAccessed += 1\n if self._table[slot] == EMPTY:\n return (False, slot)\n i += 1\n\n # compute first slot attempted\n def _hash1(self, key): \n return abs(hash(key)) % self._size\n\n\n# ht = HashTable_LinearProbing(98)\n# print ht._hash1(55)\n# ht.insert(26, 'Jam')\n# ht.insert(26, 'Jammy')\n# ht.insert(26, 'POO')\n# ht.insert(27, 'Jelly')\n# ht.insert(28, 'Marmalade')\n# for index, item in enumerate(range(0, ht._size)):\n# print ht._table[index]\n\n# ht.remove(26)\n# print '============'\n# for item in ht._table:\n# if item:\n# print '{} {}'.format(item.value, item.key)\n# print '============'\n# ht.insert(26, 'PBJ')\n# for item in ht._table:\n# if item:\n# print '{} {}'.format(item.value, item.key)\n# print \"======HELLO WORLD=======\"\n# print ht._table[27].value\n# print ht._table[26].value","sub_path":"assignment_3/HashTable_LinearProbing.py","file_name":"HashTable_LinearProbing.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"501388411","text":"#!/usr/bin/python3\n\"\"\"Module to make a simple Flask web application.\n\"\"\"\n\nfrom flask import Flask\napp = Flask(__name__)\n\n\n@app.route('/', strict_slashes=False)\ndef root_page():\n \"\"\"Helper function to serve a message on the '/' route.\n\n Take in account that this function will be triggered\n with or without trailing slashes ('0.0.0.0:5000' or '0.0.0.0:5000/').\n\n Decorators:\n app.route\n \"\"\"\n return 'Hello HBNB!'\n\n\nif 
__name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"web_flask/0-hello_route.py","file_name":"0-hello_route.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"321410713","text":"#!/usr/bin/env python3\n\nimport re\n\nfrom .processor.mapping import Mapping as mpg\nfrom .processor.prcmap import PrcMap as Prm\n\n\nclass Sabdakosh():\n def __init__(self):\n self.unirex = Prm.getPreRex()\n self.prerex = mpg.getPreRex()\n self.postrex = mpg.getRexArray()\n self.rawunifile= './res/Chha.txt'\n self.unifile = './res/ChhaPr.txt'\n self.sepfile = './res/ChhaSep.txt'\n #self.outfile = './res/Out.txt'\n #self.infile = './res/Page.txt'\n self.asciifile= './res/ChhaASCII.txt'\n #self.infile = '/home/pranphy/Desktop/Dictionarys.txt'\n\n def rexSub(self,line,rex):\n for rx in rex:\n line = re.sub(rx[0],rx[1],line)\n return line \n\n\n def asciiToUnicode(self):\n '''This function converts the ASCII Nepali characters to Unicode\n since the ASCII for Nepali based on Priti font is not used \n everywhere in the original dictionary there are going to be \n problems which we subsequently address and try to correct\n '''\n '''There are two parts of regular expression substution, the first\n part one to one substitutes the Preeti ascii characters by the\n corresponding unicode character for Nepali. The post part tries\n to correct the misplacement of certain characters in the mapping\n '''\n dicmap = mpg.getMapping()\n rexarray = mpg.getRexArray()\n extmap = mpg.getExtraMap()\n with open (self.rawunifile,'w') as ofl:\n with open(self.asciifile,'r') as fil:\n content = fil.read()\n content = self.rexSub(content,self.prerex)\n cnt = ''\n for char in content:\n try:\n ucc = dicmap[char]\n cnt += ucc\n except KeyError:\n try:\n euc = extmap[char]\n cnt += euc\n except KeyError:\n cnt += char\n \n print('doing a post process')\n cnt = self.rexSub(cnt,self.postrex)\n ofl.write(cnt)\n\n\n def postProcess(self):\n '''This Post process part tries to correct errors that are seen\n during manual inspection. I've added errors found by inspection\n and tried to generalize them to include other such errors to do\n the correction later. Also this part tries to remove the recur-\n rent advertiesment from the downloader. 
Crazy right??\n '''\n with open(self.rawunifile,'r') as inf:\n with open(self.unifile,'w') as otf:\n wholeFile = inf.read()\n for rex in self.unirex:\n wholeFile = re.sub(rex[0],rex[1],wholeFile)\n\n otf.write(wholeFile)\n\n\n def doStuffs(self):\n self.asciiToUnicode()\n self.postProcess()\n self.separateWords()\n\n def separateWords(self):\n seprex = r'[^\\x00-\\x7F]*[/~]*([^\\x00-\\x7F]+—)'\n comprex = re.compile(seprex)\n cnt = 0\n lastPos = 0\n curPos = 0\n with open(self.unifile,'r') as inf:\n with open(self.sepfile,'w') as otf:\n wholeFile = inf.read()\n for m in comprex.finditer(wholeFile):\n lastPos = curPos\n curPos = m.start()\n otf.write('\\n >> \\n'+wholeFile[lastPos:curPos]+'\\n << \\n')\n otf.write(m.group()+'\\n')\n print(cnt,' ',m.start(),' ' , m.group())\n cnt += 1\n\n\n\n \nif __name__ == '__main__':\n obj = Sabdakosh()\n obj.doStuffs()\n","sub_path":"kosh/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"491333700","text":"#####################################################\r\n# Camada Física da Computação\r\n#Carareto\r\n#11/08/2020\r\n#Aplicação\r\n####################################################\r\n\r\n\r\n#esta é a camada superior, de aplicação do seu software de comunicação serial UART.\r\n#para acompanhar a execução e identificar erros, construa prints ao longo do código! \r\n\r\n\r\nfrom enlace import *\r\nimport time\r\n\r\n\r\n# voce deverá descomentar e configurar a porta com através da qual ira fazer comunicaçao\r\n# para saber a sua porta, execute no terminal :\r\n# python -m serial.tools.list_ports\r\n# se estiver usando windows, o gerenciador de dispositivos informa a porta\r\n\r\n#use uma das 3 opcoes para atribuir à variável a porta usada\r\n#serialName = \"/dev/ttyACM0\" # Ubuntu (variacao de)\r\n#serialName = \"/dev/tty.usbmodem1411\" # Mac (variacao de)\r\nserialName = \"COM5\" # Windows(variacao de)\r\n\r\n\r\ndef main():\r\n try:\r\n #declaramos um objeto do tipo enlace com o nome \"com\". Essa é a camada inferior à aplicação. Observe que um parametro\r\n #para declarar esse objeto é o nome da porta.\r\n com = enlace(serialName)\r\n \r\n # Ativa comunicacao. Inicia os threads e a comunicação seiral \r\n com.enable()\r\n \r\n #Se chegamos até aqui, a comunicação foi aberta com sucesso. Faça um print para informar.\r\n print(\"a comunicação foi aberta com sucesso\")\r\n \r\n #aqui você deverá gerar os dados a serem transmitidos. \r\n #seus dados a serem transmitidos são uma lista de bytes a serem transmitidos. Gere esta lista com o \r\n #nome de txBuffer. Esla sempre irá armazenar os dados a serem enviados.\r\n imageR=\"./imgs/imagem.jpg\"\r\n imageW=\"./imgs/recebidaCopia.png\"\r\n\r\n print(\"carregando imagem para transmissão:\")\r\n print(\" - {}\" .format(imageR))\r\n print(\"------------------------\")\r\n\r\n txBuffer = open(imageR,'rb').read() #rb=read byte\r\n #tx buffer com o dado da lista de bytes em que a imagem a ser enviada foi transformada\r\n #txBuffer = bytes(255)\r\n\r\n \r\n \r\n #faça aqui uma conferência do tamanho do seu txBuffer, ou seja, quantos bytes serão enviados.\r\n #print(\"quantidade de bytes sendo enviada: {}\" .format(com.getBufferLen(txBuffer)))############################\r\n \r\n \r\n #finalmente vamos transmitir os dados. 
Para isso usamos a funçao sendData que é um método da camada enlace.\r\n #faça um print para avisar que a transmissão vai começar.\r\n #tente entender como o método send funciona!\r\n\r\n com.sendData(txBuffer)\r\n print(\"A transmissão de dados irá começar\")\r\n\r\n\r\n # A camada enlace possui uma camada inferior, TX possui um método para conhecermos o status da transmissão\r\n # Tente entender como esse método funciona e o que ele retorna\r\n time.sleep(2) #intervalo pra dar tempo de terminar o envio\r\n txSize = com.tx.getStatus() #O quanto de bytes foi enviado (tamanho de fato)\r\n #Se vc não tiver o tamanho que enviou, entra em loop infinito no enlaceRX getNData\r\n time.sleep(2)\r\n print(\"Tamanho enviado: {}\".format(len(txBuffer)))\r\n \r\n \r\n #Agora vamos iniciar a recepção dos dados. Se algo chegou ao RX, deve estar automaticamente guardado\r\n #Observe o que faz a rotina dentro do thread RX\r\n #print um aviso de que a recepção vai começar.\r\n\r\n print(\"A recepção de dados irá começar agora\")\r\n \r\n #Será que todos os bytes enviados estão realmente guardadas? Será que conseguimos verificar?\r\n #Veja o que faz a funcao do enlaceRX getBufferLen\r\n \r\n #acesso aos bytes recebidos\r\n txLen = len(txBuffer) #Tamanho que deveria ter sido enviado\r\n rxBuffer, nRx = com.getData(txLen) #função que confere o que enviou (tamanho)\r\n \r\n \r\n print (\"recebeu {} bytes\".format(len(rxBuffer)))\r\n\r\n #Salva imagem recebida no arquivo\r\n print(\"Salvando dados no arquivo\")\r\n print(\"- {}\".format(imageW))\r\n f = open(imageW,'wb') #write byte\r\n f.write(rxBuffer)\r\n\r\n #fecha arquivo de imagem\r\n f.close()\r\n \r\n \r\n \r\n # Encerra comunicação\r\n print(\"-------------------------\")\r\n print(\"Comunicação encerrada\")\r\n print(\"-------------------------\")\r\n com.disable()\r\n except:\r\n print(\"ops! :-\\\\\")\r\n com.disable()\r\n\r\n #so roda o main quando for executado do terminal ... 
se for chamado dentro de outro modulo nao roda\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Projeto2-Final/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"109009218","text":"# coding:utf-8\nimport gevent\nfrom gevent import monkey;monkey.patch_all()\nimport requests\nimport queue\nurls = []\nfilters = []\nall = ''\nqueue = queue.Queue()\nheaders = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36\"}\nf = open('testawvs.txt','r',encoding='utf-8')# 这里代表另一个电脑跑出的所有url防止两电脑采集到了相同的url\n\ndef test(url):\n try:\n global all\n #print('test:', url)\n res = requests.get(url='http://'+url,headers=headers,timeout=3)\n if res.status_code == 200 and len(res.text) > 5:\n print('http://' + url)\n all = all + url + '\\n'\n except Exception as e:\n pass\n\nfor i in f.readlines():\n i = i.strip().replace('\\r\\n', '').replace('\\n', '')\n if i not in urls:\n print(i)\n if len(i) > 27:\n i = i.split(' ')[0]\n urls.append(i)\n all = all + i + '\\n'\n\n'''\ng_list = []\nwhile urls != []:\n for i in range(1000):\n if urls !=[]:\n try:\n temp = urls[i]\n g = gevent.spawn(test, temp)\n g_list.append(g)\n urls.remove(temp)\n except:\n break\n gevent.joinall(g_list)\n\nf.close()\n'''\nf = open('testawvs.txt', 'w+', encoding='utf-8')# 这里代表另一个电脑跑出的所有url防止两电脑采集到了相同的url\nfor i in all.split('\\n'):\n f.write(i+'\\n')\nf.close()\n\n# 将输出复制到目录txt就好\nprint('Successfully')\n","sub_path":"去重.py","file_name":"去重.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"351785771","text":"from conf import settings\r\nfrom lib import common\r\nimport time\r\n\r\n\r\nlogger = common.get_logger(__name__)\r\n\r\n\r\ncurrent_user = {\r\n 'user': None,\r\n 'login_time': None,\r\n 'timeout': int(\r\n settings.LOGIN_TIMEOUT)}\r\n\r\n\r\ndef auth(func):\r\n\r\n def wrapper(*args, **kwargs):\r\n\r\n if current_user['user']:\r\n\r\n interval = time.time() - current_user['login_time']\r\n\r\n if interval < current_user['timeout']:\r\n\r\n return func(*args, **kwargs)\r\n\r\n name = input('name>>: ')\r\n\r\n db = common.conn_db()\r\n\r\n if db.get(name): # log in\r\n\r\n if db.get(name).get('locked'):\r\n\r\n logger.warning(\r\n 'Try to log in locked %s' %\r\n current_user['user'])\r\n\r\n else:\r\n\r\n logging_error_times = 0\r\n\r\n while True:\r\n\r\n if logging_error_times >= 3:\r\n\r\n logger.warning('%s locked' % (current_user['user']))\r\n\r\n db[name]['locked'] = 1\r\n\r\n common.save_db(db)\r\n\r\n break\r\n\r\n password = input('password>>:')\r\n\r\n if password == db.get(name).get('password'):\r\n\r\n logger.info(\r\n '%s log in successfully.' %\r\n (current_user['user']))\r\n\r\n current_user['user'] = name\r\n\r\n current_user['login_time'] = time.time()\r\n\r\n break\r\n\r\n else:\r\n\r\n logging_error_times += 1\r\n\r\n logger.warning(\r\n '%s %dth incorrect password.' %\r\n (current_user['user'], logging_error_times))\r\n\r\n return func(*args, **kwargs)\r\n\r\n else: # register\r\n\r\n is_register = input('Register? 
(Y/N)')\r\n\r\n if is_register in ['Y', 'y']:\r\n\r\n password = input('password>>')\r\n\r\n db[name] = {\"password\": password, \"money\": 0, \"locked\": 0}\r\n\r\n logger.info(\"Log in successfully\")\r\n\r\n current_user['user'] = name\r\n\r\n current_user['login_time'] = time.time()\r\n\r\n common.save_db(db)\r\n\r\n else:\r\n\r\n logger.info('new user cancelled')\r\n\r\n return func(*args, **kwargs)\r\n\r\n return wrapper\r\n\r\n\r\n@auth\r\ndef shopping():\r\n\r\n db = common.conn_db()\r\n\r\n money = db.get(current_user['user']).get('money')\r\n\r\n print('You have $%d' % money)\r\n\r\n items_dict = {'book': 1, 'bread': 2}\r\n\r\n for k in items_dict:\r\n\r\n print(k)\r\n\r\n items_bought_dic = {}\r\n\r\n while True:\r\n\r\n item_buy = input('Which do you want to buy?>>').strip()\r\n\r\n item_buy_split = item_buy.split(' ')\r\n\r\n if len(item_buy_split) == 2:\r\n\r\n if item_buy_split[0] in items_dict:\r\n\r\n item, item_num = item_buy_split[0], item_buy_split[1]\r\n\r\n item_price = items_dict[item] * int(item_num)\r\n\r\n print(item, ':', item_num, 'spent $%d' % item_price)\r\n\r\n if item_price <= money:\r\n\r\n money -= item_price\r\n\r\n logger.info(\r\n '%s bought %s,and $%d left' %\r\n (current_user['user'], item, money))\r\n\r\n if item in items_bought_dic:\r\n\r\n items_bought_dic[item] += item_num\r\n\r\n else:\r\n\r\n items_bought_dic[item] = item_num\r\n\r\n else:\r\n\r\n print('Insufficient balance')\r\n\r\n else:\r\n\r\n print('Item does not exit.')\r\n\r\n elif item_buy_split[0] in ['q', 'Q']:\r\n\r\n db[current_user['user']]['money'] = money\r\n\r\n common.save_db(db)\r\n\r\n print('You Bought:')\r\n\r\n for v in items_bought_dic:\r\n\r\n print(v)\r\n\r\n print('Balance:', money)\r\n\r\n break\r\n\r\n else:\r\n\r\n print('You should input like (name number),you should seperate by space')\r\n\r\n\r\n@auth\r\ndef balance_operation(opt):\r\n\r\n # check saving\r\n\r\n db = common.conn_db()\r\n\r\n money = db.get(current_user['user']).get('money')\r\n\r\n print('You have $%d left.' % money)\r\n\r\n num = ''\r\n\r\n # check input\r\n\r\n while not num.isdigit():\r\n\r\n num = input('amount >>: ')\r\n\r\n if num in ['q', 'Q']:\r\n\r\n return\r\n\r\n elif not num.isdigit():\r\n\r\n print('Invalid Input')\r\n\r\n num = int(num)\r\n\r\n # deposit\r\n\r\n if opt == 'in':\r\n\r\n money += num\r\n\r\n db[current_user['user']]['money'] = money\r\n\r\n common.save_db(db)\r\n\r\n logger.info(\r\n '%s deposits $%d, and $%d left' %\r\n (current_user['user'], num, money))\r\n\r\n # spend money\r\n\r\n elif opt == 'out':\r\n\r\n if money > num:\r\n\r\n money -= num\r\n\r\n db[current_user['user']]['money'] = money\r\n\r\n common.save_db(db)\r\n\r\n logger.info(\r\n '%s spent $%d, and $%d left' %\r\n (current_user['user'], num, money))\r\n\r\n else:\r\n\r\n logger.warning(\r\n '%s fails to spend $%d, and $%d left' %\r\n (current_user['user'], num, money))\r\n\r\n\r\n@auth\r\ndef run():\r\n\r\n while True:\r\n\r\n print('\\n1. 存款\\n2. 取款\\n3. 还款\\n4. 购物\\n5. 
退出\\n')\r\n\r\n choice = input('>>: ').strip()\r\n\r\n if not choice:\r\n continue\r\n\r\n if choice == '1':\r\n\r\n balance_operation('in')\r\n\r\n if choice in ['2', '3']:\r\n\r\n balance_operation('out')\r\n\r\n if choice == '4':\r\n\r\n shopping()\r\n\r\n if choice in ['Q', 'q']:\r\n\r\n quit()\r\n","sub_path":"soft/core/src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"153699038","text":"\nimport pandas as pd\nimport shutil\nimport os\nimport math\nimport re\n\n# I'm assuming that this repo does not contain repeated images from Cohen\n\nmetadata = \"metadata.csv\"\nimagedir = \"images\"\noutputdir = \"../../2_Raw/Figure1\"\n\n# Remove output dir if present\nif os.path.isdir(outputdir):\n shutil.rmtree(outputdir)\n\nmask_dir = os.path.join(outputdir, \"Masks\")\nif not os.path.isdir(mask_dir):\n os.makedirs(mask_dir)\n\nmetadata_csv = pd.read_csv(metadata, encoding = 'ISO-8859-1')\n\nfor (i, row) in metadata_csv.iterrows():\n\n if row[\"finding\"] != \"COVID-19\":\n continue\n\n filename = row[\"patientid\"]\n\n if os.path.isfile(os.path.join(imagedir, filename + \".png\")):\n ext = \".png\"\n else:\n ext = \".jpg\"\n\n image_path = os.path.join(imagedir, filename + ext)\n\n # Check if destination folder exists, if not create it\n dest_dir = os.path.join(outputdir, \"COVID-19\")\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n\n # Copy image\n shutil.copy2(image_path, dest_dir)\n\n _, pid = re.split(\"-\", row[\"patientid\"])\n offset = row[\"offset\"]\n\n try:\n offset = int(offset)\n except:\n offset = 0\n\n new_filename = \"P\" + str(pid) + \"_\" + str(offset)\n new_filename_ext = \"P\" + str(pid) + \"_\" + str(offset)\n old_file = os.path.join(dest_dir, filename + ext)\n new_file = os.path.join(dest_dir, new_filename_ext + ext)\n os.rename(old_file, new_file)\n\n # Check if there are any mask provided for this image\n mask_filename = filename + \".png\"\n mask_filepath = os.path.join(\"Masks\", mask_filename)\n if os.path.exists(mask_filepath):\n shutil.copy2(mask_filepath, mask_dir)\n old_file = os.path.join(mask_dir, mask_filename)\n new_file = os.path.join(mask_dir, new_filename + \".png\")\n os.rename(old_file, new_file)\n","sub_path":"1_data/COVIDx - Figure1-COVID-chestxray-dataset/extract_images.py","file_name":"extract_images.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"308716599","text":"'''Define a function that computes the length of a given list or string.\n(It is true that Python has the len() function built in, but writing it yourself\nis nevertheless a good exercise.)'''\n\ndef length(str):\n\t'''\n\tThis function computes the length of a given list or string\n\n\tParameters\n\t----------\n\tstr (list or string)\n\n\tReturns\n\t----------\n\tThe length (int)\n\t'''\n\tlength = 0\n\tfor i in str:\n\t\tlength += 1\n\treturn length\n","sub_path":"srcs/ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"83177914","text":"from matplotlib import pyplot as plt\nimport numpy as np\nfrom matplotlib.pyplot import plot, show, figure, title\nfrom scipy.fftpack import dct, idct\nfrom scipy.sparse import coo_matrix\nimport datagen as dtgn\nfrom sklearn.linear_model import Lasso # 
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html\n\n\n\n# Generate synthetic sinusoid data\nM = 45 # Number of compressed \"basis\" functions - we're going from N_samps to M samps.\n\n# Get synthetic glucose data\nmodelname = \"sturis_model\"\nt_length = 2000\nt_coarse = 2\nnum_measurements = 400\nmeasured_vars = [2]\nN_samps = num_measurements\nprint(\"Compression ratio {0}\".format(M/N_samps))\n\nt_measurements, measurement_matrix = dtgn.model_datagen(modelname,t_length,t_coarse,num_measurements,measured_vars,linspace=\"y\")\nt = t_measurements\nX = np.transpose(measurement_matrix)[2]\n\n# Subtract the DC offset from glucose measurements\nDC_offset = np.mean(X)\nX = X - DC_offset\n\n\n# From here down stays the same - downsamples data\nfigure(figsize=[10,4])\nplot(t,X+DC_offset)\ntitle('Original signal')\nplt.xlabel('Time (s)')\nplt.ylabel('Blood Glucose')\nplt.show()\n\n#yi = np.random.randint(0,N_samps,(M,))\n#yi = np.sort(yi)\nyi = list(map(int,np.floor(np.linspace(0,N_samps-1,M))))\nY = X[yi]\n\nfigure(figsize=[10,4])\nplot(t,X+DC_offset,'b',np.asarray(t)[yi],Y+DC_offset,'r.')\ntitle('Original Signal with Random Sampling Points')\nplt.xlabel('Time (s)')\nplt.ylabel('X(t) and X(random sample)')\nplt.show()\n\nD = dct(np.eye(N_samps)) # Construct the DCT basis functions for each of the frequencies\nA = D[yi]\n\nlasso = Lasso(alpha=0.001)\nlasso.fit(A,Y)\n\nplot(lasso.coef_)\n\nsparseness = np.sum(lasso.coef_ == 0)/N_samps\nprint(\"Solution is %{0} sparse\".format(100.*sparseness))\n\nXhat = idct(lasso.coef_)\n\nfigure()\nplot(t,Xhat)\ntitle('Reconstructed signal')\nplt.show()\n\nfigure()\nplot(t,Xhat-X)\ntitle('Error delta')\nplt.show()\n\nfigure(figsize=[12,6])\nplot(t,Xhat+DC_offset)\nplot(t,X+DC_offset)\ntitle(\"Original and reconstructed signal plotted together (orig = blue)\")\nplt.show()\n","sub_path":"sturis_model/compressed_sensing/glucose_compressed_sensing_template0.py","file_name":"glucose_compressed_sensing_template0.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"141145568","text":"from time_bar import print_time\nfrom PIL import Image\nfrom shutil import copyfile\n\nimport numpy as np\nimport face_recognition\n\nimport os\nimport time\n\n\ndef distribute_photos(folder, res_fold, master_db, pioners_db, master_set, pioners_set, stock):\n lst = os.listdir(folder)\n st = time.time()\n\n i = 0\n for el in lst:\n print_time(len(lst), i, st, time.time(), inf='Processing photos...')\n i += 1\n\n img = Image.open(folder + '/' + el)\n faces = face_recognition.api.face_locations(np.array(img))\n\n if len(faces) > 0:\n vectors = face_recognition.api.face_encodings(np.array(img), faces, model='large')\n\n flag = False\n for vec in vectors:\n name = master_db.find(vec)\n if (name is not None) and (name in master_set):\n flag = True\n name = '/masters/' + name + '/'\n else:\n name = pioners_db.find(vec)\n if (name is not None) and (name in pioners_set):\n flag = True\n name = '/pioners/' + name + '/'\n\n if name is not None:\n copyfile(folder + '/' + el, res_fold + name + el)\n\n if not flag:\n copyfile(folder + '/' + el, res_fold + '/_Unrecognized_/' + el)\n else:\n copyfile(folder + '/' + el, res_fold + '/_Empty_/' + el)\n\n copyfile(folder + '/' + el, stock + '/' + el)\n os.remove(folder + '/' + el)\n\n print_time(len(lst), i, st, time.time(), inf='Processing photos...', fin=True)\n return True\n\n\ndef calculate_results(folder):\n lst = 
os.listdir(folder + '/masters')\n print(\"Finishing program...\", end='')\n\n for el in lst:\n info = open(folder + '/masters/' + el + '/info.inf', 'w')\n info.write(el)\n info.close()\n\n lst = os.listdir(folder + '/pioners')\n names = []\n sizes = []\n\n for el in lst:\n info = open(folder + '/pioners/' + el + '/info.inf', 'w')\n info.write(el)\n info.close()\n sizes.append(len(os.listdir(folder + '/pioners/' + el)) - 2)\n names.append(el)\n\n print(\"\\rProgram finished\")\n return names, sizes\n","sub_path":"distribute.py","file_name":"distribute.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"631568431","text":"import os\nimport cv2\nimport numpy as np\n\ndata_dir = \"/data/ai_lane\"\nimg_dir = \"trainval_pic\"\ntag_dir = \"trainval_tag\"\ntest_dir= \"testA_crop\"\n\nif __name__ == \"__main__\":\n path1 = os.path.join(data_dir,img_dir)\n path2 = os.path.join(data_dir,tag_dir)\n img_list = np.array(list(os.listdir(path1)))\n for _ in range(5):\n np.random.shuffle(img_list)\n print(len(img_list))\n f_train = open(os.path.join(data_dir,\"train_list.txt\"),\"w\")\n f_eval = open(os.path.join(data_dir,\"val_list.txt\"),\"w\")\n f_error = open(os.path.join(data_dir,\"error_list.txt\"),\"w\")\n count = 1\n for img_name in img_list:\n print(\"Processing %d image:%s\"%(count,img_name))\n img1 = cv2.imread(os.path.join(data_dir+\"/\"+img_dir,img_name),-1)\n img2 = cv2.imread(os.path.join(data_dir+\"/\"+tag_dir,img_name[:-4]+\".png\"),-1)\n if (img1.shape[0] == img2.shape[0]) and (img1.shape[0] == img2.shape[0]):\n name1 = os.path.join(img_dir,img_name)\n name2 = os.path.join(tag_dir,img_name[:-4]+\".png\")\n if count <= 14000:\n f_train.write(name1+\" \"+name2+\"\\n\")\n else:\n f_eval.write(name1+\" \"+name2+\"\\n\")\n else:\n name1 = os.path.join(img_dir,img_name)\n name2 = os.path.join(tag_dir,img_name[:-4]+\".png\")\n f_error.write(name1+\" \"+name2+\"\\n\")\n count += 1\n f_train.close()\n f_eval.close()\n f_error.close()\n","sub_path":"tools/split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"122578902","text":"import os\nimport sys\nfile_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(file_path)\nfrom data import excel_column_data\nfrom util.operating_excel import operatingExcel\nfrom util.operating_json_file import operatingJson\nfrom util.connect_db import connectDb\nclass getData():\n def __init__(self):\n self.operating = operatingExcel()\n self.operating_json = operatingJson()\n self.connect_db = connectDb()\n\n def get_case_lines(self):\n return self.operating.get_lins()\n\n def get_case_ID(self,row):\n '''获取用例ID'''\n col = int(excel_column_data.get_id())\n caseID = self.operating.get_value(row,col)\n return caseID\n\n def get_case_is_run(self,row):\n '''获取是否运行'''\n col = int(excel_column_data.get_is_run())\n Flag = None\n run_value = self.operating.get_value(row,col)\n if run_value == \"yes\":\n Flag = True\n else:\n Flag = False\n return Flag\n\n def get_url(self,row):\n '''获取url'''\n col = int(excel_column_data.get_url())\n request_url = self.operating.get_value(row,col)\n return request_url\n\n def get_request_type(self,row):\n '''获取请求方式'''\n col = int(excel_column_data.get_request_type())\n request_type = self.operating.get_value(row,col)\n return request_type\n\n def get_is_cookie(self,row):\n 
'''获取是否需要cookie'''\n col = int(excel_column_data.get_request_data_type())\n request_cookie = self.operating.get_value(row,col)\n return request_cookie\n\n def get_header(self,row):\n '''获取header信息'''\n col = int(excel_column_data.get_header())\n request_header = self.operating.get_value(row,col)\n if request_header == \"\":\n return None\n else:\n request_json_header = self.operating_json.get_header(request_header)\n return request_json_header\n\n\n def get_case_depend(self,row):\n '''获取case是否存在依赖'''\n col = int(excel_column_data.get_case_depend())\n request_case_depend = self.operating.get_value(row,col)\n if request_case_depend == \"\":\n return None\n else:\n return request_case_depend\n\n def get_data_depend(self,row):\n '''获取依赖的返回数据'''\n col = int(excel_column_data.get_data_depend())\n request_data_depend = self.operating.get_value(row,col)\n if request_data_depend == \"\":\n return None\n else:\n return request_data_depend\n\n def get_field_depend(self,row):\n '''获取数据依赖字段'''\n col = int(excel_column_data.get_field_depend())\n request_field_depend = self.operating.get_value(row,col)\n if request_field_depend == \"\":\n return None\n else:\n return request_field_depend\n\n def get_data(self,row):\n '''获取body信息'''\n col = int(excel_column_data.get_data())\n request_data = self.operating.get_value(row,col)\n if request_data != \"\":\n return request_data\n else:\n return None\n\n def get_json_data(self,row):\n '''根据关键字获取json文件中body相关信息'''\n data = self.get_data(row)\n if data != None:\n request_json_header = self.operating_json.get_data(data)\n else:\n request_json_header = None\n return request_json_header\n\n def get_expect(self,row):\n '''获取预期结果'''\n col = int(excel_column_data.get_expect())\n request_expect = self.operating.get_value(row,col)\n return request_expect\n\n def write_result(self,row,value):\n '''写入实际结果'''\n col = int(excel_column_data.get_result())\n self.operating.write_data(row,col,value)\n\n def get_sql(self,row):\n '''通过sql获取预期结果'''\n col = int(excel_column_data.get_expect())\n sql = self.operating.get_value(row, col)\n result = self.connect_db.search_one(sql)\n return result\n\n\n\n\n\n\n","sub_path":"pytest/data/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"406098634","text":"# Stuff for the database\nimport sqlite3\nimport os\nimport datetime\n\n# Command line parsing\nimport sys, getopt\n\n# Code reuse\nfrom app import init_db \n\nDATABASE = './media/media.db'\n\ndef main(argv):\n\n #Command line parsing\n try:\n opts, args = getopt.getopt(argv, \"nlr:a:h\", [\"remove=\", \"add=\"])\n except getopt.GetoptError:\n print(\"Unknown command, try -h for help\")\n sys.exit(2)\n for opt, arg in opts:\n if opt == \"-h\":\n print(\"-h for help\")\n print(\"-n for a new database\")\n print(\"-l for listing database\")\n print(\"-r for removing a file from the database\")\n print(\"-a for adding a file to the database\")\n sys.exit()\n elif opt in (\"-n\"):\n print(\"Creating a new database...\")\n init_db() # Calls this from app.py, where I intend to add most of this functionality \n print(\"Done\")\n sys.exit()\n elif opt in (\"-l\"):\n db = sqlite3.connect(DATABASE)\n for row in db.execute('SELECT date, fileName FROM media ORDER BY id ASC'):\n print(row)\n sys.exit()\n elif opt in (\"-r\", \"--remove\"):\n filename = arg\n db = sqlite3.connect(DATABASE)\n # This returns 1 if there is at least 1 record of the filename in the database\n 
exist = db.execute('SELECT EXISTS (SELECT 1 FROM media WHERE fileName=? LIMIT 1)', [filename]).fetchone()[0]\n if exist == 1:\n db.execute('DELETE FROM media WHERE fileName=?', [filename])\n db.commit()\n sys.exit()\n elif opt in (\"-a\", \"--add\"):\n filename = arg\n path = \"./media/\" + filename\n if os.path.isfile(path) == True:\n date = datetime.date.today() # Since there isn't a sure way to get the time the file was created, use today instead\n db = sqlite3.connect(DATABASE)\n db.execute('INSERT INTO media (date, fileName) VALUES (?, ?)', [date, filename])\n db.commit()\n sys.exit()\n print(\"File does not exist, or is not in the media folder\")\n sys.exit()\n else:\n sys.exit()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"139042889","text":"import os\nimport json\n\n# search for all the files in imagedirectories\nhappyfiles = os.listdir(\"happy_images\")\nunhappyfiles = os.listdir(\"unhappy_images\")\n\n# construct full url-path by adding the folder\nhappyurls = []\nfor f in happyfiles:\n happyurls.append(os.path.join(\"happy_images\", f))\n\nunhappyurls = []\nfor f in unhappyfiles:\n unhappyurls.append(os.path.join(\"unhappy_images\", f))\n\n# construct the writable json-dict\ndata = {}\ndata[\"url_happy\"] = happyurls\ndata[\"url_unhappy\"] = unhappyurls\n\n# write it so that the variable name is on the file (easier to execute on js)\nwith open(\"app/imagedata.js\", \"w\") as outfile:\n outfile.write(\"data = \")\n json.dump(data, outfile)\n\n# This would maybe be \"proper way\", but creates a file with no variable and not \n# so easy to execute on a page\n#with open(\"data1.json\", \"w\") as outfile:\n# json.dump(data, outfile)","sub_path":"build_imagedata.py","file_name":"build_imagedata.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"220243546","text":"#-*- coding:utf-8 -*-\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nimport requests\nimport pymysql\n\nurl = \"http://xa.58.com/pinpaigongyu/pn/{page}\"\ndb=pymysql.connect(host=\"localhost\",user=\"root\",password=\"83438023\",db=\"rent\",port=3306,charset=\"utf8\")\ncur = db.cursor()\n\n#已完成的页数序号,初时为0\npage = 0\n\nwhile True:\n page += 1\n print(\"fetch: \", url.format(page=page))\n response = requests.get(url.format(page=page))\n html = BeautifulSoup(response.text, \"lxml\")\n house_list = html.select(\".list > li\")\n\n # 循环在读不到新的房源时结束\n if not house_list:\n break\n\n for house in house_list:\n house_title = house.select(\"h2\")[0].string\n house_url = urljoin(url, house.select(\"a\")[0][\"href\"])\n house_info_list = house_title.split()\n\n # 如果第二列是公寓名则取第一列作为地址\n if \"公寓\" in house_info_list[1] or \"青年社区\" in house_info_list[1]:\n house_location = house_info_list[0]\n else:\n house_location = house_info_list[1]\n\n house_money = house.select(\".money\")[0].select(\"b\")[0].string\n \n cur.execute(\"INSERT INTO house(title,location,money,url)VALUES('{0}','{1}','{2}','{3}');\".format(house_title,house_location,house_money,house_url)) \n db.commit() \n\ndb.close() \n","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"275764354","text":"import 
os\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.fernet import Fernet\nfrom prompts import success\n\n\ndef RSA(target_file, public_key_source):\n \"\"\"Encrypts the passed file with the passed RSA public key\n \n Keyword arguments:\n target_file -- the filepath to the file to be encrypted\n public_key_source -- the filepath to the public key\n \"\"\"\n with open(public_key_source, \"rb\") as public_key_file:\n public_key = serialization.load_pem_public_key(\n public_key_file.read(),\n backend=default_backend()\n )\n try:\n with open(target_file) as read_file:\n file_data = read_file.read()\n file_data = bytes(file_data, \"utf-8\")\n except UnicodeDecodeError:\n with open(target_file, \"rb\") as read_file:\n file_data = read_file.read()\n data = public_key.encrypt(\n file_data,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n data += b\"0\"\n with open(target_file, \"wb\") as write_file:\n write_file.write(data)\n\n\ndef Symmetric(target_file, symmetric_key_source):\n \"\"\"Encrypts the passed file with the passed symmetric key\n \n Keyword arguments:\n target_file -- the filepath to the file to be encrypted\n symmetric_key_source -- the filepath to the symmetric key\n \"\"\"\n with open(symmetric_key_source, \"rb\") as symmetric_key_file:\n symmetric_key_data = symmetric_key_file.read()\n symmetric_key = Fernet(symmetric_key_data)\n try:\n with open(target_file) as read_file:\n file_data = read_file.read()\n file_data = bytes(file_data, \"utf-8\")\n except UnicodeDecodeError:\n with open(target_file, \"rb\") as read_file:\n file_data = read_file.read()\n data = symmetric_key.encrypt(file_data)\n data += b\"1\"\n with open(target_file, \"wb\") as write_file:\n write_file.write(data)\n\ndef enc_manager(target_files, save_folder):\n \"\"\"Encrypt all files passed to the function with the symmetric key,\n and then replace the symmetric key file's contents with an encrypted\n version, encrypted with the public key.\n\n Keyword arguments:\n target_file_raw -- a string composed of file locations seperated by colons\n save_folder -- the location of the saved key trio\n \"\"\"\n pub_src = save_folder + \"/public_key.pem\"\n sym_src = save_folder + \"/symmetric_key.key\"\n if not os.path.exists(sym_src):\n for fl in target_files:\n RSA(fl, pub_src)\n else:\n for fl in target_files:\n if os.path.getsize(fl) > 446:\n Symmetric(fl, sym_src)\n else:\n RSA(fl, pub_src)\n with open(pub_src, \"rb\") as pub_file, \\\n open(sym_src, \"rb\") as sym_file:\n public_key = serialization.load_pem_public_key(\n pub_file.read(),\n backend=default_backend()\n )\n symmetric_key_data = sym_file.read()\n encrypted_key = public_key.encrypt(\n symmetric_key_data,\n padding.OAEP(\n mgf = padding.MGF1(algorithm = hashes.SHA256()),\n algorithm = hashes.SHA256(),\n label = None\n )\n )\n with open(sym_src, \"wb\") as crypto_key_file:\n crypto_key_file.write(encrypted_key)\n success(\"enc\")","sub_path":"Scripts/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"611002335","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport os.path\nimport sys\nimport 
copy\nimport math\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\n\nLABEL_FONT_SIZE = 7\nTICK_FONT_SIZE = 6\n\n# Use TrueType fonts instead of Type 3 fonts\n# Type 3 fonts embed bitmaps and are not allowed in camera-ready submissions\n# for many conferences. TrueType fonts look better and are accepted.\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\nmatplotlib.rcParams['figure.figsize'] = 5, 2\n\ndef log(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\n\n# Color palette\nlight_gray = \"#cacaca\"\ndark_gray = \"#827b7b\"\nlight_blue = \"#a6cee3\"\ndark_blue = \"#1f78b4\"\nlight_green = \"#b2df8a\"\ndark_green = \"#33a02c\"\nlight_red = \"#fb9a99\"\ndark_red = \"#e31a1c\"\n\n# math\nlg2 = lambda n: math.log(n, 2)\n\n\nrawdata = None # raw data\nwith open(\"./gate-count-bench.json\", \"r\") as f:\n rawdata = json.load(f)\nassert rawdata is not None\n\nINF = 10**9\nclass PlotDatum:\n def __init__(self, label, kind, stats, idx):\n self.label = label # testname\n self.kind = kind # default, qiskit, qssa, zx\n self.idx = idx\n if 'ops' not in stats or 'depth' not in stats:\n log(f'> INVALID {label}::{kind} : {stats}')\n self.gates = {}\n self.cx = -1\n self.u = -1\n self.single_qubit = -1\n self.depth = -1\n self.tot = -1\n self.time = stats.get('time', -1)\n else:\n gates = stats['ops']\n self.gates = copy.deepcopy(gates) # gates dict: {: , ...}\n self.cx = gates.get(\"cx\", 0)\n self.u = gates.get(\"u\", 0) + gates.get(\"u3\", 0)\n self.single_qubit = 0\n for gate in \"h x y z rx ry rz s sdg t tdg u u1 u2 u3\".split():\n self.single_qubit += gates.get(gate, 0)\n self.depth = stats['depth']\n self.tot = sum([gates[g] for g in gates])\n self.time = stats['time']\n\n def __lt__(self, other):\n return self.tot < other.tot\nclass FullData:\n def __init__(self, test, stats, idx):\n self.test = test\n self.idx = idx\n self.data = dict()\n for kind in rawdata[test]:\n self.data[kind] = PlotDatum(test, kind, stats[kind], idx)\n def __lt__(self, other):\n lv = self.data['default'].tot\n rv = other.data['default'].tot\n if lv != rv: return lv < rv\n lv = self.data['default'].depth\n rv = self.data['default'].depth\n if lv != rv: return lv < rv\n return True\n def hasKind(self, k):\n return k in self.data\n def getKind(self, k):\n return self.data[k]\n\n\nplotdata = []\nplotdata_routing = []\npidx = 0\nfor test in rawdata:\n pidx += 1\n data = FullData(test, rawdata[test], pidx)\n if test.find('onlyCX') >= 0:\n plotdata_routing.append(data)\n else:\n plotdata.append(data)\n\nplotdata.sort()\n\n\n#### PLOTTING-CODE\nto_plot = plotdata\nlog(\">> Plotting [%d] test cases...\"% (len(to_plot)))\n\nxs = np.arange(len(to_plot))\nwidth = 0.2\n\nanamolies = []\nfor p in plotdata:\n rat = 100*(1-p.getKind('qiskit_lev3').tot / p.getKind('default').tot)\n if rat >= 35.0:\n anamolies.append((p.test, rat))\nlog(f\">> {len(anamolies)} anamolies\")\nlog(f\">>> {anamolies}\")\n#### Optimization ratio\nfig, ax = plt.subplots(figsize=(15,10))\nfor idx, kind in enumerate(['qiskit_lev1', 'qiskit_lev2', 'qiskit_lev3', 'qssa_full']):\n ratio_nocap = lambda p: 100*(1-p.getKind(kind).tot / p.getKind('default').tot)\n ratio = lambda p: min(35.0, ratio_nocap(p))\n col = None\n label = None\n if kind == 'qiskit_lev1':\n col = light_green\n label = 'qiskit -O1'\n if kind == 'qiskit_lev2':\n col = dark_green\n label = 'qiskit -O2'\n if kind == 'qiskit_lev3':\n col = light_blue\n label = 'qiskit -O3'\n if kind == 'qssa_full':\n col = dark_blue\n 
label = 'qssa'\n rects1 = ax.bar(xs + ((idx + 1) * 1 * width), [ratio(p) for p in to_plot], width, label=label, color=col)\n #rects1 = ax.plot(xs, [ratio(p) for p in to_plot], label=label, color=col)\n\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\n\nax.legend(ncol=100, frameon=False, loc='lower right', bbox_to_anchor=(0, 1, 1, 0), fontsize=LABEL_FONT_SIZE)\n\nax.set_xticks([])\nax.tick_params(axis='y', labelsize=TICK_FONT_SIZE)\nax.set_ylabel('%optimization', rotation='horizontal', position = (1, 1.05),\n horizontalalignment='left', verticalalignment='bottom', fontsize=LABEL_FONT_SIZE)\n#ax.margins(x=0)\nax.margins(0.015, tight=True)\n\nfig.set_size_inches(5,2)\nfig.tight_layout()\nfilename = os.path.basename(__file__).replace(\".py\", \".pdf\")\nfig.savefig(filename)\n\n### Stats for the paper\nbeat1, equal1, fail1 = [], [], []\nbeat2, equal2, fail2 = [], [], []\nfor lev in [1,2,3]:\n beat = []\n equal = []\n fail = []\n for p in to_plot:\n qssa = p.getKind('qssa_full')\n qis = p.getKind('qiskit_lev' + str(lev))\n if qssa.tot < qis.tot:\n beat.append(p.test)\n elif qssa.tot == qis.tot:\n equal.append(p.test)\n else:\n fail.append(p.test)\n print(f\">>>>>>>>> LEVEL {lev} >>>>>>>>>>>>\")\n print(f'> beat = {len(beat)}, equal = {len(equal)+len(beat)}')\n print()\n print(f'> beat: {beat}')\n print()\n print(f'> equal: {equal}')\n print()\n print(f'> fail: {fail}')\n print()\n print(\"---------------------------------\")\n","sub_path":"data/ibm_challenge/ibm-plot-gate-counts.py","file_name":"ibm-plot-gate-counts.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"505623092","text":"#!/usr/bin/env python\n\nimport sys\nimport heapq\ndata = sys.stdin.read()\n\n\n\ngraph = {}\nfor num, line in enumerate(data.split(\"\\n\")):\n if line:\n if num == 0:\n N, M, T = line.split(\" \")\n N, M, T = int(N), int(M), int(T)\n else:\n x, y, t1, t2 = line.split(\" \")\n x, y, t1, t2 = int(x), int(y), int(t1), int(t2)\n if x in graph.keys():\n if y in graph[x].keys():\n graph[x][y].append((t1, t2))\n else:\n graph[x][y] = [(t1, t2)]\n else:\n graph[x] = {y: [(t1, t2)]}\n\n# print(graph)\n\n\n# def escape_from_time(graph: dist, N: int, T: int):\ndef escape_from_time(graph, N, T):\n\n graph[N] = {}\n all_vertices = graph.keys()\n visited = {1}\n time_dict = {}\n heap = []\n not_visited = {1}\n\n # Initialization\n for ver in all_vertices:\n if ver != 1:\n time_dict[ver] = float(\"inf\")\n not_visited.add(ver)\n heapq.heappush(heap, ((float(\"inf\"), 0), (ver, ver)))\n else:\n time_dict[ver] = 0\n heapq.heappush(heap, ((0, 0), (ver, ver)))\n\n while not_visited and heap:\n time_tup, ver_tup = heapq.heappop(heap)\n if time_dict[ver_tup[0]] > time_tup[0] and (\n 0 < time_tup[1] - time_dict[ver_tup[1]] <= T\n ):\n time_dict[ver_tup[0]] = time_tup[0]\n visited.add(ver_tup[0])\n\n if ver_tup[0] in not_visited:\n not_visited.remove(ver_tup[0])\n\n for nbr in graph[ver_tup[0]]:\n graph[ver_tup[0]][nbr].sort()\n for start_t, stop_t in graph[ver_tup[0]][nbr]:\n previous_stop_t = time_dict[ver_tup[0]]\n if previous_stop_t + T >= start_t > previous_stop_t:\n break\n if nbr in not_visited and time_dict[nbr] > stop_t and 0 < start_t - time_dict[ver_tup[0]] <= T :\n time_dict[nbr] = stop_t\n heapq.heappush(heap, ((stop_t, start_t), (nbr, ver_tup[0])))\n\n # return time_dict\n return time_dict[N]\n\ntmin = escape_from_time(graph, N, T)\n\nif tmin < float('inf'):\n print('YES', tmin)\nelse:\n 
print('NO')\n\n# time_dict = escape_from_time(graph, N, T)\n# print(time_dict)\n","sub_path":"CS5800hw/hw3/escape_from_time_submission.py","file_name":"escape_from_time_submission.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"42433829","text":"import numpy as np\nimport cv2\n\n\ndef conv_transform(image):\n    image_copy = image.copy()\n\n    for i in range(image.shape[0]):\n        for j in range(image.shape[1]):\n            image_copy[i][j] = image[image.shape[0] - i - 1][image.shape[1] - j - 1]\n    return image_copy\n\n\ndef conv(image, kernel):\n    kernel = conv_transform(kernel)\n    image_h = image.shape[0]\n    image_w = image.shape[1]\n\n    kernel_h = kernel.shape[0]\n    kernel_w = kernel.shape[1]\n\n    h = kernel_h // 2\n    w = kernel_w // 2\n\n    image_conv = np.zeros(image.shape)\n\n    for i in range(h, image_h - h):\n        for j in range(w, image_w - w):\n            sum = 0\n\n            for m in range(kernel_h):\n                for n in range(kernel_w):\n                    sum = (sum + kernel[m][n] * image[i - h + m][j - w + n])\n\n            image_conv[i][j] = sum\n\n    return image_conv\n\n\ndef norm(img1, img2):\n    img_copy = np.zeros(img1.shape)\n\n    for i in range(img1.shape[0]):\n        for j in range(img1.shape[1]):\n            # gradient magnitude: sqrt(gx^2 + gy^2)\n            q = (img1[i][j] ** 2 + img2[i][j] ** 2) ** (1 / 2)\n            if (q > 90):\n                img_copy[i][j] = 255\n            else:\n                img_copy[i][j] = 0\n\n    return img_copy\n\n\ndef hough_lines_draw(img, img1, img2, outfile, outfile2, peaks, rhos, thetas):\n    for peak in peaks:\n        rho = rhos[peak[0]]\n        theta = thetas[peak[1]] * np.pi / 180.0\n        a = np.cos(theta)\n        b = np.sin(theta)\n        pt0 = rho * np.array([a,b])\n        pt1 = tuple((pt0 + 1000 * np.array([-b, a])).astype(int))\n        pt2 = tuple((pt0 - 1000 * np.array([-b, a])).astype(int))\n        if pt1[0] < 0:\n            cv2.line(img, pt1, pt2, (255, 255, 0), 3)\n        else:\n            print(a, b)\n            cv2.line(img2, pt1, pt2, (255, 255, 255), 3)\n    cv2.imwrite(outfile, img)\n    cv2.imwrite(outfile2, img2)\n    return img\n\n\ndef hough_lines_draw_final(img, img1, outfile, peaks, rhos, thetas):\n    for peak in peaks:\n        rho = rhos[peak[0]]\n        theta = thetas[peak[1]] * np.pi / 180.0\n        a = np.cos(theta)\n        b = np.sin(theta)\n        pt0 = rho * np.array([a, b])\n        pt1 = tuple((pt0 + 1000 * np.array([-b, a])).astype(int))\n        pt2 = tuple((pt0 - 1000 * np.array([-b, a])).astype(int))\n        cv2.line(img, pt1, pt2, (255, 255, 255), 3)\n    cv2.imwrite(outfile, img)\n    return img\n\n\ndef hough_lines_acc(img, rho_res=1, thetas = np.arange(-90, 90, 1)):\n    rho_max = int(np.linalg.norm(img.shape-np.array([1, 1]), 2))\n    rhos = np.arange(-rho_max, rho_max, rho_res)\n    thetas -= min(min(thetas),0)\n    accumulator = np.zeros((len(rhos), len(thetas)), dtype=np.uint8)\n    yis, xis = np.nonzero(img)  # use only edge points\n    for idx in range(len(xis)):\n        x = xis[idx]\n        y = yis[idx]\n        temp_rhos = x * np.cos(np.deg2rad(thetas)) + y * np.sin(np.deg2rad(thetas))\n        temp_rhos = temp_rhos / rho_res + rho_max\n        m, n = accumulator.shape\n        valid_idxs = np.nonzero((temp_rhos < m) & (thetas < n))\n        temp_rhos = temp_rhos[valid_idxs]\n        temp_thetas = thetas[valid_idxs]\n        c = np.stack([temp_rhos,temp_thetas], 1)\n        cc = np.ascontiguousarray(c).view(np.dtype((np.void, c.dtype.itemsize * c.shape[1])))\n        _, idxs, counts = np.unique(cc, return_index=True, return_counts=True)\n        uc = c[idxs].astype(np.uint)\n        accumulator[uc[:, 0], uc[:, 1]] += counts.astype(np.uint)\n    accumulator = cv2.normalize(accumulator, accumulator, 0, 255,\n                                cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n    return accumulator, thetas, rhos\n\n\ndef clip(idx):\n    return int(max(idx, 0))\n\n\ndef hough_peaks(H, 
numpeaks=1, threshold=100, nhood_size=5):\n peaks = np.zeros((numpeaks, 2), dtype=np.uint64)\n temp_H = H.copy()\n for i in range(numpeaks):\n _, max_val, _, max_loc = cv2.minMaxLoc(temp_H) # find maximum peak\n if max_val > threshold:\n peaks[i] = max_loc\n (c, r) = max_loc\n t = nhood_size//2.0\n temp_H[clip(r-t):int(r+t+1), clip(c-t):int(c+t+1)] = 0\n else:\n peaks = peaks[:i]\n break\n return peaks[:, ::-1]\n\n\nimage = cv2.imread(\"input_images/hough.jpg\", cv2.IMREAD_GRAYSCALE)\nimage_1 = cv2.imread(\"input_images/hough.jpg\", cv2.IMREAD_GRAYSCALE)\n\nsobel_x = np.array([\n [-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]\n ])\nsobel_y = np.array([\n [1, 2, 1],\n [0, 0, 0],\n [-1, -2, -1]\n ])\nimg_x = conv(image, sobel_x)\nimg_y = conv(image, sobel_y)\nedge_img = norm(img_x, img_y)\n\ncv2.imwrite(\"Hough_edges.jpg\", edge_img)\nH, thetas, rhos = hough_lines_acc(edge_img)\npeaks = hough_peaks(H, numpeaks=30, threshold=150, nhood_size=20)\n\ncv2.imwrite(\"Sine_wave.jpg\", H)\ncolor_img1 = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)\ncolor_img2 = cv2.cvtColor(image_1, cv2.COLOR_GRAY2BGR)\nres_img = hough_lines_draw(image, color_img1, color_img2, \"Diagonal_lines.jpg\", \"vertical_lines.jpg\", peaks, rhos, thetas)\n\nfinal = hough_lines_draw_final(image, color_img2, \"Hough_final.jpg\", peaks, rhos, thetas)\n","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"4890719","text":"import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom LucasKanade import *\nfrom scipy.interpolate import RectBivariateSpline\nimport time\nfrom TemplateCorrection import *\n\n# write your script here, we recommend the above libraries for making your animation\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--num_iters', type=int, default=1e4, help='number of iterations of Lucas-Kanade')\nparser.add_argument('--threshold', type=float, default=1e-2, help='dp threshold of Lucas-Kanade for terminating optimization')\nparser.add_argument('--template_threshold', type=float, default=5, help='threshold for determining whether to update template')\nargs = parser.parse_args()\nnum_iters = args.num_iters\nthreshold = args.threshold\ntemplate_threshold = args.template_threshold\n\nseq = np.load(\"../data/girlseq.npy\")\nrect = [280, 152, 330, 318]\nrect_list=[]\nrect_old=np.load(\"../result/girlseqrects.npy\")\n\nwidth=rect[2]-rect[0]\nheight=rect[3]-rect[1]\n\n#For the first frame we get the template spline evaluation and keep subtracting it\n\nIt=seq[:,:,0]\nx1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]#2.\nrow,column=It.shape#3.\nrow_rec=x2-x1\ncol_rec=y2-y1\ny=np.arange(0,row,1)#6.\nx=np.arange(0,column,1)#7.\ncc,rr=np.meshgrid(np.linspace(x1,x2,col_rec),np.linspace(y1,y2,row_rec))\n\nsplinet=RectBivariateSpline(y,x,It)\nT=splinet.ev(rr,cc)\n\n#We add tbe template correction\nfor i in range(1,seq.shape[2]):\n \n #print(\"Processing frame %d\" % i)\n #We have to send each frame to lucas Kanade\n It=seq[:,:,i-1]\n It1=seq[:,:,i]\n rect_list.append(rect)\n p=LucasKanade(It, It1, rect,threshold,num_iters, p0 = np.zeros(2))\n rect[0]+=p[0]#x1\n rect[1]+=p[1]#y1\n rect[2]+=p[0]#x2\n rect[3]+=p[1]#y2\n \n #now we resend these coordinates to Lucas Kanade for template collection\n \n p_new=TemplateCorrection(T, It1, rect,threshold,num_iters, p0 = np.zeros(2))\n \n if (np.linalg.norm(p_new-p)) '+sl2[1]+' [ label = 
\"'+sl[1]+'\" ];\\n'\n elif bool(sl[1] in busca[2])==False:\n print(\"El \"+sl[1]+\" no existe en la lista de Alfabetos\") \n elif bool(sl2[1]in busca[1])==False:\n print(\"El \"+sl2[1]+\" no existe en la lista de estados\")\n elif bool(sl2[0] in busca[1])==False:\n print(\"El \"+sl2[0]+\" no existe en la lista de estados\") \n elif busca[5]:\n if transicion1 in busca[5]:\n print(\"Error las transiciones no pueden repetirse, Las transiciones repetidas solo son aceptadas en AFN\")\n else:\n if bool(sl2[0] in busca[1] )==True:\n if bool(sl2[1] in busca[1])==True:\n if bool(sl[1] in busca[2])==True:\n busca[5].append(transicion1)\n banderatransi=True\n auxdfagraph+=''+sl2[0]+' -> '+sl2[1]+' [ label = \"'+sl[1]+'\" ];\\n'\n elif bool(sl[1] in busca[2])==False:\n print(\"El \"+sl[1]+\" no existe en la lista de Alfabetos\") \n elif bool(sl2[1]in busca[1])==False:\n print(\"El \"+sl2[1]+\" no existe en la lista de estados\")\n elif bool(sl2[0] in busca[1])==False:\n print(\"El \"+sl2[0]+\" no existe en la lista de estados\")\n \n menupreg() \ndef modo2():\n global lista,auxiliar1,auxdfagraph, banderatransi\n auxi=''\n auxi2=''\n auxiliar2=str(input(\"Ingrese las transiciones de las siguiente manera [estado 1,estado 2; estado 1,estado 2]:\\n\"))\n strange=auxiliar2.split(\"[\")\n strange1=strange[1].split(\"]\")\n strange2=strange1[0].split(\";\")\n if auxiliar2== '' or auxiliar2=='\\t' or auxiliar2==' ':\n modo2()\n else:\n for busca in lista:\n if busca[0]== nombre:\n \n #for lista2 in strange2:\n x=0\n while x '+strange3[k]+' [ label = \"'+busca[2][k]+'\" ];\\n'\n \n elif bool(strange3[k] in busca[1])==False: \n bandera=False\n z=0\n while z '+strange3[k]+' [ label = \"'+busca[2][k]+'\" ];\\n'\n \n elif bool(strange3[k] in busca[1])==False: \n bandera=False\n z=0\n while z\r\n
\r\n
\r\n

MID TERM - I

\r\n

NAME: NIKHIL GUPTA

\r\n

PIET18CS100 -- Sec: B -- Roll No 43

\r\n
\r\n
\r\n \r\n \"\"\"\r\n st.markdown(html_temp,unsafe_allow_html=True)\r\n st.header(\"Person Leaving Prediction\")\r\n meanfreq = st.number_input('Insert mean frequency',0,1)\r\n sd= st.number_input('Insert SD',0,1)\r\n median = st.number_input('Insert median',0,1)\r\n iqr = st.number_input('Insert iqr',0,1)\r\n skew = st.number_input('Insert skew')\r\n kurt = st.number_input('Insert kurt')\r\n mode = st.number_input('Insert mode',)\r\n centroid = st.number_input('Insert centroid')\r\n dfrange = st.number_input('Insert dfrange')\r\n\r\n # iqr = st.number_input('Insert SD',0,1)\r\n # skew = st.number_input('Insert skew')\r\n # kurt = st.number_input('Insert kurt')\r\n # mode = st.number_input('Insert mode',0,1)\r\n # centroid = st.number_input('Insert centroid',0,1)\r\n # dfrange = st.number_input('Insert dfrange')\r\n\r\n resul=\"\"\r\n if st.button(\"Predict\"):\r\n result=predict_note_authentication(meanfreq,sd,median,iqr,skew,kurt,mode,centroid,dfrange)\r\n st.success('Model has predicted that -> {}'.format(result))\r\n if st.button(\"About\"):\r\n st.subheader(\"Developed by NIKHIL GUPTA\")\r\n st.subheader(\"B-Section,PIET\")\r\n\r\nif __name__=='__main__':\r\n main()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"250561958","text":"from collections import defaultdict\nfrom fnmatch import fnmatch\nfrom io import StringIO\nfrom ovn_sandbox import Sandbox\n\n\n# The wrapper allows us to execute the command on all\n# matching central containers\nclass CentralNodeWrapper(Sandbox):\n def __init__(self, central_node, container):\n super(CentralNodeWrapper, self).__init__(\n central_node.phys_node, container\n )\n\n\nclass ExtCmdUnit(object):\n def __init__(self, conf, central_node, worker_nodes):\n self.iteration = conf.get('iteration')\n self.cmd = conf.get('cmd')\n self.test = conf.get('test')\n self.pid_name = conf.get('pid_name')\n self.background_opt = conf.get('background_opt')\n self.pid_opt = conf.get('pid_opt', '')\n\n node = conf.get('node')\n self.nodes = [n for n in worker_nodes if fnmatch(n.container, node)]\n self.nodes.extend(\n [\n CentralNodeWrapper(central_node, c)\n for c in central_node.central_containers()\n if fnmatch(c, node)\n ]\n )\n\n def is_valid(self):\n return (\n self.iteration is not None\n and self.cmd\n and self.test\n and self.nodes\n )\n\n def exec(self):\n return [self._node_exec(node) for node in self.nodes]\n\n def _node_exec(self, node):\n cmd = self.cmd\n\n if self.pid_name:\n stdout = StringIO()\n node.run(f'pidof -s {self.pid_name}', stdout=stdout)\n cmd += f' {self.pid_opt} {stdout.getvalue().strip()}'\n\n if self.background_opt:\n cmd += ' >/dev/null 2>&1 &'\n\n stdout = StringIO()\n node.run(cmd, stdout=stdout)\n return stdout.getvalue().strip()\n\n\nclass ExtCmd(object):\n def __init__(self, config, central_node, worker_nodes):\n self.cmd_map = defaultdict(list)\n for ext_cmd in config.get('ext_cmd', list()):\n cmd_unit = ExtCmdUnit(ext_cmd, central_node, worker_nodes)\n if cmd_unit.is_valid():\n self.cmd_map[(cmd_unit.iteration, cmd_unit.test)].append(\n cmd_unit\n )\n\n def exec_cmd(self, iteration, test):\n ext_cmds = self.cmd_map.get((iteration, test))\n if not ext_cmds:\n return\n\n return {ext_cmd: ext_cmd.exec() for ext_cmd in 
ext_cmds}\n","sub_path":"ovn-tester/ovn_ext_cmd.py","file_name":"ovn_ext_cmd.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"540447224","text":"\"\"\"yo chai home page ma click garexi aauxa...ya chai aba specific functions haru\njastai bank ko lagi matra milne functions haru tala lekhya hunxa \njasle garda repsonse dina lai sajilo hunxa\ntya edit box ko sato chai euta figure rakhne tyo sound ko waveform jasto\"\"\"\n\nimport sys,os\nimport subprocess\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtCore import QThread\nfrom time import sleep\n\nfrom run_hmm import execute\n\nclass playThread(QThread):\n def __init__(self):\n QThread.__init__(self)\n\n def __del__(self):\n self.wait()\n\n\n###yesma chai maile conditional lyaera rakhde...\n def run(self):\n print(\"specific-2\")\n sleep(0.3)\n print(\"What is the choice?\")\n a = int(execute())\n print('a2: %s' % a)\n print(type(a))\n self.conditional(a)\n\n def conditional(self,a):\n if (a == 0):\n print(\"menu is to repeated\")\n subprocess.call(['python','specific-main.py']) #calls a different script\n self.close()\n\n\nclass InitWindow(QtGui.QWidget):\n def __init__(self):\n super(InitWindow, self).__init__()\n self.initUI()\n\n def initUI(self):\n\n grid = QtGui.QGridLayout()\n self.setLayout(grid)\n\n pic = QtGui.QLabel()\n pic.setPixmap(QtGui.QPixmap(\"logo.jpg\"))\n grid.addWidget(pic, 0, 0)\n\n customer = QtGui.QLabel(\"You are at Customer Support Menu \\t\" + u'\\u0924\\u092a\\u093e\\u0908\\u0902 \\u0915\\u094d\\u0938\\u094d\\u091f\\u092e\\u0930 \\u0938\\u092a\\u094b\\u0930\\u094d\\u091f \\u092e\\u0947\\u0928\\u0941\\u092e\\u093e \\u0939\\u0941\\u0928\\u0941\\u0939\\u0941\\u0928\\u094d\\u091b \\n'\n+ '-----------------------------------------------------------------------------------------')\n first = QtGui.QLabel(u'\\u0967' + ' Customer Support Representative ' + u'\\u0915\\u0938\\u094d\\u091f\\u092e\\u0930 \\u0938\\u092a\\u094b\\u0930\\u094d\\u091f \\u092a\\u094d\\u0930\\u0924\\u093f\\u0928\\u093f\\u0927\\u093f')\n second = QtGui.QLabel(u'\\u0968' + ' Manager ' + u'\\u092e\\u0947\\u0928\\u0947\\u091c\\u0930 ')\n \n back = QtGui.QLabel(u'\\u0966' + ' Previous Menu '+ u'\\u092a\\u0941\\u0930\\u093e\\u0928\\u094b \\u092e\\u0947\\u0928\\u0941') \n\n grid.addWidget(customer,1,0)\n grid.addWidget(first,3,0)\n grid.addWidget(second,4,0)\n grid.addWidget(back,7,0)\n\n self.addButtons(grid)\n\n self.get_thread = playThread()\n self.get_thread.start()\n\n self.setWindowTitle('IVR Solutions')\n self.setGeometry(350,100,500,500)\n self.show()\n\n def addButtons(self,grid): \n\n cancelButton = QtGui.QPushButton(\"Cancel\")\n cancelButton.setStyleSheet('QPushButton {color: red;}')\n \n cancelButton.clicked.connect(QtCore.QCoreApplication.instance().quit)\n\n grid.addWidget(cancelButton,8,0)\n\n \n \ndef main():\n app = QtGui.QApplication(sys.argv)\n ex = InitWindow()\n sys.exit(app.exec_())\n\n\nif __name__=='__main__':\n main()\n","sub_path":"HMM/specific2.py","file_name":"specific2.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"235380028","text":"import os\nimport pdb\nimport numpy as np\nimport pandas as pd\nfrom argparse import ArgumentParser\nfrom libxmp.utils import file_to_dict\nfrom libxmp.consts import (\n XMP_NS_EXIF_Aux,\n XMP_NS_Photoshop,\n XMP_NS_EXIF,\n XMP_NS_XMP,\n XMP_NS_DC,\n XMP_NS_XMP_MM,\n XMP_NS_CameraRaw,\n 
XMP_NS_TIFF\n)\n\nMETA_NAMES = []\nALL_PROPERTIES = [XMP_NS_EXIF, XMP_NS_EXIF_Aux, XMP_NS_Photoshop, XMP_NS_XMP, XMP_NS_DC, XMP_NS_XMP_MM, XMP_NS_CameraRaw, XMP_NS_TIFF]\nPROPERTIES = [XMP_NS_EXIF, XMP_NS_TIFF]\nFN_DESIRED_FIELDS = os.path.join(os.path.dirname(__file__), \"res\", \"desired_labels\")\nDESIRED_FIELDS = pd.read_csv(FN_DESIRED_FIELDS, names=[\"dtype\", \"field\"])\n\n\ndef xmp_extract(fns):\n data = []\n columns = []\n for fn in fns:\n c, d = xmp_to_vec(fn)\n columns.append(c)\n data.append(d)\n return columns, data\n\n\ndef convert_types(df):\n\n def str_to_float(s):\n if isinstance(s, (str, unicode)):\n if \"/\" in s:\n # parse a ratio to its float value\n num, den = s.split(\"/\")\n return [float(num) / float(den)]\n elif \",\" in s:\n # parse a csv variable into multiple new columns\n return [float(el) for el in s.split(\",\")]\n else:\n # parse to float directly\n return [float(s)]\n else:\n # parse to float directly\n return [float(s)]\n\n converted = []\n columns = []\n for column, dtype in zip(df.columns, DESIRED_FIELDS[\"dtype\"]):\n if dtype == \"categorical\":\n values = pd.get_dummies(df[column]).values.tolist()\n converted.extend(zip(*values))\n columns.extend([\"{}_{}\".format(column, i) for i in xrange(len(values[0]))])\n elif dtype == \"binary\":\n converted.append(df[column].replace({\"True\": 1, \"False\": 0}).astype(int).values.tolist())\n columns.append(column)\n elif dtype == \"numerical\":\n values = df[column].replace('', np.nan).apply(str_to_float).values.tolist()\n lengths = np.array([len(val) if isinstance(val, list) else 1 for val in values])\n target_len = np.max(lengths)\n columns.extend([\"{}_{}\".format(column, i) for i in xrange(target_len)])\n if np.any(lengths > 1):\n for i, val in enumerate(values):\n if lengths[i] < target_len:\n values[i] = [None] * target_len\n values = zip(*values)\n converted.extend(values)\n else:\n raise TypeError(\"Unexpected type {} for property {}\".format(dtype, column))\n\n if len(converted) != len(columns):\n raise RuntimeError(\"The number of data columns and the number of data column names is different.\")\n\n return columns, converted\n\n\ndef xmp_to_vec(fn):\n # read in the core data of interest from the XMP file.\n xmp_data = file_to_dict(fn)\n df = pd.DataFrame([tup[:2] for _, data in xmp_data.items() for tup in data], columns=[\"field\", \"value\"])\n\n # filter down to the desired properties only.\n df = df.merge(DESIRED_FIELDS, how=\"inner\", on=\"field\")\n\n return df[\"field\"].values, df[\"value\"].values\n\n\ndef main():\n args = parse_args()\n with open(args.fn) as fp:\n fns = fp.read().splitlines()\n columns, data = xmp_extract(fns)\n df = pd.DataFrame(np.empty(shape=(len(data), len(DESIRED_FIELDS))), columns=DESIRED_FIELDS[\"field\"])\n for i, (c, d) in enumerate(zip(columns, data)):\n df.loc[i, c] = d\n\n # convert the data types\n columns, data = convert_types(df)\n df = pd.DataFrame(data).transpose()\n df.columns = columns\n import pdb; pdb.set_trace()\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", dest=\"fn\", help=\"Path to a file which contains a list of XMP files to parse (one per line).\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"edit_learn/extract/get_labels.py","file_name":"get_labels.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"215500240","text":"import json, requests\n\ndef 
get_page_json(page_id, expand = False):\n if expand:\n suffix = \"?expand=\" + expand \n #body.storage\n else:\n suffix = \"\"\n\n url=\"https://stamp.gs.ec.ge.com/confluence/rest/api/content/\" + page_id + suffix\n response = requests.get(url, auth=(\"502670418\", \"Idobetter@2018y\"))\n response.encoding = \"utf8\"\n return json.loads(response.text)\n\njson_data = get_page_json(\"649706783\", \"body.storage\")\nprint(json_data)\n\nprint(json_data['title'])\nprint(json_data['body']['storage']['value'])\nwith open('data1.json','w') as outfile:\n json.dump(json_data, outfile)\n\nprint(\"Execution completed\\n\")","sub_path":"confluence3.py","file_name":"confluence3.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"219997791","text":"import dash\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nimport pandas as pd\nfrom datetime import date\n\n# read in the data\ns = pd.read_csv('nasdaq_100.csv',parse_dates = True\n ,header = [0,1],index_col = 0)\ncluster = pd.read_csv('clusters.csv',index_col = 0)\n\napp = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])\n\nsidepanel = dbc.Card([\n dbc.FormGroup([\n dbc.Label(\"Select Ticker\")\n ,dcc.Dropdown(id='stock-select'\n ,options=[{'label':i, 'value':i} for i in cluster.index]\n ,value='ATVI' )\n ,dbc.FormGroup([\n dbc.Label(\"Timeframe\")\n ,dcc.RadioItems(id = 'timeframe-select'\n ,options=[\n {'label': 'Max', 'value': 'Max'},\n {'label': 'YTD', 'value': 'YTD'},\n {'label': '3Y', 'value': '3Y'},\n {'label': '1Y', 'value': '1Y'},\n {'label': '6Mo', 'value': '6Mo'},\n {'label': '3Mo', 'value': '3Mo'},\n {'label': '1Mo', 'value': '1Mo'}\n ],value='Max'\n ,labelStyle={\"margin-right\": \"5px\"}) \n ],row = False)\n ])\n])\n\nmainbody = [\n dcc.Graph(id=\"stock-vs-cluster\")\n ,html.P(id = \"cluster-members\")\n]\napp.layout = dbc.Container([\n html.H1(\"NASDAQ 100 Cluster Explorer\"),\n dbc.Row([\n dbc.Col(sidepanel, md=2),\n dbc.Col(mainbody, md=10)\n ])\n],fluid=True)\n\n\n@app.callback(\n [Output('stock-vs-cluster', 'figure'),\n Output('cluster-members','children')]\n ,[Input('stock-select', 'value')\n ,Input('timeframe-select', 'value')]\n)\ndef update_figure(symbol,timeframe):\n \n # get the cluster for the given symbol and it's members\n clust_no = cluster.cluster[symbol]\n clust_members = cluster[cluster.cluster == clust_no].index\n \n # get the closing values for only the stocks in the given cluster\n p = s['Close'].loc[:,clust_members]\n \n # filter the index based on just the timeframe\n if timeframe == 'YTD':\n p = p.loc[p.index.year == date.today().year,:]\n elif timeframe == '3Y':\n p = p.loc[p.index > date.today()-pd.DateOffset(years = 3)]\n elif timeframe == '1Y':\n p = p.loc[p.index > date.today()-pd.DateOffset(years = 1)]\n elif timeframe == '6Mo':\n p = p.loc[p.index > date.today()-pd.DateOffset(months = 6)]\n elif timeframe == '3Mo':\n p = p.loc[p.index > date.today()-pd.DateOffset(months = 3)]\n elif timeframe == '1Mo':\n p = p.loc[p.index > date.today()-pd.DateOffset(months = 1)]\n else:\n p = p\n \n # get rid of any missing values\n p = p.dropna(axis = 0,subset = [symbol])\n # equalize all stocks with a sum of 100 on the first day\n c = p*((100/len(p.columns))/p.iloc[0,:])\n \n # sum the cluster stocks and join on the selected stock (100-index both)\n plot_df = 
pd.DataFrame(c.transpose().sum().transpose(),columns = ['cluster'])\n plot_df = plot_df.join(p[symbol]*(100/p[symbol][0])).reset_index()\n plot_df = plot_df.melt(id_vars = ['Date'])\n graph = px.line(plot_df, x=\"Date\", y=\"value\", color='variable')\n\n # output the list of clusters in the stock\n out_text = 'Cluster Members: ' + ', '.join(clust_members)\n \n return graph, out_text\n\nif __name__ == \"__main__\":\n app.run_server(debug=False, port = 8050)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"319245746","text":"#-----------------------------------------BASICS-------------------------------------\nurls = [''] # urls (array) to monitor urls\nwait_time = 5 #customizable time to wait to check for changes IE every 20 secs\nheaders = {'User-Agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\"}\n\n\n#------------------------------------GMAIL SECTION----------------------------------\n\nnotify = True #True to notify via email on change False\n\nuser = '' #emailuser@gmail.com\npwd = '' #app password generated by google IE \"djfj rubi sifu sofi\"\nrecipient = '' #recievingemail@gmail.com\n\n\n# ----------------------------------TWITTER SECTION------------------------------------\n\ntweet = False #True to tweet on change, False not to\n\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_token_secret = ''\n\n\n #---------------------------------SMS SECTION------------------------------------------\n#https://www.twilio.com/console/account/settings\n\ntext = True #True to send text, false to not\n\naccountSID = ''\nauthToken = ''\nmyNumber = '' #number to send text messages to\ntwilioNumber = '' #twillio phone number\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"247624867","text":"\nimport json\nimport logging\n\nimport deploytk\nimport pyaas\n\nimport tornado.web\n\nclass Data(pyaas.handlers.Base):\n @tornado.web.authenticated\n def get(self):\n self.set_header('Content-Type', 'text/javascript')\n\n self.write('var _data={\\n' +\n 'foremen:' + deploytk.records.Foremen.Read().json + ',\\n' +\n 'managers:' + deploytk.records.Managers.Read().json + ',\\n' +\n 'jobs:' + deploytk.records.Jobs.Read().json + ',\\n' +\n 'positions:' + deploytk.records.Positions.Read().json + '\\n' +\n '};')\n\nclass Jobs(pyaas.handlers.Base):\n @tornado.web.authenticated\n def post(self, _id):\n self.set_header('Content-Type', 'application/json')\n\n values = json.loads(self.request.body)\n\n try:\n job = deploytk.records.Job.Create(values)\n except pyaas.error as e:\n raise tornado.web.HTTPError(500, e)\n\n logging.debug('Created job: %s', job.json)\n\n self.write(job.record)\n\n self.application.router.Send(str(job.foreman), 'AddJob', job.json)\n\n #position = dict(\n # id = identity['id'],\n # x = 0,\n # y = 0,\n # w = 100,\n # h = 100,\n # )\n #deploytk.records.Position.Create(position)\n #self.application.Broadcast(\n # action = 'CreatePosition',\n # data = position\n # )\n\n self.application.Broadcast(\n action = 'CreateJob',\n data = job.record\n )\n\n\nclass Positions(pyaas.handlers.Base):\n @tornado.web.authenticated\n def put(self, _id):\n values = json.loads(self.request.body)\n\n if _id != values['id']:\n raise 
tornado.web.HTTPError(500)\n\n position = deploytk.records.Position.Read(_id)\n if position is None:\n raise tornado.web.HTTPError(404)\n\n position.Update(values)\n\n","sub_path":"deploytk/handlers/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"476655055","text":"import csv\nimport numpy as np\nimport random\nimport pygame\nfrom sklearn import metrics\nWINDOW_W=1000\nWINDOW_H=1000\n\ndef toc_ds(x,y):\n x1 = int(x * int(WINDOW_W/2) + int(WINDOW_W/2))\n y1 = int(y * int(WINDOW_H/2) + int(WINDOW_H/2))\n return (x1, y1)\n\ndef draw_dataset(screen, dataset):\n for dot in dataset:\n x,y = toc_ds(dot.c[0], dot.c[1])\n pygame.draw.circle(screen, pygame.Color(0,125,125), (x,y), 3)\n\nclass Dot:\n def __init__(self, cluster, coords):\n self.cluster = int(cluster)\n self.c = np.array(coords)\n self.pred_cluster = -1\n\ndataset = []\nwith open('2d_dataset.csv') as csvf:\n reader = csv.reader(csvf)\n for row in reader:\n cluster = row[0]\n coords = [float(c) for c in row[1:-1]]\n dataset.append(Dot(cluster, coords))\ndataset = np.array(dataset)\ndataset_clusters = [x.cluster for x in dataset]\n\nclusters = {}\n\nR = 0.275\n\npygame.init()\nscreen = pygame.display.set_mode((WINDOW_W, WINDOW_H))\nFPS=15\nclock = pygame.time.Clock()\nrunning = True\ncluster_no = 0\npred_dataset = []\nwhile running:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n running = False\n clock.tick(FPS)\n\n screen.fill((0,0,0))\n draw_dataset(screen, dataset)\n pygame.display.update()\n\n if len(dataset) == 0:\n for cent, nei in clusters.items():\n x,y = toc_ds(cent.c[0],cent.c[1])\n pygame.draw.circle(screen, (0,255,0), (x,y), int(R*WINDOW_H/2))\n for _,n in nei.items():\n x,y = toc_ds(n.c[0],n.c[1])\n pygame.draw.circle(screen, (0,0,255), (x,y), 3)\n pygame.display.update()\n continue\n\n\n cur_d_ind = random.randint(0, len(dataset)-1)\n cur_d = dataset[cur_d_ind]\n neibs = {cur_d_ind:cur_d}\n\n prev_center = Dot(999, (999,999))\n center = None\n center_ind = 0\n neibs_to_delete = {}\n while center != prev_center and running:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n print('quitting')\n running = False\n break\n screen.fill((0,0,0))\n draw_dataset(screen, dataset)\n ind = 0\n for d in dataset:\n if center == None:\n dist = np.linalg.norm(cur_d.c - d.c)\n else:\n dist = np.linalg.norm(center.c - d.c)\n if dist <= R:\n neibs[ind] = d\n ind += 1\n\n for _,v in neibs.items():\n x,y = toc_ds(v.c[0], v.c[1])\n pygame.draw.circle(screen, (255,0,0), (x,y), 3)\n\n indexes = [k for k,v in neibs.items()]\n\n prev_center = center\n center_ind = random.randint(0, len(indexes)-1)\n center = neibs[indexes[center_ind]]\n cur_dist = 999999\n\n for d_ind, d in neibs.items():\n total_dist = 0\n for _, a in neibs.items():\n total_dist += np.linalg.norm(d.c - a.c)\n if total_dist < cur_dist:\n center = d\n center_ind = d_ind\n cur_dist = total_dist\n\n neibs_to_delete = dict(neibs)\n neibs = {center_ind:center}\n\n pygame.display.update()\n\n for _,n in neibs_to_delete.items():\n n.pred_cluster = cluster_no\n cluster_no += 1\n clusters[center] = neibs_to_delete\n pred_dataset += [x.pred_cluster for k,x in neibs_to_delete.items()]\n to_delete = [ind for ind, _ in neibs_to_delete.items()]\n dataset = np.delete(dataset, to_delete)\n\n\nprint('Adjusted Rand score:', metrics.adjusted_rand_score(dataset_clusters, pred_dataset))\nprint('Mutual Information based score:', 
metrics.adjusted_mutual_info_score(dataset_clusters, pred_dataset))\nprint('V-measure score:', metrics.v_measure_score(dataset_clusters, pred_dataset))\nprint('Fowlkes-Mallows score:', metrics.fowlkes_mallows_score(dataset_clusters, pred_dataset))","sub_path":"forel.py","file_name":"forel.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"547258007","text":"#\n# Hello World server in Python\n# Binds REP socket to tcp://*:5555\n# Expects b\"Hello\" from client, replies with b\"World\"\n#\n\nimport time\nimport zmq\nimport sys\n\nport = \"5556\"\nif len(sys.argv) > 1:\n port = sys.argv[1]\n int(port)\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind(\"tcp://*:5555\")\nsocket.bind(\"tcp://*:%s\" % port)\n\nwhile True:\n # Wait for next request from client\n message = socket.recv()\n print(\"Received request: %s\" % message)\n\n # Do some 'work'\n time.sleep(1)\n\n # Send reply back to\n socket.send(\"World from %s\" % port)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"503431847","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process('TESTING')\n\n#load run conditions\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContentCosmics_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\n#get hemispheres and MET\nprocess.load(\"RecoMET.METProducers.PFMET_cfi\")\nprocess.load(\"HLTriggerOffline.SUSYBSM.razorHemispheres_cff\")\n\n#define input\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(50) )\nprocess.source = cms.Source(\"PoolSource\",\n# fileNames = cms.untracked.vstring('file:/afs/cern.ch/user/a/anwang/work/HLT/CMSSW_7_2_1/src/HLTriggerOffline/SUSYBSM/test/samples/TTbarLepton_13_Razor7e33_1_1_dqQ.root',\n# 'file:/afs/cern.ch/user/a/anwang/work/HLT/CMSSW_7_2_1/src/HLTriggerOffline/SUSYBSM/test/samples/TTbarLepton_13_Razor7e33_2_1_Oup.root',\n# 'file:/afs/cern.ch/user/a/anwang/work/HLT/CMSSW_7_2_1/src/HLTriggerOffline/SUSYBSM/test/samples/TTbarLepton_13_Razor7e33_3_1_eiw.root',\n# 'file:/afs/cern.ch/user/a/anwang/work/HLT/CMSSW_7_2_1/src/HLTriggerOffline/SUSYBSM/test/samples/TTbarLepton_13_Razor7e33_4_1_srx.root',\n# 'file:/afs/cern.ch/user/a/anwang/work/HLT/CMSSW_7_2_1/src/HLTriggerOffline/SUSYBSM/test/samples/TTbarLepton_13_Razor7e33_5_1_IBN.root'\n fileNames = cms.untracked.vstring('root://xrootd.unl.edu//store/user/amwang/11_Calo/TTbar_Lepton_New_Calo_1_1_wkN.root',\n 'root://xrootd.unl.edu//store/user/amwang/11_Calo/TTbar_Lepton_New_Calo_4_1_lMH.root',\n 'root://xrootd.unl.edu//store/user/amwang/11_Calo/TTbar_Lepton_New_Calo_2_1_KtV.root',\n 'root://xrootd.unl.edu//store/user/amwang/11_Calo/TTbar_Lepton_New_Calo_5_1_Gik.root',\n 'root://xrootd.unl.edu//store/user/amwang/11_Calo/TTbar_Lepton_New_Calo_3_1_Pmd.root'\n )\n)\n\n#TFileService for output \nprocess.TFileService = cms.Service(\"TFileService\", \n fileName = 
cms.string(\"razor_11_11_Calo_TTbar_Lepton.root\"),\n closeFileFast = cms.untracked.bool(True)\n)\n\n#get global tag\nfrom Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc_GRun', '')\n\n#create AK4 charged-hadron subtracted jets\nprocess.load(\"CommonTools.ParticleFlow.pfNoPileUpJME_cff\")\nfrom RecoJets.Configuration.RecoPFJets_cff import ak4PFJetsCHS\nfrom RecoJets.JetProducers.ak4PFJets_cfi import ak4PFJets\nprocess.ak4PFJets = ak4PFJets.clone()\nprocess.ak4PFJetsCHS = ak4PFJets.clone(src = 'pfNoPileUpJME', doAreaFastjet = True)\n\n#declare analyzer module\nprocess.razorTriggerAnalysis = cms.EDAnalyzer(\"RazorTriggerAnalyzer\",\n trigSummary = cms.InputTag(\"hltTriggerSummaryAOD\"),\n pfMETCollection = cms.InputTag(\"pfMet\"),\n pfJetCollection = cms.InputTag(\"ak4PFJetsCHS\"),\n TriggerResults = cms.InputTag('TriggerResults','','reHLT'),\n TriggerPath = cms.string('HLT_RsqMR300_Rsq0p09_MR200_v1'),\n TriggerFilter = cms.InputTag('hltRsqMR300Rsq0p09MR200', '', 'reHLT'), #the last filter in the path\n #CaloFilter = cms.InputTag('hltRsqMRNoMinRsqNoMinMRNoMinCalo', '', 'reHLT'), #filter implementing cuts on calo MR and Rsq\n CaloFilter = cms.InputTag('hltRsqMR200Rsq0p01MR100Calo', '', 'reHLT'), #filter implementing cuts on calo MR and Rsq \n hemispheres = cms.InputTag('hemispheres')\n )\n\n#define messagelogger (controls verbosity of the module)\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n destinations = cms.untracked.vstring('detailedInfo','critical','cerr'),\n critical = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')),\n detailedInfo = cms.untracked.PSet(threshold = cms.untracked.string('INFO') ),\n cerr = cms.untracked.PSet(threshold = cms.untracked.string('WARNING') )\n)\n\nprocess.run_module = cms.Path(process.pfNoPileUpJMESequence*process.ak4PFJets*process.ak4PFJetsCHS*cms.ignore(process.hemispheres)*process.pfMet*process.razorTriggerAnalysis)\n","sub_path":"test/RazorTriggerAnalyzer.py","file_name":"RazorTriggerAnalyzer.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"425680370","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nimport sys\nfrom abc import abstractmethod\nfrom typing import Iterator, NamedTuple, Any, List\n\nimport xgboost as xgb\nfrom scipy.sparse import csr_matrix\n\nfrom . 
import config_fields\nfrom .data_source import DataSource\n\nlogger = logging.getLogger(__name__)\n\n\nclass XGBoostData(NamedTuple):\n d_matrix: xgb.DMatrix\n append_info: List[List[Any]] = None\n\n\nclass XGBoostDataBuilder:\n \"\"\"\n Base class of XGBoost data builder, producing XGBoostData from XGBoostRecord (output of data_source.reader)\n in form of Generator.\n \"\"\"\n\n def __init__(self, data_reader: DataSource,\n batch_size: int = -1,\n logging_interval: int = 2000):\n \"\"\"\n :param data_reader: specific DataSource of input data\n :param batch_size: batch size of XGBoostData, default (-1) means full batch\n :param logging_interval: record interval of progress logging (unit: seconds)\n \"\"\"\n\n self.num_features = data_reader.num_features\n self._iterator_fn = lambda: data_reader.read()\n self.batch_size = batch_size\n self.logging_interval = logging_interval\n\n @abstractmethod\n def build(self) -> Iterator[XGBoostData]:\n pass\n\n @classmethod\n def create(cls, config: config_fields.DataBuilderFields,\n data_reader: DataSource):\n \"\"\"\n factory of XGBoostDataBuilder\n :param config: configs.DataBuilderFields\n :param data_reader: specific DataSource of input data\n :return: a XGBoostDataBuilder instance\n \"\"\"\n\n if config.name == \"CSRMatrixBuilder\":\n return CSRMatBuilder(data_reader, config.batch_size)\n else:\n raise NameError(\"Unknown DMatrixBuilder %s!\" % config.name)\n\n\nclass CSRMatBuilder(XGBoostDataBuilder):\n \"\"\"\n Convert XGBoostRecords into scipy.csr_matrix, and then build xgb.DMatrix from csr_matrix.\n \"\"\"\n\n def build(self) -> Iterator[XGBoostData]:\n for mat, label, group, weight, base_margin, append_info in self._build_csr():\n d_matrix = xgb.DMatrix(mat)\n if label:\n d_matrix.set_label(label)\n if group:\n d_matrix.set_group(group)\n if weight:\n d_matrix.set_weight(weight)\n if base_margin:\n d_matrix.set_base_margin(base_margin)\n yield XGBoostData(d_matrix, append_info)\n\n def _build_csr(self) -> Iterator:\n row_size = self.batch_size if self.batch_size > 0 else sys.maxsize\n col_size = self.num_features\n data = []\n ind = []\n indptr = [0]\n label_buf = []\n group_buf = []\n weight_buf = []\n base_margin_buf = []\n append_info_buf = []\n count = 0\n for rcd in self._iterator_fn():\n data_length = len(rcd.indices)\n ind.extend(rcd.indices)\n data.extend(rcd.values)\n last_ind = indptr[-1]\n indptr.append(last_ind + data_length)\n if rcd.label is not None:\n label_buf.append(rcd.label)\n if rcd.weight is not None:\n weight_buf.append(rcd.weight)\n if rcd.group is not None:\n group_buf.append(rcd.group)\n if rcd.base_margin is not None:\n base_margin_buf.append(rcd.base_margin)\n if rcd.append_info is not None and len(rcd.append_info) > 0:\n append_info_buf.append(rcd.append_info)\n count += 1\n if count % min([self.logging_interval, row_size]) == 0:\n logging.info('CSRMatrixBuilder has fetched %d records.' 
% count)\n if count % row_size == 0:\n # if col_size == 0, let csr_matrix do shape inference.\n if col_size > 0:\n mat = csr_matrix((data, ind, indptr), [row_size, col_size])\n else:\n mat = csr_matrix((data, ind, indptr))\n data.clear()\n ind.clear()\n indptr.clear()\n indptr.append(0)\n label = label_buf[:]\n label_buf.clear()\n group = group_buf[:]\n group_buf.clear()\n weight = weight_buf[:]\n weight_buf.clear()\n base_margin = base_margin_buf[:]\n base_margin_buf.clear()\n append_info = append_info_buf[:]\n append_info_buf.clear()\n yield mat, label, group, weight, base_margin, append_info\n\n if data:\n if col_size > 0:\n mat = csr_matrix((data, ind, indptr), [count % row_size, col_size])\n else:\n mat = csr_matrix((data, ind, indptr))\n yield mat, label_buf, group_buf, weight_buf, base_margin_buf, append_info_buf\n","sub_path":"xgboost-launcher/launcher/data_builder.py","file_name":"data_builder.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"443262313","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 27 19:50:20 2019\n\n@author: joejo\n\nMailroom Part III\n\"\"\"\n\nimport sys\nimport os\n\ndonor_db = {'Bill Gates': [4000.32, 35.00, 17899.99],\n 'Oprah Winfrey': [9999.99, 9999.99],\n 'Rob Schneider': [400.00, 55.00, 800.00],\n 'Donald Trump': [0.32, 5.00],\n 'Denise Richards': [6040.77]\n }\n\nprompt = \"\\n\".join((\"\\nWelcome to the mailroom!\",\n \"Please choose from below options:\",\n \"1 - Enter a new donation and send a thank you\",\n \"2 - Create a Report\",\n \"3 - Send Thank Yous to All Donors\",\n \"4 - quit\",\n \">>> \"))\n\n\ndef get_amount(name):\n amount = input(f'Enter the amount that {name} donated or type \"q\" to quit back to the main menu: ')\n if amount == 'q':\n main()\n # add donor and new donation to database under donor name\n try:\n if name in donor_db.keys():\n donor_db[name].append(float(amount))\n else:\n donor_db.update({name: [float(amount)]})\n except ValueError:\n print(\"You must enter either 'q' or a number!\")\n get_amount(name)\n return amount\n\n\ndef build_ty(totals=False):\n line1 = 'Dear Mr/Mrs {name},\\n\\n' \\\n + 'Thank you so much for your donation of ${last:.2f}!'\n line2 = 'This brings your total lifetime donations to ${total:.2f}!'\n line3 = 'We here at RLC (Random Local Charity) really appreciate it!\\n\\n' \\\n + 'Sincerely,\\n\\nBob Saget, CEO of RLC\\n'\n if totals:\n ty_note = '\\n'.join([line1, line2, line3])\n else:\n ty_note = '\\n'.join([line1, line3])\n return ty_note\n\n\ndef thank_you():\n \"\"\"Add a new donation to the database and print a thank you email for the donor\"\"\"\n\n response = 'list'\n # get donor name from user, display donor list if asked\n while response == 'list':\n response = input('\\nWhat is the full name of the donor to whom you '\n + 'would like to send a thank you?\\nAlternatively, '\n + 'type \"list\" for a list of current donors '\n + 'or \"q\" to quit back to main menu.\\n>>> ')\n if response == 'list':\n for _ in donor_db.keys():\n print(_)\n elif response == 'q':\n main()\n else:\n check = input(f'Is \"{response.title()}\" the correct donor name?\\n'\n + 'Please respond \"y\", \"n\", or \"q\" to quit back to main menu: ')\n if check == 'q':\n main()\n elif check != 'y':\n response = 'list'\n name = response.title()\n # get donation amount\n amount = get_amount(name)\n # format an email thank you and print to the terminal\n donor = {'name': name, 'last': float(amount)}\n ty_note = 
build_ty()\n print('\\n' + ty_note.format(**donor))\n\n\ndef create_report():\n \"\"\"Print donor database, sorted by total donation amount\"\"\"\n\n sorted_db = sorted(donor_db.items(), key=lambda x: sum(x[1]), reverse=True)\n # build header line for report\n header = 'Donor Name | Total Given | Num Gifts | Average Gift'\n separator = '-'*len(header)\n bodyline = '{:20} ${:11.2f} {:9d} ${:12.2f}'\n # build the body of the report; a list of donors and info about their gifts\n body = [bodyline.format(sorted_db[i][0], sum(sorted_db[i][1]),\n len(sorted_db[i][1]),\n sum(sorted_db[i][1])/len(sorted_db[i][1]))\n for i in range(len(sorted_db))]\n # print out the report\n print(header, separator, sep='\\n')\n for i in range(len(sorted_db)):\n print(body[i])\n\n\ndef write_letters():\n # make directory for letters\n try:\n folder = os.mkdir(os.getcwd() + '\\\\letters')\n except FileExistsError:\n folder = os.getcwd() + '\\\\letters'\n # with open thank you (donor name_date)\n for k,v in donor_db.items():\n ty_info = {'name': k,\n 'last': v[-1],\n 'total': sum(v)}\n filename = folder + '\\\\' + ty_info['name'].replace(' ', '_') + '.txt'\n with open(filename, 'w') as letter:\n ty_note = build_ty(True)\n letter.write(ty_note.format(**ty_info))\n\n\ndef main():\n \"\"\"Prompt user to choose next action\"\"\"\n\n while True:\n response = input(prompt) # continuously collect user selection\n # now redirect to feature functions based on the user selection\n user_select = {'1': thank_you, '2': create_report, '3': write_letters,\n '4': sys.exit}\n selection = user_select.get(response, 'Not a valid option!')\n try:\n selection()\n except TypeError:\n print(selection)\n continue\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"students/joejohnsto/lesson05/mailroom3.py","file_name":"mailroom3.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"589485720","text":"import numpy as np\r\nimport numba\r\n\r\n\r\n# gradients for one element of the loss function's sum, don't call this directly\r\n@numba.jit(nopython=True)\r\ndef ABCD_grad(xa, ya, xb, yb, xc, yc, xd, yd, dab, dac, dad, dbc, dbd, dcd, pab):\r\n sum_dist = dab + dac + dad + dbc + dbd + dcd\r\n \r\n dr_ab = (dab/sum_dist)\r\n\r\n gxA = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((xa-xb)/dab + (xa-xc)/dac + (xa-xd)/dad ) - (xa-xb)/dab )\r\n gyA = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((ya-yb)/dab + (ya-yc)/dac + (ya-yd)/dad ) - (ya-yb)/dab )\r\n\r\n gxB = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((xb-xa)/dab + (xb-xc)/dbc + (xb-xd)/dbd ) - (xb-xa)/dab )\r\n gyB = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((yb-ya)/dab + (yb-yc)/dbc + (yb-yd)/dbd ) - (yb-ya)/dab )\r\n\r\n gxC = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((xc-xa)/dac + (xc-xb)/dbc + (xc-xd)/dcd ))\r\n gyC = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((yc-ya)/dac + (yc-yb)/dbc + (yc-yd)/dcd ))\r\n\r\n gxD = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((xd-xa)/dad + (xd-xb)/dbd + (xd-xc)/dcd ))\r\n gyD = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((yd-ya)/dad + (yd-yb)/dbd + (yd-yc)/dcd ))\r\n\r\n return gxA, gyA, gxB, gyB, gxC, gyC, gxD, gyD\r\n\r\n# quartet gradients for a 2D projection, Dhd contains the top-right triangle of the HD distances\r\n# the points are named a,b,c and d internaly to keep track of who is who\r\n# points shape: (4, 2)\r\n# Dhd shape : (6,)\r\n@numba.jit(nopython=True)\r\ndef compute_quartet_grads(points, Dhd):\r\n xa, ya = 
points[0]\r\n xb, yb = points[1]\r\n xc, yc = points[2]\r\n xd, yd = points[3]\r\n\r\n # LD distances, add a small number just in case\r\n d_ab = np.sqrt((xa-xb)**2 + (ya-yb)**2) + 1e-12\r\n d_ac = np.sqrt((xa-xc)**2 + (ya-yc)**2) + 1e-12\r\n d_ad = np.sqrt((xa-xd)**2 + (ya-yd)**2) + 1e-12\r\n d_bc = np.sqrt((xb-xc)**2 + (yb-yc)**2) + 1e-12\r\n d_bd = np.sqrt((xb-xd)**2 + (yb-yd)**2) + 1e-12\r\n d_cd = np.sqrt((xc-xd)**2 + (yc-yd)**2) + 1e-12\r\n\r\n # HD distances\r\n pab, pac, pad, pbc, pbd, pcd = Dhd[0], Dhd[1], Dhd[2], Dhd[3], Dhd[4], Dhd[5]\r\n\r\n # for each element of the sum: use the same gradient function and just permute the points given in input\r\n gxA, gyA, gxB, gyB, gxC, gyC, gxD, gyD = ABCD_grad(\r\n xa, ya, xb, yb, xc, yc, xd, yd,\\\r\n d_ab, d_ac, d_ad, d_bc, d_bd, d_cd,\\\r\n pab)\r\n\r\n\r\n gxA2, gyA2, gxC2, gyC2, gxB2, gyB2, gxD2, gyD2 = ABCD_grad(\r\n xa, ya, xc, yc, xb, yb, xd, yd,\\\r\n d_ac, d_ab, d_ad, d_bc, d_cd, d_bd,\\\r\n pac)\r\n\r\n\r\n gxA3, gyA3, gxD3, gyD3, gxC3, gyC3, gxB3, gyB3 = ABCD_grad(\r\n xa, ya, xd, yd, xc, yc, xb, yb,\\\r\n d_ad, d_ac, d_ab, d_cd, d_bd, d_bc,\\\r\n pad)\r\n\r\n\r\n gxB4, gyB4, gxC4, gyC4, gxA4, gyA4, gxD4, gyD4 = ABCD_grad(\r\n xb, yb, xc, yc, xa, ya, xd, yd,\\\r\n d_bc, d_ab, d_bd, d_ac, d_cd, d_ad,\\\r\n pbc)\r\n\r\n\r\n gxB5, gyB5, gxD5, gyD5, gxA5, gyA5, gxC5, gyC5 = ABCD_grad(\r\n xb, yb, xd, yd, xa, ya, xc, yc,\\\r\n d_bd, d_ab, d_bc, d_ad, d_cd, d_ac,\\\r\n pbd)\r\n\r\n\r\n gxC6, gyC6, gxD6, gyD6, gxA6, gyA6, gxB6, gyB6 = ABCD_grad(\r\n xc, yc, xd, yd, xa, ya, xb, yb,\\\r\n d_cd, d_ac, d_bc, d_ad, d_bd, d_ab,\\\r\n pcd)\r\n\r\n\r\n gxA = gxA + gxA2 + gxA3 + gxA4 + gxA5 + gxA6\r\n gyA = gyA + gyA2 + gyA3 + gyA4 + gyA5 + gyA6\r\n\r\n gxB = gxB + gxB2 + gxB3 + gxB4 + gxB5 + gxB6\r\n gyB = gyB + gyB2 + gyB3 + gyB4 + gyB5 + gyB6\r\n\r\n gxC = gxC + gxC2 + gxC3 + gxC4 + gxC5 + gxC6\r\n gyC = gyC + gyC2 + gyC3 + gyC4 + gyC5 + gyC6\r\n\r\n gxD = gxD + gxD2 + gxD3 + gxD4 + gxD5 + gxD6\r\n gyD = gyD + gyD2 + gyD3 + gyD4 + gyD5 + gyD6\r\n\r\n return gxA, gyA, gxB, gyB, gxC, gyC, gxD, gyD\r\n","sub_path":"SQuaD MDS/gradients.py","file_name":"gradients.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"607627410","text":"import json\nimport matplotlib.pyplot as plt\nimport numpy\nimport pdb\n\n\nresults = {}\nresults2 = {}\nfullResults = json.loads(\n open(\"Full_Benchmark_Results.json\", \"r\").read())\n\n\nresults[\"ortools\"] = json.loads(\n open(\"ortoolsResults.json\", \"r\").read())\nresults[\"localsolver\"] = json.loads(\n open(\"localsolverResults.json\", \"r\").read())\n# results[\"ortools\"] = json.loads(\n# open(\"RUN_2022-03-14_local_localsolver.json\", \"r\").read())\n# results[\"localsolver\"] = json.loads(\n# open(\"RUN_2022-03-14_local_ortools.json\", \"r\").read())\nREPEAT = 0\ncolors = {\"ortools\": \"lime\", \"localsolver\": \"red\"}\nstats = [\"unassigned\", \"cost\", \"total_time\", \"total_distance\"]\n# , \"cost\",\n# \"total_time\", \"total_distance\"]\n\n\nfig = plt.figure()\n\naxs = fig.subplots(3, 2, sharex='col')\naxs = axs.flat\n\nfor statistic in stats:\n data_y = []\n data_x = None\n ax = next(axs)\n gap = []\n for solver, result in results.items():\n data_x = result[\"stats\"].keys()\n data_y.append(\n # * (1.5 if solver==\"localsolver\" else 1)\n [list(stat.values())[REPEAT][statistic]\n for stat in result[\"stats\"].values()]\n )\n ax.plot(\n data_x,\n data_y[-1],\n linestyle='none',\n marker='o',\n 
c=colors[solver]\n )\n if len(data_y) == 2:\n ax.fill_between(data_x,\n [q if q != None else 0 for q in data_y[0]],\n [q if q != None else 0 for q in data_y[1]], color='grey')\n ax.set(ylabel=statistic)\n\n\nfig.tight_layout()\nplt.show()\n\n\n# for opt in obj_python[\"options\"]:\n# print(opt)\n\n# for stat in obj_python[\"stats\"]:\n# listInstances.append(stat)\n# print(obj_python[\"stats\"][stat])\n","sub_path":"benchmark/result_parser.py","file_name":"result_parser.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"652691847","text":"import pickle\n\nfrom tokenizers import Tokenizer\nfrom tokenizers.models import BPE\nfrom tokenizers.normalizers import Normalizer, BertNormalizer, Sequence, Lowercase, Strip\n\n\nclass TestBertNormalizer:\n def test_instantiate(self):\n assert isinstance(BertNormalizer(), Normalizer)\n assert isinstance(BertNormalizer(), BertNormalizer)\n assert isinstance(pickle.loads(pickle.dumps(BertNormalizer())), BertNormalizer)\n\n def test_strip_accents(self):\n normalizer = BertNormalizer(\n strip_accents=True, lowercase=False, handle_chinese_chars=False, clean_text=False\n )\n\n output = normalizer.normalize_str(\"Héllò\")\n assert output == \"Hello\"\n\n def test_handle_chinese_chars(self):\n normalizer = BertNormalizer(\n strip_accents=False, lowercase=False, handle_chinese_chars=True, clean_text=False\n )\n\n output = normalizer.normalize_str(\"你好\")\n assert output == \" 你 好 \"\n\n def test_clean_text(self):\n normalizer = BertNormalizer(\n strip_accents=False, lowercase=False, handle_chinese_chars=False, clean_text=True\n )\n\n output = normalizer.normalize_str(\"\\ufeffHello\")\n assert output == \"Hello\"\n\n def test_lowercase(self):\n normalizer = BertNormalizer(\n strip_accents=False, lowercase=True, handle_chinese_chars=False, clean_text=False\n )\n\n output = normalizer.normalize_str(\"Héllò\")\n assert output == \"héllò\"\n\n\nclass TestSequence:\n def test_instantiate(self):\n assert isinstance(Sequence([]), Normalizer)\n assert isinstance(Sequence([]), Sequence)\n assert isinstance(pickle.loads(pickle.dumps(Sequence([]))), Sequence)\n\n def test_can_make_sequences(self):\n normalizer = Sequence([Lowercase(), Strip()])\n\n output = normalizer.normalize_str(\" HELLO \")\n assert output == \"hello\"\n\n\nclass TestLowercase:\n def test_instantiate(self):\n assert isinstance(Lowercase(), Normalizer)\n assert isinstance(Lowercase(), Lowercase)\n assert isinstance(pickle.loads(pickle.dumps(Lowercase())), Lowercase)\n\n def test_lowercase(self):\n normalizer = Lowercase()\n\n output = normalizer.normalize_str(\"HELLO\")\n assert output == \"hello\"\n\n\nclass TestStrip:\n def test_instantiate(self):\n assert isinstance(Strip(), Normalizer)\n assert isinstance(Strip(), Strip)\n assert isinstance(pickle.loads(pickle.dumps(Strip())), Strip)\n\n def test_left_strip(self):\n normalizer = Strip(left=True, right=False)\n\n output = normalizer.normalize_str(\" hello \")\n assert output == \"hello \"\n\n def test_right_strip(self):\n normalizer = Strip(left=False, right=True)\n\n output = normalizer.normalize_str(\" hello \")\n assert output == \" hello\"\n\n def test_full_strip(self):\n normalizer = Strip(left=True, right=True)\n\n output = normalizer.normalize_str(\" hello \")\n assert output == 
\"hello\"\n","sub_path":"bindings/python/tests/bindings/test_normalizers.py","file_name":"test_normalizers.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"528929497","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom puppies import Base, Shelter, Puppy, Adopter\nimport datetime\n\nengine = create_engine('sqlite:///puppyshelter.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n# print('\\nAll puppies, ordered by name')\n# puppies_all = session.query(Puppy).order_by('name')\n# print('Nuber of puppy/ies found: '+str(puppies_all.count()))\n# for puppy in puppies_all:\n\t# print (puppy.name)\n\n# print('\\nAll puppies youngar than 6 month, ordered by descending age')\n# puppies_young = session.query(Puppy).filter(Puppy.dateOfBirth > (datetime.datetime.now() - datetime.timedelta(6*30))).order_by('-dateOfBirth')\n# print('Nuber of puppy/ies found: '+str(puppies_young.count()))\n# for puppy in puppies_young:\n# \tprint (puppy.name + ' ' + str(puppy.dateOfBirth))\n\n# print('\\nAll puupies, ordered by ascending weight')\n# puppies_weight = session.query(Puppy).order_by('weight')\n# print('Nuber of puppy/ies found: '+str(puppies_weight.count()))\n# for puppy in puppies_weight:\n# \tweight = round(puppy.weight,1)\n# \tprint (puppy.name + ' '+ str(weight))\n\n# print('\\nAll puppies, grouped by corresponding shelter')\n# puppies_shelter = session.query(Puppy).order_by(Puppy.shelter_id)\n# print('Nuber of puppy/ies found: '+str(puppies_shelter.count()))\n# for puppy in puppies_shelter:\n\t# print(puppy.name + ' / ' + puppy.shelter.name)\n\t\nprint('\\nAll adopters with their puppies')\nadopters = session.query(Adopter).all()\nfor adopter in adopters:\n\tprint('\\n'+adopter.firstname+' / '+adopter.lastname)\n\tfor puppy in adopter.puppies:\n\t\tprint(puppy.name)\n","sub_path":"vagrant/puppyshelter/puppyqueries.py","file_name":"puppyqueries.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"346072532","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/uncompyle6/util.py\n# Compiled at: 2020-04-20 22:50:15\ntry:\n from math import copysign\n\n def is_negative_zero(n):\n \"\"\"Returns true if n is -0.0\"\"\"\n return n == 0.0 and copysign(1, n) == -1\n\n\nexcept:\n\n def is_negative_zero(n):\n return False\n\n\nfrom uncompyle6 import PYTHON_VERSION\n\ndef better_repr(v, version):\n \"\"\"Work around Python's unorthogonal and unhelpful repr() for primitive float\n and complex.\"\"\"\n if isinstance(v, float):\n if str(v) in frozenset(['nan', '-nan', 'inf', '-inf']):\n return \"float('%s')\" % v\n elif is_negative_zero(v):\n return '-0.0'\n return repr(v)\n elif isinstance(v, complex):\n real = better_repr(v.real, version)\n imag = better_repr(v.imag, version)\n return 'complex(%s, %s)' % (real, imag)\n elif isinstance(v, tuple):\n if len(v) == 1:\n return '(%s,)' % better_repr(v[0], version)\n return '(%s)' % (', ').join((better_repr(i, version) for i in v))\n elif PYTHON_VERSION < 3.0 and isinstance(v, long):\n s = repr(v)\n if version >= 3.0 and s[(-1)] == 'L':\n return s[:-1]\n else:\n return s\n elif isinstance(v, list):\n l = better_repr(v)\n if len(v) == 1:\n return 
'[%s,]' % better_repr(v[0], version)\n return '[%s]' % (', ').join((better_repr(i) for i in v))\n else:\n return repr(v)","sub_path":"pycfiles/uncompyle6-3.6.6-py2.4/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"418852386","text":"from mahjong.hand_calculating.hand import HandCalculator\nfrom mahjong.meld import Meld\nfrom mahjong.hand_calculating.hand_config import HandConfig, OptionalRules\nfrom mahjong.tile import TilesConverter as tc\nfrom mahjong.utils import is_chi, is_pon, is_honor\nfrom enum import IntEnum\nfrom copy import deepcopy\n\nMAX_TILE_NUM = 4 * 4 + 2\nMIN_TILE_NUM = 3 * 4 + 2\n\n\nclass TileId(IntEnum):\n # BACK\n BACK = 99\n # MAN\n MAN1 = 0\n MAN2 = 1\n MAN3 = 2\n MAN4 = 3\n MAN5 = 4\n MAN6 = 5\n MAN7 = 6\n MAN8 = 7\n MAN9 = 8\n # PIN\n PIN1 = 9\n PIN2 = 10\n PIN3 = 11\n PIN4 = 12\n PIN5 = 13\n PIN6 = 14\n PIN7 = 15\n PIN8 = 16\n PIN9 = 17\n # SOU\n SOU1 = 18\n SOU2 = 19\n SOU3 = 20\n SOU4 = 21\n SOU5 = 22\n SOU6 = 23\n SOU7 = 24\n SOU8 = 25\n SOU9 = 26\n # TON,NAN,SHA,PEI\n TON = 27\n NAN = 28\n SHA = 29\n PEI = 30\n # HAKU,HATSU,CHUN\n HAKU = 31\n HATSU = 32\n CHUN = 33\n\n\ndef score(boxes, tsumo, y_divider, round_wind=None, player_wind=None):\n \"\"\"\n +---------------+\n | _ _ _ _ _ |\n | |_|_|_|_| |_| | classfied to `open`\n | |\n |---------------| <- y_divider\n | _ _ _ _ _ _ |\n | |_|_|_|_|_|_| | classfied to `closed`\n | |\n +---------------+\n boxes:\n array of box which elements are: tile id, xmin, ymin, xmax and ymax.\n e.g. [[0, 100, 200, 150, 250],\n [2, 200, 300, 350, 350],\n [...]]\n the right most of the tile is assumed as win tile, and ron or tsumo is specified by tsumo.\n tile id is:\n man1...man9: 0...8\n pin1...pin9: 9...17\n sou1...sou9: 18...26\n ton, nan, sha, pei: 27, 28, 29, 30\n haku, hatsu, chun: 31, 32, 33\n y_divider:\n tiles are divided into two types; open or closed.\n if the position is upper than y_divider, the tile is classfied as open.\n \"\"\"\n open_boxes, win_box, closed_boxes = classify_boxes(boxes, y_divider)\n if open_boxes is None:\n return None\n\n melds = make_melds(open_boxes)\n if melds is None:\n return None\n\n # 4-tile-set of kan should be trimed to 3-tile-set.\n open_kan = [x.tiles_34[0] for x in melds if x.type == Meld.KAN and x.opened]\n open_tiles = boxes_to_tiles(open_boxes, open_kan)\n if open_tiles is None:\n return None\n\n win_tile = boxes_to_tiles([win_box])\n if win_tile is None:\n return None\n\n closed_kan_melds, closed_boxes_replaced = make_closed_kan_melds(closed_boxes)\n\n # 4-tile-set of kan should be trimed to 3-tile-set.\n closed_kan = [x.tiles_34[0] for x in closed_kan_melds]\n closed_tiles = boxes_to_tiles(closed_boxes_replaced, closed_kan)\n if closed_tiles is None:\n return None\n\n hand = closed_tiles + open_tiles + win_tile\n # print(\"win:\\n\", tc.to_one_line_string(win_tile))\n # print(\"hand:\\n\", tc.to_one_line_string(hand))\n # print(\"melds:\\n\", melds + closed_kan_melds)\n for meld in melds:\n # print(tc.to_one_line_string(meld.tiles))\n pass\n\n options = OptionalRules(has_open_tanyao=True, kazoe_limit=HandConfig.KAZOE_NO_LIMIT)\n config = HandConfig(\n is_tsumo=tsumo,\n player_wind=player_wind,\n round_wind=round_wind,\n options=options,\n )\n # print(\"tsumo:\", tsumo, \" player:\", player_wind, \" round:\", round_wind)\n\n result = HandCalculator().estimate_hand_value(\n hand, win_tile[0], melds=melds + closed_kan_melds, config=config\n )\n # 
print_hand_result(result)\n return result\n\n\ndef make_melds(open_boxes):\n melds = []\n pos = 0\n rest = len(open_boxes)\n while rest > 0:\n boxes = open_boxes[pos:]\n \"\"\"\n 3: (3)\n 4: (4)\n 6: (3 3)\n 7: (4 3 | 3 4)\n 8: (4 4)\n 9: (3 3 3)\n *10: (4 3 3 | 3 4 3 | 3 3 4) 5555678888\n 11: (4 4 3 | 4 3 4 | 3 4 4) 5555678\n 12: (3 3 3 3 | 4 4 4) 111123456789\n *13: (4 3 3 3 | 3 4 3 3 | 3 3 4 3 | 3 3 3 4)\n *14: (4 4 3 3 | 4 3 4 3 | 4 3 3 4 | 3 4 4 3 | 3 4 3 4 | 3 3 4 4)\n 15: (4 4 4 3 | 4 4 3 4 | 4 3 4 4 | 3 4 4 4)\n 16: (4 4 4 4)\n \"\"\"\n if rest in (3, 6, 9): # multiple of 3 and not multiple of 4\n meld = meld_3_tiles(boxes)\n if meld is None:\n return None\n melds.append(meld)\n pos += 3\n rest -= 3\n elif rest in (4, 8, 16): # multiple of 4 and not multiple 3\n if not boxes_is_kan(boxes):\n print(f\"error: illegal kan({boxes})\")\n return None\n meld = meld_4_tiles(boxes)\n if meld is None:\n return None\n melds.append(meld)\n pos += 4\n rest -= 4\n elif rest in (7, 11, 15): # only one set of 3\n # chi\n if boxes_is_chi(boxes):\n meld = meld_3_tiles(boxes, meld_type=Meld.CHI)\n if meld is None:\n return None\n melds.append(meld)\n pos += 3\n rest -= 3\n # kan (should be checked after chi)\n elif boxes_is_kan(boxes):\n meld = meld_4_tiles(boxes)\n if meld is None:\n return None\n melds.append(meld)\n pos += 4\n rest -= 4\n # pon\n elif boxes_is_pon(boxes):\n meld = meld_3_tiles(boxes, meld_type=Meld.PON)\n if meld is None:\n return None\n melds.append(meld)\n pos += 3\n rest -= 3\n else:\n print(\"error: illegal open_boxes({})\".format(rest))\n return None\n elif rest in (12,):\n # kan\n if boxes_is_kan(boxes) and boxes_is_kan(boxes[4 : 4 + 4]):\n meld = meld_4_tiles(boxes)\n if meld is None:\n return None\n melds.append(meld)\n pos += 4\n rest -= 4\n else:\n meld = meld_3_tiles(boxes)\n if meld is None:\n return None\n melds.append(meld)\n pos += 3\n rest -= 3\n elif rest in (10, 13, 14): # two sets of 3 and one set of 4 (at least)\n # e.g. 
2222345555 -> 2222 345 555 or 222 234 5555 (kan-chi-pon)\n kan_elem = boxes[0][0]\n chi_elems = sorted([b[0] for b in boxes[4 : 4 + 3]])\n pon_elem = boxes[4 + 3][0]\n # chi\n if boxes_is_chi(boxes):\n meld = meld_3_tiles(boxes, meld_type=Meld.CHI)\n if meld is None:\n return None\n melds.append(meld)\n pos += 3\n rest -= 3\n # may be kan\n elif boxes_is_kan(boxes):\n if (\n boxes_is_chi(boxes[4 : 4 + 3])\n and boxes_is_pon(boxes[4 + 3 : 4 + 3 + 3])\n and kan_elem + 1 == chi_elems[0]\n and chi_elems[2] == pon_elem\n ):\n # TODO: also pon can be chosen\n meld = meld_4_tiles(boxes)\n if meld is None:\n return None\n melds.append(meld)\n pos += 4\n rest -= 4\n else:\n meld = meld_4_tiles(boxes)\n melds.append(meld)\n pos += 4\n rest -= 4\n # pon\n elif boxes_is_pon(boxes):\n meld = meld_3_tiles(boxes, meld_type=Meld.PON)\n if meld is None:\n return None\n melds.append(meld)\n pos += 3\n rest -= 3\n else:\n print(\"error: illegal open_boxes({})\".format(rest))\n return None\n else:\n print(\"error: illegal open_boxes({})\".format(rest))\n return None\n return melds\n\n\n# All BACK tiles in closed_boxes are updated, if they compose a closed-kan.\ndef make_closed_kan_melds(closed_boxes):\n \"\"\"\n closed_kan is assumed to be consisted of the patterns as follows:\n [back | front | front | back] or [front | back | back | front].\n front tile must be the same.\n \"\"\"\n melds = []\n pos = 0\n rest = len(closed_boxes)\n replaced = closed_boxes[:]\n while rest >= 4:\n boxes = closed_boxes[pos:]\n kan = boxes_is_closed_kan(boxes)\n if kan is not None:\n boxes[0][0] = boxes[1][0] = boxes[2][0] = boxes[3][0] = kan\n meld = meld_4_tiles(boxes, opened=False)\n melds.append(meld)\n replaced[pos][0] = kan\n replaced[pos + 1][0] = kan\n replaced[pos + 2][0] = kan\n replaced[pos + 3][0] = kan\n pos += 4\n rest -= 4\n else:\n pos += 1\n rest -= 1\n return melds, replaced\n\n\ndef classify_boxes(boxes, y_divider):\n open_boxes = []\n closed_boxes = []\n if len(boxes) > MAX_TILE_NUM:\n print(\"error: number of tile is too large: \", len(boxes))\n return None, None, None\n if len(boxes) < MIN_TILE_NUM:\n print(\"error: number of tile is too short: \", len(boxes))\n return None, None, None\n\n # open or closed\n for box in boxes:\n box = deepcopy(box)\n ymin = box[2]\n ymax = box[4]\n ymid = round((ymax + ymin) / 2)\n if ymid <= y_divider:\n open_boxes.append(box)\n else:\n closed_boxes.append(box)\n\n if len(open_boxes) == 0:\n print(\"error: win tile could not be found\")\n return None, None, None\n if len(closed_boxes) == 0:\n print(\"error: closed tile could not be found\")\n return None, None, None\n\n # sort by xmin\n open_boxes = sorted(open_boxes, key=lambda box: box[1])\n closed_boxes = sorted(closed_boxes, key=lambda box: box[1])\n # remove win tile from open_boxes\n win_box = open_boxes.pop(-1)\n\n return open_boxes, win_box, closed_boxes\n\n\ndef to_136_string(boxes, kan=None):\n man = \"\"\n pin = \"\"\n sou = \"\"\n honors = \"\"\n labels = dict(zip(list(range(9)), [str(i) for i in list(range(1, 9 + 1))]))\n kan = [] if kan is None else kan\n\n def delete_kan_if_hit(t):\n for i in range(len(kan)):\n if t == kan[i]:\n del kan[i]\n return True\n return False\n\n for box in boxes:\n t = box[0]\n res = delete_kan_if_hit(t)\n if res:\n continue\n if t < 9:\n man += labels[t]\n elif t < 18:\n pin += labels[t - 9]\n elif t < 27:\n sou += labels[t - 18]\n elif t < 34:\n honors += labels[t - 27]\n else:\n print(\"error: illegal tile({}) exists\".format(t))\n return None, None, None, None\n return man, pin, 
sou, honors\n\n\ndef boxes_to_tiles(boxes, kan=None):\n man, pin, sou, honors = to_136_string(boxes, kan)\n if man is None:\n return None\n # print('man', man, 'pin', pin, 'sou', sou, 'honors', honors)\n return tc.string_to_136_array(man=man, pin=pin, sou=sou, honors=honors)\n\n\ndef boxes_is_chi(boxes):\n items = sorted([t[0] for t in boxes[0:3]])\n if is_honor(items[0]) or is_honor(items[2]):\n return False\n return is_chi(items)\n\n\ndef boxes_is_pon(boxes):\n items = [t[0] for t in boxes[0:3]]\n return is_pon(items)\n\n\ndef boxes_is_kan(boxes):\n return boxes[0][0] == boxes[1][0] == boxes[2][0] == boxes[3][0]\n\n\ndef boxes_is_closed_kan(boxes):\n if boxes[0][0] == boxes[3][0] and boxes[1][0] == boxes[2][0] == TileId.BACK:\n return boxes[0][0]\n if boxes[0][0] == boxes[3][0] == TileId.BACK and boxes[1][0] == boxes[2][0]:\n return boxes[1][0]\n return None\n\n\ndef meld_3_tiles(boxes, meld_type=None):\n man, pin, sou, honors = to_136_string(boxes[:3])\n if man is None:\n return None\n # print('man', man, 'pin', pin, 'sou', sou, 'honors', honors)\n tiles = tc.string_to_136_array(man=man, pin=pin, sou=sou, honors=honors)\n if meld_type is None:\n if boxes_is_pon(boxes):\n meld_type = Meld.PON\n elif boxes_is_chi(boxes):\n meld_type = Meld.CHI\n else:\n print(\"error: neither pon nor chi({})\".format(boxes))\n return None\n # print(tc.to_one_line_string(tiles))\n return Meld(meld_type=meld_type, tiles=tiles)\n\n\ndef meld_4_tiles(boxes, opened=True):\n man, pin, sou, honors = to_136_string(boxes[:4])\n if man is None:\n return None\n # print('man', man, 'pin', pin, 'sou', sou, 'honors', honors)\n tiles = tc.string_to_136_array(man=man, pin=pin, sou=sou, honors=honors)\n # print(tc.to_one_line_string(tiles))\n return Meld(meld_type=Meld.KAN, tiles=tiles, opened=opened)\n\n\ndef print_hand_result(hand_result):\n if hand_result.error is not None:\n print(hand_result.error)\n else:\n print(hand_result.han, hand_result.fu)\n print(hand_result.cost)\n print(hand_result.cost[\"main\"])\n print(hand_result.yaku)\n if hand_result.fu_details is not None:\n for fu_item in hand_result.fu_details:\n print(fu_item)\n\n\nif __name__ == \"__main__\":\n boxes = [\n [99, 165, 360, 194, 410],\n [99, 221, 360, 249, 410],\n [30, 248, 362, 277, 412],\n [28, 111, 361, 138, 411],\n [28, 193, 363, 222, 412],\n [30, 276, 361, 305, 411],\n [99, 83, 361, 111, 410],\n [27, 357, 358, 387, 407],\n [99, 302, 358, 332, 407],\n [99, 138, 360, 166, 410],\n [99, 411, 359, 442, 409],\n [32, 439, 359, 471, 409],\n [99, 329, 358, 359, 410],\n [29, 29, 359, 56, 408],\n [99, 0, 358, 27, 410],\n [27, 385, 362, 415, 413],\n [32, 342, 138, 369, 176],\n [29, 53, 359, 82, 412],\n ]\n result = score(boxes, True, 512 // 2)\n","sub_path":"server/app/tile_score.py","file_name":"tile_score.py","file_ext":"py","file_size_in_byte":13984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"322598348","text":"#!/usr/bin/python\nimport sys\nfrom collections import defaultdict\n\ndef parse_line(pairs):\n \"\"\"Represent bam header key-value pairs as a dictionary\n\n Note: expects input to be a list header elements with the BAM 'key:value'\n format (e.g. [\"VN:1.4\", \"GO:none\",\"SO:coordinate\"]). 
\n \"\"\"\n return( {k:v.strip() for k,v in [element.split(\":\", 1) for element in pairs]} )\n\ndef make_header_dict(handle):\n \"\"\"Represent a complete BAM header as a dictoinary\n\n The returned dictionary contains keys for each unique header-type (i.e.\n thing that starts with an @ symbol) with each value being a list of\n dictionaries containing the key-value pairs defined in that header row.\n \"\"\"\n res = defaultdict(list)\n for line in handle:\n elements = line.split(\"\\t\")\n name = elements[0].strip(\"@\")\n res[name].append(parse_line(elements[1:]))\n return(res)\n\ndef print_usage():\n print (\"\"\"\n Usage, either of\n $ parse_rg.py [ancestral sample] [bam_header.txt]\n $ samtools view -H [alignment.bam] | parse_rg.py [ancestral sample] - \n \"\"\")\n\ndef main():\n \"\"\" \"\"\"\n try: \n anc = sys.argv[1]\n fname = sys.argv[2]\n except IndexError:\n print_usage()\n if fname == \"-\":\n fhandle = sys.stdin\n else:\n fhandle = open(fname)\n d = make_header_dict(fhandle)\n samples = [r[\"SM\"] for r in d[\"RG\"]]\n if anc in samples: \n print(\"ancestor={}\".format(anc))\n else:\n print(\"ERROR: Ancestral sample {} not found in header\".format(anc))\n exit(1)\n for samp in set([s for s in samples if s != anc]):\n print( \"sample-name={}\".format(samp))\n exit(0)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"extract_samples.py","file_name":"extract_samples.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"419522671","text":"# -*- coding:utf-8 -*- \n# !/usr/bin/python \n\"\"\"\n@author:yyx \n@version: 1.0\n@file: chr02.py\n@time: 2019/4/26 11:19\n使用列表管理数据,去除了向量容量的限制\n\"\"\"\nimport random\n\n\nclass Vector:\n def __init__(self): # 初始化时指定向量大小\n self._size = 0\n self._elem = []\n\n def copyFrom(self, A, lo, hi):\n self._size = 0\n self._elem = []\n while lo < hi:\n self._elem.append(A[lo])\n self._size += 1\n lo += 1\n\n def size(self):\n return self._size\n\n def empty(self):\n return self._size == 0\n\n def __getitem__(self, item):\n return self._elem[item]\n\n def __setitem__(self, key, value):\n self._elem[key] = value\n\n def __eq__(self, other):\n self.copyFrom(other, 0, other.size())\n return self\n\n def __len__(self):\n return self._size\n\n def permute(self): # 随机置乱向量\n for i in range(1, self._size):\n k = random.choice(range(i))\n self._elem[i], self._elem[k] = self._elem[k], self._elem[i]\n\n def unsort(self, lo, hi):\n for i in range(1, hi - lo):\n k = random.choice(range(i))\n self._elem[i + lo], self._elem[k + lo] = self._elem[k + lo], self._elem[i + lo]\n\n def find(self, e, lo, hi):\n while lo <= hi:\n hi -= 1\n if self._elem[hi] == e:\n break\n return hi\n\n def insert(self, r, e):\n self._elem.insert(r, e)\n\n def remove(self, *args): # python不支持函数重载\n if len(args) == 1:\n if args[0] < 0 or args[0] > self._size:\n raise ValueError('索引有误')\n self.remove(args[0], args[0] + 1)\n elif len(args) == 2:\n lo, hi = args\n if lo < 0 or lo > self._size:\n raise ValueError('索引有误')\n if hi < 0 or hi > self._size:\n raise ValueError('索引有误')\n if lo == hi:\n return 0\n while hi < self._size:\n self._elem[lo] = self._elem[hi]\n lo += 1\n hi += 1\n self._elem = self._elem[:lo]\n self._size = lo\n return hi - lo\n else:\n raise ValueError('参数过多')\n\n def deduplicate(self):\n old = self._size\n i = 1\n while i < self._size:\n if self.find(self._elem[i], 0, i) < 0:\n i += 1\n else:\n self.remove(i)\n return old - self._size\n\n def traverse(self, visit=print):\n for i 
in range(self._size):\n visit(self._elem[i])\n\n def disordered(self):\n n = 0 # 逆序对数\n for i in range(1, self._size):\n if self._elem[i - 1] > self._elem[i]:\n n += 1\n return n # n=0说明有序\n\n def uniquify1(self): # 有序向量去重\n old = self._size\n i = 1\n while i < self._size:\n if self._elem[i - 1] == self._elem[i]:\n self.remove(i)\n else:\n i += 1\n return old - self._size\n\n def uniquify(self): # 有序向量去重\n old = self._size\n i = 0\n for j in range(1, self._size):\n if self._elem[i] == self._elem[j]:\n pass\n else:\n i += 1\n self._elem[i] = self._elem[j]\n self._size = i + 1\n self._elem = self._elem[:i + 1]\n return old - self._size\n\n @staticmethod\n def binSearch_A(A, e, lo, hi): # 设置成静态方法,三分支\n while lo < hi:\n mi = (lo + hi) >> 1\n if A[mi] > e:\n hi = mi\n elif A[mi] < e:\n lo = mi + 1\n else:\n return mi\n return -1 # 查找失败\n\n @staticmethod\n def binSearch_B(A, e, lo, hi): # 二分支\n while hi - lo > 1:\n mi = (lo + hi) >> 1\n if A[mi] > e:\n hi = mi\n else:\n lo = mi\n if A[lo] == e:\n return lo\n else:\n return -1\n\n @staticmethod\n def binSearch(A, e, lo, hi):\n while lo < hi:\n mi = (lo + hi) >> 1\n if A[mi] > e:\n hi = mi\n else:\n lo = mi + 1\n return lo - 1\n\n def bubbleSort(self, lo, hi):\n sort = False\n while not sort:\n sort = True\n for i in range(lo, hi - 1):\n if self._elem[i] > self._elem[i + 1]:\n self._elem[i], self._elem[i + 1] = self._elem[i + 1], self._elem[i]\n sort = False\n hi -= 1\n\n def selectSort(self, lo, hi):\n temp = lo\n while hi > lo:\n for i in range(lo, hi):\n if self._elem[i] > self._elem[temp]:\n temp = i\n self._elem[temp], self._elem[hi - 1] = self._elem[hi - 1], self._elem[temp]\n hi -= 1\n temp = lo\n\n def mergeSort(self, lo, hi):\n if hi - lo < 2: return\n mi = (lo + hi) >> 1\n self.mergeSort(lo, mi)\n self.mergeSort(mi, hi)\n self.merge(lo, mi, hi)\n\n def merge(self, lo, mi, hi):\n A = self._elem[lo:mi]\n B = self._elem[mi:hi]\n i = j = 0\n k = lo\n while k < hi:\n if i < len(A) and (not (j < len(B)) or A[i] < B[j]):\n self._elem[k] = A[i]\n i += 1\n k += 1\n if j < len(B) and (not (i < len(A)) or B[j] <= A[i]):\n self._elem[k] = B[j]\n j += 1\n k += 1\n\n def heapSort(self, lo, hi):\n pass\n\n def quickSort(self, lo, hi):\n pass\n\n def sort(self, lo, hi):\n sortes = [self.bubbleSort, self.selectSort, self.mergeSort,\n self.heapSort, self.quickSort]\n s = random.choice(sortes)\n s(lo, hi)\n","sub_path":"chr02.py","file_name":"chr02.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"525972700","text":"import requests\nfrom base64 import b64decode\nimport logging\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.permissions import AllowAny\n\nfrom kodkollektivet.models import Project, Contributor, ProCon, ProLan, Language\nfrom kodkollektivet.forms import ProjectForm\n\n# If develop, use settings.settings, else\n# use settings.production.\ntry:\n from settings.production import OAUTH_TOKEN\nexcept ImportError:\n from settings.settings import OAUTH_TOKEN\n\n\nlog = logging.getLogger(__name__)\n\n\ndef getrepos():\n \"\"\"\n This function collects all the repos from\n GitHub and store them in the database\n \"\"\"\n req = requests.get('https://api.github.com/orgs/kodkollektivet/repos' + OAUTH_TOKEN)\n\n if req.status_code is not 200:\n log.warning(req.text)\n return\n\n projects = req.json()\n\n log.debug('Getting repos...')\n\n for project in 
projects:\n\n req = requests.get(\n 'https://api.github.com/repos/kodkollektivet/' +\n project['name'] +\n '/readme' +\n OAUTH_TOKEN)\n\n if req.status_code is 200:\n try:\n readme = req.json()\n readme = b64decode(readme['content'])\n except Exception as e:\n log.debug(e)\n else:\n readme = ''\n\n form = ProjectForm({\n 'gh_name': project['name'],\n 'gh_id': project['id'],\n 'gh_url': project['html_url'],\n 'gh_readme': readme\n })\n\n if form.is_valid():\n # Creates or updates a project. It first looks it match on gh_name\n pro, created = Project.objects.update_or_create(\n gh_name=form.data['gh_name'],\n defaults=form.data)\n\n req = requests.get(\n 'https://api.github.com/repos/kodkollektivet/' +\n project['name'] + '/languages' + OAUTH_TOKEN)\n\n if req.status_code is 200:\n languages = req.json()\n for key, value in languages.items():\n lan, created = Language.objects.update_or_create(name=key)\n obj, created = ProLan.objects.update_or_create(project=pro, language=lan)\n else:\n log.debug(req.text)\n else:\n log.warning('Form is not valid')\n log.warning(str(form.errors))\n\n\ndef getcontribs():\n \"\"\"\n This function get all the project objects from the database.\n Ask the GitHub API for the contributors in the project.\n Save them to the database and create a relation.\n Project <-> Contributor\n \"\"\"\n\n log.debug('Getting contributors...')\n\n for project in Project.objects.all(): # Get all projects\n\n # go in here if gh_name or gh_id\n if (len(project.gh_name) > 2) or (project.gh_id is not None):\n req = requests.get(\n 'https://api.github.com/repos/kodkollektivet/' +\n project.gh_name +\n '/contributors' +\n OAUTH_TOKEN)\n\n if req.status_code is 200:\n for contributor in req.json():\n import pdb; pdb.set_trace()\n Contributor.objects.update_or_create(\n gh_login=contributor['login'],\n gh_url=contributor['url'],\n gh_id=contributor['id'],\n gh_html=contributor['html_url'],\n gh_avatar=contributor['avatar_url'])\n else:\n log.debug(req.text)\n\n\ndef getprocon():\n \"\"\"Get the project contributor relations.\"\"\"\n\n log.debug('Getting procons...')\n\n for project in Project.objects.all():\n # If it is a github project\n if (len(project.gh_name) > 2) or (project.gh_id is not None):\n req = requests.get(\n 'https://api.github.com/repos/kodkollektivet/' +\n project.gh_name +\n '/contributors' +\n OAUTH_TOKEN)\n\n if req.status_code is 200:\n for data in req.json():\n contributor = Contributor.objects.get(gh_id=data['id'])\n ProCon.objects.get_or_create(contributor=contributor, project=project)\n else:\n log.debug(req.status_code)\n\n\nclass GithubHook(APIView):\n\n permission_classes = (AllowAny,)\n\n def post(self, *args):\n getrepos()\n getcontribs()\n getprocon()\n log.debug('Getting repos... 
DONE')\n return Response(status=status.HTTP_200_OK)\n","sub_path":"kodkollektivet/kodkollektivet/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"136068105","text":"import os\n\n\ndef locate_file(file_in_root_path):\n \"\"\"\n Determine the absolute path of a target file.\n\n Returns\n ----------\n str: absolute path of target file.\n \"\"\"\n cur_file_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n proj_root_path, _ = os.path.split(cur_file_path)\n target_file_path = os.path.join(proj_root_path, file_in_root_path)\n return target_file_path\n","sub_path":"data_mocking/scripts/file_path.py","file_name":"file_path.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"316876416","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('uptime', '0002_auto_20151201_0900'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='journal',\n options={'verbose_name_plural': 'журналы', 'default_permissions': [], 'permissions': (('view_journal_details', 'View journal details'), ('view_journal_list', 'View journal list'), ('update_journal_description', 'Update journal description'), ('create_journal_record', 'Create record'), ('edit_journal_record', 'Edit record'), ('delete_journal_record', 'Delete record'), ('create_journal_event', 'Create journal event'), ('delete_journal_event', 'Delete journal event')), 'verbose_name': 'журнал', 'ordering': ['equipment__name']},\n ),\n ]\n","sub_path":"etools/apps/uptime/migrations/0003_auto_20160412_1149.py","file_name":"0003_auto_20160412_1149.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"73917615","text":"# =========================================================================\n# Copyright 2020 Viktor Borzov\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#========================================================================== \n\nclass MangaSite():\n def __init__(self, name, xpaths, test_link = ''):\n self.name = name\n self.xpaths = xpaths\n self.optimized = False\n self.test_link = test_link","sub_path":"bin/other/manga_site.py","file_name":"manga_site.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"521996754","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExaplanation: list_myfolders a cmdlet within the sumocli that retrieves information\n\nUsage:\n $ python list_myfolders [ options ]\n\nStyle:\n Google Python Style Guide:\n http://google.github.io/styleguide/pyguide.html\n\n @name sumocli_listt_myfolders\n @version 1.00\n @author-name Wayne Schmidt\n 
@author-email wschmidt@sumologic.com\n @license-name Apache 2.0\n @license-url http://www.gnu.org/licenses/gpl.html\n\"\"\"\n\n__version__ = 1.00\n__author__ = \"Wayne Schmidt (wschmidt@sumologic.com)\"\n\n### beginning ###\nimport json\nimport os\nimport sys\nimport argparse\nimport http\nimport requests\nsys.dont_write_bytecode = 1\n\nMY_CFG = 'undefined'\nPARSER = argparse.ArgumentParser(description=\"\"\"\nlist_folders is a Sumo Logic cli cmdlet retrieving information about folders\n\"\"\")\n\nPARSER.add_argument(\"-a\", metavar='', dest='MY_SECRET', \\\n help=\"set api (format: :) \")\nPARSER.add_argument(\"-k\", metavar='', dest='MY_CLIENT', \\\n help=\"set key (format: _) \")\nPARSER.add_argument(\"-e\", metavar='', dest='MY_ENDPOINT', \\\n help=\"set endpoint (format: ) \")\nPARSER.add_argument(\"-f\", metavar='', default=\"list\", dest='oformat', \\\n help=\"Specify output format (default = list )\")\nPARSER.add_argument(\"-m\", default=0, metavar='', \\\n dest='myself', help=\"provide specific id to lookup\")\nPARSER.add_argument(\"-p\", default=0, metavar='', \\\n dest='parentid', help=\"provide parent id to locate with\")\nPARSER.add_argument(\"-v\", type=int, default=0, metavar='', \\\n dest='verbose', help=\"Increase verbosity\")\nPARSER.add_argument(\"-n\", \"--noexec\", action='store_true', \\\n help=\"Print but do not execute commands\")\n\nARGS = PARSER.parse_args()\n\nif ARGS.MY_SECRET:\n (MY_APINAME, MY_APISECRET) = ARGS.MY_SECRET.split(':')\n os.environ['SUMO_UID'] = MY_APINAME\n os.environ['SUMO_KEY'] = MY_APISECRET\n\nif ARGS.MY_CLIENT:\n (MY_DEPLOYMENT, MY_ORGID) = ARGS.MY_CLIENT.split('_')\n os.environ['SUMO_LOC'] = MY_DEPLOYMENT\n os.environ['SUMO_ORG'] = MY_ORGID\n os.environ['SUMO_TAG'] = ARGS.MY_CLIENT\n\nif ARGS.MY_ENDPOINT:\n os.environ['SUMO_END'] = ARGS.MY_ENDPOINT\nelse:\n os.environ['SUMO_END'] = os.environ['SUMO_LOC']\n\ntry:\n SUMO_UID = os.environ['SUMO_UID']\n SUMO_KEY = os.environ['SUMO_KEY']\n SUMO_LOC = os.environ['SUMO_LOC']\n SUMO_ORG = os.environ['SUMO_ORG']\nexcept KeyError as myerror:\n print(f'Environment Variable Not Set :: {myerror.args[0]}')\n\n### beginning ###\n\ndef main():\n \"\"\"\n Setup the Sumo API connection, using the required tuple of region, id, and key.\n Once done, then issue the command required\n \"\"\"\n source = SumoApiClient(SUMO_UID, SUMO_KEY)\n run_sumo_cmdlet(source)\n\ndef run_sumo_cmdlet(source):\n \"\"\"\n This will collect the information on object for sumologic and then collect that into a list.\n the output of the action will provide a tuple of the orgid, objecttype, and id\n \"\"\"\n target_object = \"myfolders\"\n target_dict = {}\n target_dict[\"orgid\"] = SUMO_ORG\n target_dict[target_object] = {}\n\n src_items = source.get_myfolders()\n target_dict[target_object]['id'] = {}\n target_dict[target_object]['id'].update({'parent' : SUMO_ORG})\n target_dict[target_object]['id'].update({'dump' : src_items})\n print(json.dumps(target_dict, indent=4))\n\n### class ###\nclass SumoApiClient():\n \"\"\"\n This is defined SumoLogic API Client\n The class includes the HTTP methods, cmdlets, and init methods\n \"\"\"\n\n def __init__(self, access_id, access_key, endpoint=None, cookie_file='cookies.txt'):\n \"\"\"\n Initializes the Sumo Logic object\n \"\"\"\n\n self.session = requests.Session()\n self.session.auth = (access_id, access_key)\n self.session.headers = {'content-type': 'application/json', \\\n 'accept': 'application/json'}\n cookiejar = http.cookiejar.FileCookieJar(cookie_file)\n self.session.cookies = 
cookiejar\n if endpoint is None:\n self.endpoint = self._get_endpoint()\n elif len(endpoint) < 3:\n self.endpoint = 'https://api.' + endpoint + '.sumologic.com/api'\n else:\n self.endpoint = endpoint\n if self.endpoint[-1:] == \"/\":\n raise Exception(\"Endpoint should not end with a slash character\")\n\n def _get_endpoint(self):\n \"\"\"\n SumoLogic REST API endpoint changes based on the geo location of the client.\n It contacts the default REST endpoint and resolves the 401 to get the right endpoint.\n \"\"\"\n self.endpoint = 'https://api.sumologic.com/api'\n self.response = self.session.get('https://api.sumologic.com/api/v1/collectors')\n endpoint = self.response.url.replace('/v1/collectors', '')\n return endpoint\n\n def delete(self, method, params=None, headers=None, data=None):\n \"\"\"\n Defines a Sumo Logic Delete operation\n \"\"\"\n response = self.session.delete(self.endpoint + method, \\\n params=params, headers=headers, data=data)\n if response.status_code != 200:\n response.reason = response.text\n response.raise_for_status()\n return response\n\n def get(self, method, params=None, headers=None):\n \"\"\"\n Defines a Sumo Logic Get operation\n \"\"\"\n response = self.session.get(self.endpoint + method, \\\n params=params, headers=headers)\n if response.status_code != 200:\n response.reason = response.text\n response.raise_for_status()\n return response\n\n def post(self, method, data, headers=None, params=None):\n \"\"\"\n Defines a Sumo Logic Post operation\n \"\"\"\n response = self.session.post(self.endpoint + method, \\\n data=json.dumps(data), headers=headers, params=params)\n if response.status_code != 200:\n response.reason = response.text\n response.raise_for_status()\n return response\n\n def put(self, method, data, headers=None, params=None):\n \"\"\"\n Defines a Sumo Logic Put operation\n \"\"\"\n response = self.session.put(self.endpoint + method, \\\n data=json.dumps(data), headers=headers, params=params)\n if response.status_code != 200:\n response.reason = response.text\n response.raise_for_status()\n return response\n\n### class ###\n### methods ###\n\n def get_myfolders(self):\n \"\"\"\n Using an HTTP client, this uses a GET to retrieve all connection information.\n \"\"\"\n url = \"/v2/content/folders/personal/\"\n body = self.get(url).text\n results = json.loads(body)\n return results\n\n def get_myfolder(self, myself):\n \"\"\"\n Using an HTTP client, this uses a GET to retrieve single connection information.\n \"\"\"\n url = \"/v2/content/folders/personal/\" + str(myself)\n body = self.get(url).text\n results = json.loads(body)\n return results\n\n### methods ###\n\nif __name__ == '__main__':\n main()\n","sub_path":"bin/list/sumocli_list_folders.py","file_name":"sumocli_list_folders.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"10391425","text":"import struct\n\n# _______________________Assembly Helper Functions________________________\n\n\n# Borrowed from http://stackoverflow.com/questions/16444726/binary-representation-of-float-in-python-bits-not-hex\n# Slightly edited to adhere to Python 3.5 standards over 2.7\n# The return from this function should be cast by int(str, 2) for the immediate to use in MIPS\ndef convert_float_to_binary(float_val):\n return ''.join(bin(b).replace('0b', '').rjust(8, '0') for b in struct.pack('!f', float_val))\n\n\n# Easy function to get the operation type of any assembly function (looks at the registers)\ndef 
get_op_type(f_reg, s_reg):\n return 'float' if 'f' in str(f_reg) or 'f' in str(s_reg) else 'normal'\n\n\n# Ensures that f_reg and s_reg are loaded into registers if either is an immediate\n# (Since most MIPS methods have overloads for s_reg as an immediate, we will not assume it needs to be done here)\n# Assumes that not both of f_reg and s_reg are immediates !!!!\n# (If both are immediates, we should have statically operated on them)\ndef load_immediates(op_type, ret_asm, f_reg, s_reg):\n if op_type == 'float':\n if type(s_reg) is float:\n ret_asm += asm_reg_set('$f13', s_reg)\n s_reg = '$f13'\n elif type(f_reg) is float:\n ret_asm += asm_reg_set('$f13', f_reg)\n f_reg = '$f13'\n else:\n if type(f_reg) is int:\n ret_asm += asm_reg_set('$v1', f_reg)\n f_reg = '$v1'\n return ret_asm, f_reg, s_reg\n# _______________________Assembly________________________\n\n## ______STACK______\n\n# Allocates space to the stack\ndef asm_allocate_stack_space(space = 4):\n return asm_add('$sp', '$sp', -space)\n\n\n# Load variable from stack\ndef asm_load_reg_from_stack(reg, offset = 0):\n return asm_load_mem_var_from_addr('$sp', reg, offset)\n\n\n# Save variable to stack\ndef asm_save_reg_to_stack(reg, offset = 0):\n return asm_save_mem_var_from_addr('$sp', reg, offset)\n\n\n## ______I/O______\n\n# Pass in type information to indicate which syscall to use:\n# 5 - read int\n# 6 - read float\n# 7 - read double\n# 8 - read string\ndef asm_read(var_type):\n ret_asm = ''\n if var_type == 'int':\n ret_asm += asm_reg_set('$v0', 5)\n elif var_type == 'float':\n ret_asm += asm_reg_set('$v0', 6)\n elif var_type == 'double':\n ret_asm += asm_reg_set('$v0', 7)\n else:\n ret_asm += asm_reg_set('$v0', 8)\n return ret_asm + 'syscall\\n'\n\n\n# Check current syscode if it's correct for the var_type passed\ndef asm_check_syscode_write(var_type, val):\n return (var_type == 'int' and val != 1) or (var_type == 'float' and val != 2) \\\n or (var_type == 'double' and val != 3) or (var_type == 'string' and val != 4)\n\n\n# Pass it a var_type\n# Return the necessary syscode to print\ndef asm_get_syscode_write(var_type):\n if var_type == 'int':\n return 1\n elif var_type == 'float':\n return 2\n elif var_type == 'double':\n return 3\n else:\n return 4\n\n\n# Broke this off from write to optimize writes a little faster\n# var_type: The type of variable to write\n# 1 - print int, arg in $a0\n# 2 - print float, arg in $f12\n# 3 - print double, arg in $f12\n# 4 - print string, arg in $a0\ndef asm_set_syscode_write(var_type):\n return asm_reg_set('$v0', asm_get_syscode_write(var_type))\n\n\n# var_type is the type of the variable to print\n# reg is the register where the variable is stored\n# 1 - print int, arg in $a0\n# 2 - print float, arg in $f12\n# 3 - print double, arg in $f12\n# 4 - print string, arg in $a0\n# If the is_a0_set boolean is True, then the will be no register equation\ndef asm_write(var_reg, var_type, is_a0_set = False):\n ret_asm = ''\n\n if not is_a0_set:\n if var_type == 'int':\n ret_asm += asm_reg_set('$a0', var_reg)\n elif var_type == 'float':\n ret_asm += asm_reg_set('$f12', var_reg)\n elif var_type == 'double':\n ret_asm += asm_reg_set('$f12', var_reg)\n else:\n ret_asm += asm_reg_set('$a0', var_reg)\n\n return ret_asm + 'syscall\\n'\n\n\n## ______LOGICAL______\n# These work assuming f_reg, s_reg are 0 or 1\n\n# ORs f_reg and s_reg and stores in r_reg\ndef asm_log_or(r_reg, f_reg, s_reg):\n ret_asm, f_reg, s_reg = load_immediates('normal', '', f_reg, s_reg)\n if type(s_reg) is bool:\n s_reg = 1 if s_reg else 0\n 
ret_asm += 'ori {:s}, {:s}, {:d}\\n'.format(r_reg, f_reg, s_reg)\n    else:\n        ret_asm += 'or {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    return ret_asm\n\n\n# ANDs f_reg and s_reg and stores in r_reg\ndef asm_log_and(r_reg, f_reg, s_reg):\n    ret_asm, f_reg, s_reg = load_immediates('normal', '', f_reg, s_reg)\n    if type(s_reg) is bool:\n        s_reg = 1 if s_reg else 0\n        ret_asm += 'andi {:s}, {:s}, {:d}\\n'.format(r_reg, f_reg, s_reg)\n    else:\n        ret_asm += 'and {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    return ret_asm\n\n\ndef asm_log_xor(r_reg, f_reg, s_reg):\n    ret_asm, f_reg, s_reg = load_immediates('normal', '', f_reg, s_reg)\n    if type(s_reg) is bool:\n        s_reg = 1 if s_reg else 0\n        ret_asm += 'xori {:s}, {:s}, {:d}\\n'.format(r_reg, f_reg, s_reg)\n    else:\n        ret_asm += 'xor {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    return ret_asm\n\n\n# CANNOT work with immediates!\n# Load f_reg into a register before using if it is an immediate\n# This does a bitflip of the 1's place\ndef asm_log_negate(r_reg, f_reg):\n    return asm_log_xor(r_reg, f_reg, 1)\n\n\n## ______EQUALITY______\n\n# r_reg <- f_reg == s_reg\ndef asm_rel_eq(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'c.eq.s {:s}, {:s}\\n'.format(f_reg, s_reg) \\\n                   + asm_reg_set('$v1', 1) \\\n                   + 'movf $v1, $0\\n' \\\n                   + asm_reg_set(r_reg, '$v1')\n    else:\n        ret_asm += 'seq {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n# r_reg <- f_reg != s_reg\ndef asm_rel_neq(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'c.eq.s {:s}, {:s}\\n'.format(f_reg, s_reg) \\\n                   + asm_reg_set('$v1', 1) \\\n                   + 'movt $v1, $0\\n' \\\n                   + asm_reg_set(r_reg, '$v1')\n    else:\n        ret_asm += 'sne {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n## ______RELATIONSHIP______\n\n# r_reg <- f_reg <= s_reg\ndef asm_rel_le(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'c.le.s {:s}, {:s}\\n'.format(f_reg, s_reg) \\\n                   + asm_reg_set('$v1', 1) \\\n                   + 'movf $v1, $0\\n' \\\n                   + asm_reg_set(r_reg, '$v1')\n    else:\n        ret_asm += 'sle {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n# I don't know why this one doesn't have pseudocode overrides for immediates, but the others do\n# r_reg <- f_reg < s_reg\ndef asm_rel_lt(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'c.lt.s {:s}, {:s}\\n'.format(f_reg, s_reg) \\\n                   + asm_reg_set('$v1', 1) \\\n                   + 'movf $v1, $0\\n' \\\n                   + asm_reg_set(r_reg, '$v1')\n    else:\n        if type(s_reg) is int:\n            ret_asm += 'slti {:s}, {:s}, {:d}\\n'.format(r_reg, f_reg, s_reg)\n        else:\n            ret_asm += 'slt {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n# r_reg <- f_reg >= s_reg\ndef asm_rel_ge(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'c.lt.s {:s}, {:s}\\n'.format(f_reg, s_reg) \\\n                   + asm_reg_set('$v1', 1) \\\n                   + 'movt $v1, $0\\n' \\\n                   + asm_reg_set(r_reg, '$v1')\n    else:\n        ret_asm += 'sge {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n# r_reg <- f_reg > s_reg\ndef asm_rel_gt(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'c.le.s {:s}, {:s}\\n'.format(f_reg, s_reg) \\\n                   + asm_reg_set('$v1', 1) \\\n                   + 'movt $v1, $0\\n' \\\n                   + asm_reg_set(r_reg, '$v1')\n    else:\n        ret_asm += 'sgt {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n## ______ARITHMETIC______\n\n# Used to add two values\n# Includes override for immediates\n# r_reg = f_reg + s_reg\ndef asm_add(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'add.s {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    elif type(s_reg) is int:\n        ret_asm += 'addi {:s}, {:s}, {:d}\\n'.format(r_reg, f_reg, s_reg)\n    else:\n        ret_asm += 'add {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    return ret_asm\n\n\n# Used to subtract two values\n# Includes override for immediates\n# r_reg = f_reg - s_reg\ndef asm_sub(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'sub.s {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    elif type(s_reg) is int:\n        ret_asm += 'subi {:s}, {:s}, {:d}\\n'.format(r_reg, f_reg, s_reg)\n    else:\n        ret_asm += 'sub {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    return ret_asm\n\n\n# Stores result in lo register in addition to r_reg\n# r_reg = f_reg * s_reg\ndef asm_multiply(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'mul.s {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    else:\n        ret_asm += 'mul {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n# r_reg = f_reg / s_reg\ndef asm_divide(r_reg, f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm, f_reg, s_reg = load_immediates(op_type, '', f_reg, s_reg)\n    if op_type == 'float':\n        ret_asm += 'div.s {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, s_reg)\n    else:\n        ret_asm += 'div {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n# Only defined for integers\n# r_reg = f_reg % s_reg\ndef asm_modulo(r_reg, f_reg, s_reg):\n    ret_asm = ''\n    if type(f_reg) is int:\n        ret_asm += asm_reg_set('$v1', f_reg)\n        f_reg = '$v1'\n    ret_asm += 'rem {:s}, {:s}, {:s}\\n'.format(r_reg, f_reg, str(s_reg))\n    return ret_asm\n\n\n# Load a value from one register to another\n# f_reg = s_reg\ndef asm_reg_set(f_reg, s_reg):\n    op_type = get_op_type(f_reg, s_reg)\n    ret_asm = ''\n    # We don't use the shortcut load_immediates here because this is the function used in that\n    if op_type == 'float':\n        if type(s_reg) is float:\n            ret_asm += 'li {:s}, {:d}\\n'.format('$v1', int(convert_float_to_binary(s_reg), 2)) \\\n                       + 'mtc1 {:s}, {:s}\\n'.format('$v1', '$f13')\n            s_reg = '$f13'\n        elif type(f_reg) is float:\n            ret_asm += 'li {:s}, {:d}\\n'.format('$v1', int(convert_float_to_binary(f_reg), 2)) \\\n                       + 'mtc1 {:s}, {:s}\\n'.format('$v1', '$f13')\n            f_reg = '$f13'\n\n        ret_asm += 'mov.s {:s}, {:s}\\n'.format(f_reg, s_reg)\n    else: # int\n        if type(s_reg) is int:\n            ret_asm += 'li {:s}, {:d}\\n'.format(f_reg, s_reg)\n        elif type(f_reg) is int:\n            ret_asm += 'li {:s}, {:d}\\n'.format(s_reg, f_reg)\n        else:\n            ret_asm += 'move {:s}, {:s}\\n'.format(f_reg, s_reg)\n    return ret_asm\n\n## ______READ/WRITE RAM______\n\n\n# Loads a variable's memory 
address into a register\ndef asm_load_mem_addr(mem_name, temp_reg):\n return 'la {:s}, {:s}\\n'.format(temp_reg, mem_name)\n\n\n# Assumes mem_name address isn't in memory already\ndef asm_load_mem_var(mem_name, addr_reg, dest_reg, offset = 0):\n if 'f' in str(dest_reg):\n return 'la {:s}, {:s}\\nl.s {:s}, {:d}({:s})\\n'.format(addr_reg, mem_name, dest_reg, offset, addr_reg)\n else:\n return 'la {:s}, {:s}\\nlw {:s}, {:d}({:s})\\n'.format(addr_reg, mem_name, dest_reg, offset, addr_reg)\n\n\n# Assumes mem_addr_reg holds RAM location of desired variable\ndef asm_load_mem_var_from_addr(mem_addr_reg, dest_reg, offset = 0):\n if 'f' in str(dest_reg):\n return 'l.s {:s}, {:d}({:s})\\n'.format(dest_reg, offset, mem_addr_reg)\n else:\n return 'lw {:s}, {:d}({:s})\\n'.format(dest_reg, offset, mem_addr_reg)\n\n\n# Assumes mem_name address isn't in memory already\ndef asm_save_mem_var(mem_name, addr_reg, var_reg, offset = 0):\n if 'f' in str(var_reg):\n return 'la {:s}, {:s}\\ns.s {:s}, {:d}({:s})\\n'.format(addr_reg, mem_name, var_reg, offset, addr_reg)\n else:\n return 'la {:s}, {:s}\\nsw {:s}, {:d}({:s})\\n'.format(addr_reg, mem_name, var_reg, offset, addr_reg)\n\n\n# Assumes mem_addr_reg holds RAM location of desired variable\ndef asm_save_mem_var_from_addr(mem_addr_reg, var_reg, offset = 0):\n if 'f' in str(var_reg):\n return 's.s {:s}, {:d}({:s})\\n'.format(var_reg, offset, mem_addr_reg)\n else:\n return 'sw {:s}, {:d}({:s})\\n'.format(var_reg, offset, mem_addr_reg)\n\n# _______________________Helpers________________________\n\n\n# Helper that will convert an int to a float\ndef asm_cast_int_to_float(f_reg, i_reg):\n ret_asm = ''\n if type(i_reg) is int:\n ret_asm += asm_reg_set('$v1', i_reg)\n i_reg = '$v1'\n ret_asm += 'mtc1 {:s}, {:s}\\ncvt.s.w {:s}, {:s}\\n'.format(i_reg, f_reg, f_reg, f_reg)\n return ret_asm\n\n\n# This allows for bools to be able to be dynamically printed\ndef asm_dynamic_bool_print(r_reg, f_reg, true_addr_reg, false_addr_reg):\n return asm_rel_eq('$v1', f_reg, 1) + 'movn {:s}, {:s}, {:s}\\n'.format(r_reg, true_addr_reg, '$v1') + \\\n 'movz {:s}, {:s}, {:s}\\n'.format(r_reg, false_addr_reg, '$v1')\n","sub_path":"archive/proj6/assembly_helper.py","file_name":"assembly_helper.py","file_ext":"py","file_size_in_byte":14309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"134940318","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport xarray as xr\nimport os\nfrom os.path import join\nimport yaml\nimport argparse\n\nfrom holodecml.data import load_raw_datasets, load_unet_datasets, load_unet_datasets_xy\n\n\ndef main():\n \n print(\"Parsing config...\")\n \n # parse arguments from config/yaml file\n parser = argparse.ArgumentParser(description='Describe plotting parameters')\n parser.add_argument(\"config\", help=\"Path to config file\")\n args = parser.parse_args()\n with open(args.config) as config_file:\n config = yaml.load(config_file, Loader=yaml.FullLoader)\n\n path_data = config[\"path_data\"]\n path_save = config[\"path_save\"]\n if not os.path.exists(path_save):\n os.makedirs(path_save)\n num_particles = config[\"num_particles\"]\n output_cols = config[\"output_cols\"]\n bin_factor = config[\"bin_factor\"]\n \n # load data\n print(\"Loading data...\")\n train_inputs,\\\n train_outputs,\\\n valid_inputs,\\\n valid_outputs = load_unet_datasets_xy(path_data,\n num_particles,\n output_cols,\n config[\"subset\"],\n bin_factor)\n\n h = config[\"h\"]\n 
valid_outputs_pred = xr.open_dataset(join(path_save, \"valid_outputs_pred.nc\"))\n valid_outputs_pred = valid_outputs_pred.to_array().values[0]\n\n image_pred = valid_outputs_pred[h, :, :, 0]\n image_true = valid_outputs[h, :, :, 0]\n\n coords_true = np.where(image_true > 0)\n\n idx = np.argwhere(np.diff(np.sort(valid_outputs_pred[h, :, :, 0].flatten())) > .0001)+1\n pred_argsort = valid_outputs_pred[h, :, :, 0].flatten().argsort()\n coords_pred = []\n for i in pred_argsort[-idx.shape[0]:][::-1]:\n coord = np.array([c[0] for c in np.where(image_pred == image_pred.flatten()[i])])\n coords_pred.append(coord)\n coords_pred = np.stack(coords_pred)\n\n print(\"Plotting...\")\n # Plot 1\n fig=plt.figure(figsize=(12, 8))\n plt.pcolormesh(np.log(valid_outputs_pred[h, :, :, 0]).T, cmap=\"RdBu_r\")\n plt.colorbar()\n plt.scatter(np.where(image_true > 0)[0], np.where(image_true > 0)[1], color='blue', s=100, label=\"True\")\n plt.title(f'Log of probability field for validation hologram {h}', fontsize=20)\n plt.legend(fontsize=20)\n plt.xticks([])\n plt.yticks([])\n plt.savefig(join(path_save, \"prob_field_log.png\"), dpi=200, bbox_inches=\"tight\")\n \n # Plot 2\n plt.figure(figsize=(12, 8))\n x_vals = np.linspace(0, valid_inputs.shape[1]/bin_factor, valid_inputs[h, :, :].shape[0])\n y_vals = np.linspace(0, valid_inputs.shape[2]/bin_factor, valid_inputs[h, :, :].shape[1])\n plt.xticks([])\n plt.yticks([])\n plt.pcolormesh(x_vals, y_vals, valid_inputs[h, :, :].T, cmap=\"RdBu_r\")\n plt.scatter(np.where(image_true > 0)[0], np.where(image_true > 0)[1], color='blue', s=100, label=\"True\", zorder=2)\n plt.scatter(coords_pred[:, 0], coords_pred[:, 1], color='red', s=100, label=\"Predicted\", zorder=1)\n plt.legend(fontsize=20)\n plt.title(f'{int(np.sum(image_true))} True vs Top {idx.shape[0]} Predicted Particles for validation hologram {h}', fontsize=20)\n plt.savefig(join(path_save, \"true_vs_pred_diff.png\"), dpi=200, bbox_inches=\"tight\")\n\n # Plot 3\n pred_argsort = valid_outputs_pred[h, :, :, 0].flatten().argsort()\n coords_pred = []\n for i in pred_argsort[-int(np.sum(image_true)):][::-1]:\n coord = np.array([c[0] for c in np.where(image_pred == image_pred.flatten()[i])])\n coords_pred.append(coord)\n coords_pred = np.stack(coords_pred)\n\n plt.figure(figsize=(12, 8))\n x_vals = np.linspace(0, valid_inputs.shape[1]/bin_factor, valid_inputs[h, :, :].shape[0])\n y_vals = np.linspace(0, valid_inputs.shape[2]/bin_factor, valid_inputs[h, :, :].shape[1])\n plt.xticks([])\n plt.yticks([])\n plt.pcolormesh(x_vals, y_vals, valid_inputs[h, :, :].T, cmap=\"RdBu_r\")\n plt.scatter(np.where(image_true > 0)[0], np.where(image_true > 0)[1], color='blue', s=100, label=\"True\", zorder=2)\n plt.scatter(coords_pred[:, 0], coords_pred[:, 1], color='red', s=100, label=\"Predicted\", zorder=1)\n plt.legend(fontsize=20)\n plt.title(f'{int(np.sum(image_true))} True vs Top {int(np.sum(image_true))} Predicted Particles for validation hologram {h}', fontsize=20)\n plt.savefig(join(path_save, \"true_vs_pred_toptrue.png\"), dpi=200, bbox_inches=\"tight\")\n\n # Plot 4\n fig=plt.figure(figsize=(12, 8))\n plt.imshow(valid_outputs[h, :, :, 0].T, interpolation='bilinear', cmap=plt.cm.gray, aspect='auto', vmin=0, vmax=1)\n plt.title(f'True probability field for validation hologram {h}\\nSum of non-zero values: {np.sum(valid_outputs[h, :, :, 0]):.2f}\\nMax predicted value: {np.max(valid_outputs[h, :, :, 0]):.2f}', fontsize=20)\n plt.savefig(join(path_save, \"prob_true.png\"), dpi=200, bbox_inches=\"tight\")\n\n # Plot 5\n 
fig=plt.figure(figsize=(12, 8))\n plt.imshow(valid_outputs_pred[h, :, :, 0].T, interpolation='bilinear', cmap=plt.cm.gray, aspect='auto', vmin=0, vmax=1)\n plt.title(f'Predicted probability field for validation hologram {h}\\nSum of non-zero values: {np.sum(valid_outputs_pred[h, :, :, 0]):.2f}\\nMax predicted value: {np.max(valid_outputs_pred[h, :, :, 0]):.2f}', fontsize=20)\n plt.savefig(join(path_save, \"prob_pred.png\"), dpi=200, bbox_inches=\"tight\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"holodecml/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"269027516","text":"from __future__ import absolute_import\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\nfrom utils.eval_tool import eval_detection_voc\nfrom torch.utils import data as data_\nfrom data.VOCdataset import Dataset, TestDataset, inverse_normalize\nfrom trainer import FasterRCNNTrainer\nfrom utils import array_tool as at\nfrom utils.config import opt\nfrom model.faster_rcnn_densenet121 import FasterRCNNDensenet121\n\nresults_path=\"DenseNet121.csv\"\nweights_path=\"DenseNet121_weight/\"\n\n\n\n\n\ndef eval(dataloader, faster_rcnn, test_num=10000):\n pred_bboxes, pred_labels, pred_scores = list(), list(), list()\n gt_bboxes, gt_labels, gt_difficults = list(), list(), list()\n for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_) in tqdm(enumerate(dataloader)):\n sizes = [sizes[0][0].item(), sizes[1][0].item()]\n pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(imgs, [sizes])\n gt_bboxes += list(gt_bboxes_.numpy())\n gt_labels += list(gt_labels_.numpy())\n gt_difficults += list(gt_difficults_.numpy())\n pred_bboxes += pred_bboxes_\n pred_labels += pred_labels_\n pred_scores += pred_scores_\n if ii == test_num: break\n\n result = eval_detection_voc(\n pred_bboxes, pred_labels, pred_scores,\n gt_bboxes, gt_labels, gt_difficults,\n use_07_metric=True)\n return result\n\ndef train():\n \n dataset = Dataset(voc_data_dir=['/dataset/VOCdevkit/VOC2007', '/dataset/VOCdevkit/VOC2012'], size=(600, 1000))\n dataloader = data_.DataLoader(dataset, batch_size=1, shuffle=True)\n \n testset = TestDataset(voc_data_dir=['/dataset/VOCdevkit/VOC2007'])\n test_dataloader = data_.DataLoader(testset, batch_size=1, shuffle=False)\n \n faster_rcnn = FasterRCNNDensenet121()\n print('model construct completed')\n trainer = FasterRCNNTrainer(faster_rcnn).cuda()\n if opt.load_path:\n trainer.load(opt.load_path)\n print('load pretrained model from %s' % opt.load_path)\n \n print(\"trainer lr:\",trainer.faster_rcnn.optimizer.param_groups[0]['lr'])\n best_map = 0\n lr_ = opt.lr\n record_pd = pd.DataFrame(\n columns=['lr', 'map', 'rpn_loc_loss', 'rpn_cls_loss', 'roi_loc_loss', 'roi_cls_loss', 'total_loss'])\n for epoch in range(opt.epoch):\n trainer.reset_meters()\n for ii, (img, bbox_, label_, scale) in tqdm(enumerate(dataloader)):\n scale = at.scalar(scale)\n img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()\n trainer.train_step(img, bbox, label, scale)\n \n eval_result = eval(test_dataloader, faster_rcnn, test_num=opt.test_num)\n lr_ = trainer.faster_rcnn.optimizer.param_groups[0]['lr']\n log_info = 'lr:{}, map:{},loss:{}'.format(str(lr_),\n str(eval_result['map']),\n str(trainer.get_meter_data()))\n \n dict2 = trainer.get_meter_data()\n new = dict({'lr': lr_, 'map': eval_result['map']}, **dict2)\n record_pd = record_pd.append(new, ignore_index=True)\n 
record_pd.to_csv(results_path, index=0)\n print(log_info)\n \n if eval_result['map'] > best_map:\n best_map = eval_result['map']\n best_path = trainer.save(best_map=best_map,save_path=os.path.join(weights_path,\"weights_%s.pth\"%round(best_map,3)))\n if epoch == 9:\n trainer.load(best_path)\n trainer.faster_rcnn.scale_lr(opt.lr_decay)\n lr_ = lr_ * opt.lr_decay\n\n\ntrain()","sub_path":"DenseNet121main.py","file_name":"DenseNet121main.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"286481031","text":"def fn():\r\n n = int(input().strip())\r\n a = list(map(int,input().strip().split()))\r\n maxi = -1\r\n out = []\r\n for i in range(len(a)-1,-1,-1):\r\n if a[i] >= maxi:\r\n maxi = a[i]\r\n out.append(a[i])\r\n for i in range(len(out)-1,-1,-1):\r\n print(out[i],end = ' ')\r\n print()\r\n \r\nfor _ in range(int(input())):\r\n fn()","sub_path":"python/leaders_in_an_array.py","file_name":"leaders_in_an_array.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"400168598","text":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2013 Johannes Baiter. All rights reserved.\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nspreads CLI code.\n\"\"\"\n\nfrom __future__ import division, unicode_literals, print_function\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\n\nimport colorama\nimport spreads.confit as confit\n\nimport spreads.workflow as workflow\nfrom spreads import config\nfrom spreads.plugin import get_devices, get_pluginmanager\nfrom spreads.util import DeviceException, ColourStreamHandler\n\n# Kudos to http://stackoverflow.com/a/1394994/487903\ntry:\n from msvcrt import getch\nexcept ImportError:\n def getch():\n \"\"\" Wait for keypress on stdin.\n\n :returns: unicode -- Value of character that was pressed\n\n \"\"\"\n import tty\n import termios\n fd = sys.stdin.fileno()\n old = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n return sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old)\n\n\ndef configure(args=None):\n for orientation in ('left', 'right'):\n print(\"Please connect and turn on the device labeled \\'{0}\\'\"\n .format(orientation))\n print(colorama.Fore.BLUE + \"Press any key when ready.\")\n getch()\n devs = get_devices()\n if len(devs) > 1:\n raise DeviceException(\"Please ensure that only one device is\"\n \" turned on!\")\n if not devs:\n raise DeviceException(\"No device found!\")\n devs[0].set_orientation(orientation)\n print(colorama.Fore.GREEN + \"Configured \\'{0}\\' device.\"\n .format(orientation))\n print(\"Please turn off the device.\")\n print(colorama.Fore.BLUE + \"Press any key when ready.\")\n getch()\n\n\ndef capture(args=None, devices=None):\n if not devices:\n devices = get_devices()\n if len(devices) != 2:\n raise DeviceException(\"Please connect and turn on two\"\n \" pre-configured devices! 
({0} were\"\n \" found)\".format(len(devices)))\n print(colorama.Fore.GREEN + \"Found {0} devices!\".format(len(devices)))\n if any(not x.orientation for x in devices):\n raise DeviceException(\"At least one of the devices has not been\"\n \" properly configured, please re-run the\"\n \" program with the \\'configure\\' option!\")\n # Set up for capturing\n print(\"Setting up devices for capturing.\")\n workflow.prepare_capture(devices)\n # Start capture loop\n print(colorama.Fore.BLUE + \"Press 'b' to capture.\")\n shot_count = 0\n start_time = time.time()\n pages_per_hour = 0\n capture_keys = config['capture']['capture_keys'].as_str_seq()\n while True:\n if getch().lower() not in capture_keys:\n break\n workflow.capture(devices)\n shot_count += len(devices)\n pages_per_hour = (3600/(time.time() - start_time))*shot_count\n status = (\"\\rShot {0} pages [{1:.0f}/h]\"\n .format(colorama.Fore.GREEN + str(shot_count),\n pages_per_hour))\n sys.stdout.write(status)\n sys.stdout.flush()\n workflow.finish_capture(devices)\n sys.stdout.write(\"\\rShot {0} pages in {1:.1f} minutes, average speed was\"\n \" {2:.0f} pages per hour\"\n .format(colorama.Fore.GREEN + str(shot_count),\n (time.time() - start_time)/60, pages_per_hour))\n sys.stdout.flush()\n\n\ndef download(args=None, path=None, devices=None):\n if args and args.path:\n path = args.path\n if not devices:\n devices = get_devices()\n status_str = \"Downloading {0}images from devices\"\n if config['download']['keep'].get(bool) or config['keep'].get(bool):\n status_str = status_str.format(\"\")\n else:\n status_str = status_str.format(\"and deleting \")\n print(colorama.Fore.GREEN + status_str)\n workflow.download(devices, path)\n\n\ndef postprocess(args=None, path=None):\n if args and args.path:\n path = args.path\n workflow.process(path)\n\n\ndef output(args=None, path=None):\n if args and args.path:\n path = args.path\n workflow.output(path)\n\n\ndef wizard(args, devices=None):\n # TODO: Think about how we can make this more dynamic, i.e. 
get list of\n # options for plugin with a description for each entry\n path = args.path\n if not devices:\n devices = get_devices()\n if any(not x.orientation for x in devices):\n print(colorama.Fore.YELLOW + \"Devices not yet configured!\")\n print(colorama.Fore.BLUE + \"Please turn both devices off.\"\n \" Press any key when ready.\")\n while True:\n try:\n configure()\n break\n except DeviceException as e:\n print(e)\n\n print(colorama.Fore.GREEN +\n \"==========================\\n\",\n \"Starting capturing process\\n\",\n \"==========================\")\n capture(devices=devices)\n\n print(colorama.Fore.GREEN +\n \"=========================\\n\",\n \"Starting download process\\n\"\n \"=========================\")\n download(path=path)\n\n print(colorama.Fore.GREEN +\n \"=======================\\n\"\n \"Starting postprocessing\\n\"\n \"=======================\")\n postprocess(path=path)\n\n print(colorama.Fore.GREEN +\n \"=================\\n\",\n \"Generating output\\n\"\n \"=================\")\n output(path=path)\n\n\ndef setup_parser():\n def _add_device_arguments(name, parser):\n try:\n for dev in get_devices():\n dev.add_arguments(name, parser)\n except:\n return\n\n pluginmanager = get_pluginmanager()\n rootparser = argparse.ArgumentParser(\n description=\"Scanning Tool for DIY Book Scanner\")\n subparsers = rootparser.add_subparsers()\n\n rootparser.add_argument(\n '--verbose', '-v', dest=\"verbose\", action=\"store_true\")\n\n wizard_parser = subparsers.add_parser(\n 'wizard', help=\"Interactive mode\")\n wizard_parser.add_argument(\n \"path\", help=\"Path where scanned images are to be stored\")\n wizard_parser.set_defaults(func=wizard)\n\n config_parser = subparsers.add_parser(\n 'configure', help=\"Perform initial configuration of the devices.\")\n config_parser.set_defaults(func=configure)\n\n capture_parser = subparsers.add_parser(\n 'capture', help=\"Start the capturing workflow\")\n capture_parser.set_defaults(func=capture)\n # Add arguments from plugins\n for parser in (capture_parser, wizard_parser):\n parser.add_argument(\"--no-parallel-capture\", dest=\"parallel_capture\",\n action=\"store_false\", default=True,\n help=\"Do not trigger capture on multiple devices at once.\")\n pluginmanager.map(lambda x, y, z: x.plugin.add_arguments(y, z),\n 'capture', parser)\n _add_device_arguments('capture', parser)\n\n download_parser = subparsers.add_parser(\n 'download', help=\"Download scanned images.\")\n download_parser.add_argument(\n \"path\", help=\"Path where scanned images are to be stored\")\n for subparser in (download_parser, wizard_parser):\n subparser.add_argument(\"--no-parallel-download\",\n dest=\"parallel_download\", action=\"store_false\", default=True,\n help=\"Do not download from multiple devices at once.\")\n subparser.add_argument(\n \"--keep\", \"-k\", dest=\"keep\", action=\"store_true\",\n help=\"Keep files on devices after download\")\n download_parser.set_defaults(func=download)\n # Add arguments from plugins\n for parser in (download_parser, wizard_parser):\n pluginmanager.map(lambda x, y, z: x.plugin.add_arguments(y, z),\n 'download', parser)\n _add_device_arguments('download', parser)\n\n postprocess_parser = subparsers.add_parser(\n 'postprocess',\n help=\"Postprocess scanned images.\")\n postprocess_parser.add_argument(\n \"path\", help=\"Path where scanned images are stored\")\n postprocess_parser.add_argument(\n \"--jobs\", \"-j\", dest=\"jobs\", type=int, default=None,\n metavar=\"\", help=\"Number of concurrent processes\")\n 
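# argparse's type=int rejects non-numeric --jobs values up front, before\n # they ever reach the postprocessing plugins.\n 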
postprocess_parser.set_defaults(func=postprocess)\n # Add arguments from plugins\n for parser in (postprocess_parser, wizard_parser):\n pluginmanager.map(lambda x, y, z: x.plugin.add_arguments(y, z),\n 'postprocess', parser)\n\n output_parser = subparsers.add_parser(\n 'output',\n help=\"Generate output files.\")\n output_parser.add_argument(\n \"path\", help=\"Path where scanned and postprocessed images are stored\")\n output_parser.set_defaults(func=output)\n # Add arguments from plugins\n for parser in (output_parser, wizard_parser):\n pluginmanager.map(lambda x, y, z: x.plugin.add_arguments(y, z),\n 'output', parser)\n\n pluginmanager.map(lambda x, y: x.plugin.add_command_parser(y),\n subparsers)\n return rootparser\n\n\ndef main():\n # Initialize color support\n colorama.init()\n # Set to ERROR so we can see errors during plugin loading.\n logging.basicConfig(level=logging.ERROR)\n config.read()\n cfg_path = os.path.join(config.config_dir(), confit.CONFIG_FILENAME)\n if not os.path.exists(cfg_path):\n config.dump(filename=cfg_path)\n\n parser = setup_parser()\n args = parser.parse_args()\n config.set_args(args)\n\n loglevel = config['loglevel'].as_choice({\n 'none': logging.NOTSET,\n 'info': logging.INFO,\n 'debug': logging.DEBUG,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL,\n })\n if args.verbose:\n loglevel = logging.DEBUG\n\n # Set up logger\n logger = logging.getLogger()\n if logger.handlers:\n for handler in logger.handlers:\n logger.removeHandler(handler)\n handler = ColourStreamHandler()\n handler.setLevel(loglevel)\n handler.setFormatter(logging.Formatter(\"%(name)s: %(message)s\"))\n logger.addHandler(handler)\n logger.setLevel(loglevel)\n\n args.func(args)\n\n # Deinitialize color support\n colorama.deinit()\n","sub_path":"spreads/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":11420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"110719792","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nimport matplotlib.pyplot as plt\nimport datetime\n\nimport sklearn.metrics\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.svm import SVC\n\ncurrencies = ['EUR', 'USD', 'XRP', 'NMC']\n\ndef get_variance_level(variance):\n if variance > 0:\n return '+'\n else:\n return '-'\n\n# Loading & cleaning\ndf = pd.read_csv('data.csv')\ndata = pd.DataFrame()\ndata['date'] = df['Date'].apply(lambda date: datetime.datetime.strptime(date, '%Y-%m-%d').date())\nvariances = {}\nfor currency in currencies:\n data[currency + '_variance'] = df['BCHARTS.kraken' + currency + ' - Close'] - df['BCHARTS.kraken' + currency + ' - Open']\n data[currency + '_variance'] /= data[currency + '_variance'].std(axis=0)\n data[currency + '_variance:3'] = sum([data[currency + '_variance'].shift(i) for i in range(1, 4)]) / 3\n data[currency + '_variance:5'] = sum([data[currency + '_variance'].shift(i) for i in range(1, 6)]) / 5\n variances[currency] = data[currency + '_variance']\ndates = data['date']\ndata = data.iloc[5:]\nwin = [get_variance_level(variance) for variance in data['EUR_variance']]\ndata = data.drop('date', axis=1)\nfor currency in currencies:\n data = data.drop(currency + '_variance', axis=1)\n\ntrain_data, test_data, train_win, test_win = train_test_split(data, win, test_size=0.5, random_state=42)\nclf = SVC()\nclf.fit(train_data, train_win)\n\npredicted = clf.predict(test_data)\nprint('Accuracy using SVC:', sklearn.metrics.accuracy_score(test_win, 
predicted))\n\nfor currency in currencies:\n print('Default win ' + currency + ':', sum(variances[currency]))\n\n# Win using classifier\nlocs = [i for i in range(len(predicted)) if predicted[i] == '+']\nprint(predicted[locs])\n\n# for currency in currencies:\n# plt.plot(dates, variances[currency], '-')\n# plt.legend(currencies)\n# plt.show()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"564021423","text":"#!/usr/bin/env python3\n\nimport os\nimport re\nfrom pathlib import Path\nimport subprocess\n\n# Array of APIs. Call API_Raml_to_Slate(apiname) for each. Expected: API RAML file in folder which is the api name.\n# Inside that folder must be .raml file with the same name. Example: ./src/product-api/product-api.raml\nAPIs = [\"Broker-api\", \"Calendar-api\", \"Context-api\", \"Identity-api\", \"Message-api\", \"Product-api\"]\n\n\ndef api_raml_to_slate(apiname):\n apiname = apiname.lower()\n jsonfile = Path(\"./OAS/\"+apiname+\".json\")\n slatefile = Path(\"./slate/\"+apiname.lower()+\".md\")\n\n print(\"\\nConverting \"+apiname+\" to Slate\")\n\n # Delete previous API docs\n print(\"Delete previous API doc files\")\n if jsonfile.exists():\n os.remove(jsonfile)\n else:\n print(\"File: \"+apiname+\".json delete failed. JSON formatted file not found!\")\n\n if slatefile.exists():\n os.remove(slatefile)\n else:\n print(\"File: \"+apiname+\".md delete failed. MD formatted file not found!\")\n\n # Generate API docs. First convert RAML -> OpenAPISpec file\n jsoncmd = \"./oas-raml-converter/lib/bin/converter.js --from RAML --to OAS20 ./src/\" +apiname+ \"/\" +apiname+ \".raml > ./OAS/\"+apiname+\".json\"\n failure = os.system(jsoncmd)\n if failure:\n print(\"RAML -> OpenAPISpec failed. Trying next in array.\")\n else:\n # Convert from OpenAPISpec to Slate md\n slatecmd= \"swagger-to-slate -i ./OAS/\" +apiname+ \".json -o ./slate/\" +apiname+ \".md\"\n failure = os.system(slatecmd)\n if failure:\n print(\"RAML -> Slate formatted md file creation failed. Trying next in array.\")\n\ndef concatenate_files():\n outfile = Path(\"../source/index.html.md\")\n if outfile.exists():\n os.remove(outfile)\n else:\n print(\"File: index.html.md delete failed. 
File not found!\")\n\n with open(outfile, 'w') as ofile:\n with open(Path(\"slate/index.md\")) as infile:\n ofile.write(infile.read())\n for api in APIs:\n slatefile = Path(\"./slate/\" + api.lower() + \".md\")\n\n ofile.write(\"# \"+api.replace(\"-\", \" \").replace(\"api\",\"API\")+\"\\n\")\n\n infile = open(slatefile, 'r').readlines()\n for index, line in enumerate(infile):\n\n # Now match the lines after which the code examples are injected.\n # example of one line: `***PUT*** /products/{product_code}`\n # That should match PUT_products_product_code.curl in examples folder\n example_file= str(line)\n example_file= re.sub('[`#* {}]', '', example_file)\n example_file = re.sub('[/]', '_', example_file)\n example_file = re.sub('I', 'i', example_file)\n example_file = example_file.rstrip(os.linesep)\n example_file_curl_path = Path(\"./examples/\" + example_file + \".curl\")\n example_file_python_path = Path(\"./examples/\" + example_file + \".python\")\n example_file_json_path = Path(\"./examples/\" + example_file + \".json\")\n example_file_path = Path(\"./examples/\" + example_file + \".example\")\n # print(str(example_file_path))\n\n example_method = str(line)\n example_method = re.sub('[`#*]', '', example_method)\n\n example_desc = \"\\n\\n > Example for: \"+example_method+\"\\n\\n\"\n\n if example_file_path.exists():\n with open(example_file_path) as sfile:\n print(\"Found example file: \" + str(example_file_path))\n ofile.write(example_desc)\n ofile.write(sfile.read()+\"\\n\\n\")\n\n\n # Ugly way of getting rid of some markup in the beginning of each file. Get everything after line 18 and\n # save to final markdown file\n if index > 18:\n # Some markdown cleanup since the converters mess things up\n if line.startswith(\"#\"):\n ofile.write(\"#\"+line.lower().replace(\"***\", \"**\"))\n elif line.startswith(\"`***\"):\n ofile.write(line.replace(\"***\", \"**\").replace(\"`\", \"\"))\n else:\n ofile.write(line.replace(\"***\", \"**\"))\n print(\"\\n\\nSlate file: \"+str(outfile)+\" created.\")\n\n\ndef make_html():\n cmd = \"cd .. & bundle exec middleman build\"\n failure = os.system(cmd)\n if failure:\n print(\"HTML build failed\")\n else:\n print(\"\\n\\nHTML generated successfully.\")\n\n# ----------------------------\n# MAIN - lets build it\n\n# Generate content\nfor api in APIs:\n api_raml_to_slate(api)\n\n# Merge all together\nconcatenate_files()\n\n# Build deployable content as html\nmake_html()\n\n","sub_path":"raml2markdown/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"63443234","text":"'''\nMIT License\n\nCopyright (c) 2018 JonoCode9374\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\n#!/usr/bin/env python3\n\n'''\n\nProject: BreezyUI Version b1.3.1 (b1.3.1.py)\nAuthor: JonoCode9374\nDate Created: 29/8/2018\nDescription:\n\nMany Python users who work with graphical interfaces know the pains of\ndesigning and creating a Graphical User Interface with Python’s built-in\ngraphics library tkinter (Tkinter in Python 2 and tkinter in Python 3) –\ncreating interfaces requires intense planning (getting the co-ordinates\nfor where each widget goes), lots of trial and error. This is what I\nhave personally experienced many times, making programs such as my SpamBot,\nDocEdit and my Head Scruiteneer Program.\n\n'''\n\n###############################################################################\n#Import section\n\nimport sys\n\nif sys.version_info[0] == 2:\n import Tkinter as tkinter\n import Tkinter.dnd\n import tkFileDialog\nelse:\n import tkinter\n from tkinter import dnd #Accessed with `tkinter.dnd`. Provides drag'n'drop\n #services\n import tkinter.filedialog\n\n\nimport functools\nimport tkinter.colorchooser as tkColor\n\nsys.path.insert(0, '../Libraries') #Used for importing all custom libraries\n\nimport screens, bUI, DndWidget\nimport DndSpace, CoreWidget\n###############################################################################\n#Class section\n\nclass Option:\n row = 0\n\n def __init__(self, name, config_name, attr_var, widget_type, applicable_widgets, *args, **kwargs):\n\n '''\n Takes:\n - self\n - name [str] -- The name of this option\n - config_name [str] -- The config attribute this affects\n - attr_var [str] -- The variable which the attribute is stored in\n - widget_type [str] -- The type of widget this option is\n - applicable_widgets [[str]] -- The widgets this option shows for\n - **kwargs -- The arguments to construct the widget\n\n Does:\n - Initalises this instance of option\n\n Returns:\n - None\n\n '''\n\n self.name = name\n self.attribute = config_name\n self.var = attr_var\n\n #Create the label and widget for this option\n self.label = tkinter.Label(attributes_area, text=name)\n\n construction = \"tkinter.{0}(attributes_area\".format(widget_type)\n\n if args:\n args = \", \".join([str(x) for x in args])\n construction = \"{0}, {1}\".format(construction, args)\n #At this point, construction would equal something like this:\n #tkinter.widget_type(attributes_area, args\n\n if kwargs:\n kwargs = ', '.join([str(x) + '=' + kwargs[x] for x in kwargs])\n construction = \"{0}, {1}\".format(construction, kwargs)\n #If the args wasn't empty, construction would look like this: tkinter.widget_type(attributes_area, args, kwargs\n\n construction += \")\"\n\n\n self.option = eval(construction)\n\n self.x = Option.row\n Option.row += 1\n\n self.widgets = applicable_widgets\n\n\n def show(self, widget_type):\n '''\n Takes:\n - self\n - widget_type [str] -- The type of widget being shown\n\n Does:\n - Shows the widget in the appropriate location if it is for a widget it supports\n\n Returns:\n - None\n '''\n\n if widget_type in self.widgets:\n if self.name == \"Display Text\":\n self.option.delete(0, tkinter.END)\n self.option.insert(0, target_widget.cget(\"text\"))\n\n elif self.name == \"Background Colour\":\n self.option[\"text\"] = target_widget.cget(\"bg\")\n\n elif self.name == \"Border 
Colour\":\n self.option[\"text\"] = target_widget.cget(\"highlightbackground\")\n\n self.label.grid(row=self.x, column=0)\n self.option.grid(row=self.x, column=1)\n\n def hide(self):\n '''\n Takes:\n - self\n\n Does:\n - Hides the widget using `.grid_forget()`\n\n Returns:\n - None\n '''\n\n self.label.grid_forget()\n self.option.grid_forget()\n\n###############################################################################\n#Function section\ndef on_dnd_start(event, widget_type):\n '''\n This is invoked when a widget is dragged onto the main canvas.\n '''\n\n global dragged_widgets\n\n #Create the widget to be dragged\n if widget_type in [\"Label\", \"Button\", \"Entry\"]:\n dnd_widget = DndWidget.DndWidget(widget_type, text='\"Text\"', state='\"disabled\"') #Maybe soon make this dynamic\n else:\n dnd_widget = DndWidget.DndWidget(widget_type, bd=\"1\", relief=\"'solid'\", highlightbackground=\"'#ffffff'\")\n dragged_widgets.append(dnd_widget)\n dnd_widget.attach(widget_area.canvas)\n tkinter.dnd.dnd_start(dnd_widget, event)\n\ndef edit_attributes(event, source, dnd_source):\n global target_widget, target_dndw\n target_dndw = dnd_source\n target_widget = source\n attributes_area.geometry(\"+{0}+{1}\".format(width_entry.get(), widget_area.top.winfo_height() + 100))\n attributes_area.deiconify()\n\n for item in options:\n options[item].hide()\n\n for item in options:\n options[item].show(w_type(target_widget))\n\n\n\ndef hide_attributes():\n for item in options:\n options[item].hide()\n options[\"text\"].option.delete(\"0\", tkinter.END)\n options[\"placeholder\"].option.delete(\"0\", tkinter.END)\n attributes_area.withdraw()\n\ndef update_widget():\n global target_widget\n widget_type = w_type(target_widget)\n\n for option in options:\n option = options[option]\n if widget_type in option.widgets:\n if option.attribute == \"\":\n continue\n if option.attribute[0] != \"$\":\n target_widget.config({option.attribute : eval(option.var) })\n\n elif option.attribute == \"$type\":\n if entry_type.get() == \"Password\":\n target_widget.config({\"show\" : \"*\"})\n else:\n continue\n\n elif option.attribute == \"$placeholder\":\n target_widget[\"state\"] = \"normal\"\n target_dndw.txt_var.set(options[\"placeholder\"].option.get())\n target_widget[\"state\"] = \"disabled\"\n\n\n elif option.attribute == \"$confirm\":\n print(\"Confirmed\")\n hide_attributes()\n target_widget = None\n\n\ndef choose_colour(event):\n global colour\n colour = tkColor.askcolor()[1]\n options[\"background\"].option[\"bg\"] = colour\n options[\"background\"].option[\"text\"] = colour\n\ndef choose_border_col(event):\n global border_colour\n border_colour = tkColor.askcolor()[1]\n options[\"canvas border colour\"].option[\"bg\"] = options[\"canvas border colour\"].option[\"text\"] = border_colour\n\ndef w_type(source):\n '''\n Takes:\n - source (a tkinter widget)\n\n Does:\n - See the the return section\n\n Returns:\n - The type of widget the option has\n '''\n\n widget_type = str(type(source))\n\n widget_type = widget_type[widget_type.find(\".\") + 1 : widget_type.find(\"'\", widget_type.find(\".\"))]\n\n return widget_type\n\ndef new():\n if validate_size(width_var.get(), height_var.get()):\n home_screen.hide()\n root.withdraw()\n main_area.show()\n main_area.top.geometry(\"{0}x{1}\".format(width_var.get(), height_var.get()))\n widget_area.top.geometry(\"+{0}+60\".format(width_var.get()))\n widget_area.show()\n\ndef back():\n home_screen.show()\n #TODO: Empty entry widgets\n root.deiconify()\n main_area.hide()\n 
widget_area.hide()\n attributes_area.withdraw()\n\n #Delete everything from screen\n for widget in dragged_widgets:\n widget.detach()\n\ndef validate_size(width, height):\n try:\n int(width)\n int(height)\n except ValueError:\n flash_red(5, [width_entry, height_entry])\n return False\n\n if width == \"\" or height == \"\":\n return False\n\n return True\n\ndef save_bUI_file():\n root.filename = tkinter.filedialog.asksaveasfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"python files\",\"*.py\"),(\"all files\",\"*.*\")))\n file = open(root.filename + \".bui\", \"w\")\n\n #Generate the bUI file\n for widget in dragged_widgets:\n widget_config = {\"id\" : widget.name, \"type\" : widget.widget_type, \"x\" : widget.widget.winfo_x(), \"y\" : widget.widget.winfo_y(), \"attributes\" : bUI.changed_dict(widget.attributes, bUI.get_attributes(widget.widget))}\n\n if widget.widget_type == \"Entry\":\n widget_config[\"attributes\"][\"Placeholder\"] = widget.widget.get()\n\n file.write(str(widget_config))\n\n #Add the bUI file to the recents list\n file = open(\"../Resources/recent.txt\", \"a\")\n file.write(root.filename + \".bui\" + \"\\n\")\n file.close()\n\ndef load(file):\n '''\n Takes a\n '''\n print(\"Loading file\")\n\ndef goof():\n print(\"Fiddle Riddle Diddle Diddle\")\n\ndef export():\n code = \"\"\"\n\n#############################################################\n# The following code was pre-generated by BreezyUI. #\n#Don't change any of the code if you don't know what it does#\n#############################################################\n\nimport sys\nif sys.version_info[0] == 2:\n import Tkinter as tkinter #backwards compatability\nelse:\n import tkinter\n\nroot = tkinter.Tk()\nroot.geometry(\"{0}x{1}\") #Set the size of the window\n \"\"\".format(width_var.get(), height_var.get()) + \"\\n\"\n for i in range(len(dragged_widgets)):\n widget = dragged_widgets[i]\n to_export = bUI.changed_dict(widget.attributes, bUI.get_attributes(widget.widget))\n code += \"{0} = tkinter.{1}(root, {2})\".format(widget.name, widget.widget_type, to_export) + \"\\n\"\n\n code += \"{0}.place(x={1},y={2})\".format(widget.name, widget.widget.winfo_x(), widget.widget.winfo_y()) + \"\\n\"\n\n if widget.widget_type == \"Entry\":\n if widget.widget.get():\n code += \"{0}.insert(0, {1})\".format(widget.name, widget.widget.get()) + \"\\n\"\n\n code += (\"\\n#################END OF GENERATED CODE#################\")\n root.filename = tkinter.filedialog.asksaveasfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"python files\",\"*.py\"),(\"all files\",\"*.*\")))\n file = open(root.filename + \".py\", \"w\")\n file.write(code)\n file.close()\n return\n\ndef flash_red(i, widgets):\n if i >= 0:\n temp = i - 1\n for widget in widgets:\n widget.configure({\"background\" : \"red\"})\n root.after(500, flash_white, temp, widgets)\n\ndef flash_white(i, widgets):\n if i >= 0:\n temp = i - 1\n for widget in widgets:\n widget.configure({\"background\" : \"white\"})\n root.after(500, flash_red, temp, widgets)\n\n###############################################################################\n#Window section\nroot = tkinter.Tk()\nroot.geometry(\"800x600\")\n\nmain_area = DndSpace.DndSpace(root, 800, 600)\nmain_area.top.geometry(\"+1+60\")\n\nwidget_area = DndSpace.DndSpace(root, 200, 600)\nwidget_area.top.geometry(\"+803+60\")\n\nattributes_area = 
tkinter.Toplevel()\nattributes_area.geometry(\"+803+160\")\nattributes_area.withdraw()\n\n###############################################################################\n#Menu section\n\n#Make all the menus\nmenu_bar = tkinter.Menu(root)\nfile_menu = tkinter.Menu(menu_bar, tearoff=0)\nopen_menu = tkinter.Menu(menu_bar, tearoff=0)\nfile_menu.add_command(label=\"New\", command=back)\nfile_menu.add_command(label=\"Save\", command=save_bUI_file)\nfile_menu.add_command(label=\"Export\", command=export)\nfile_menu.add_separator()\nfile_menu.add_command(label=\"Quit\", command=root.quit)\n\n#Dynamically generate the open_menu\nfiles = [line.strip(\"\\n\") for line in open(\"../Resources/recent.txt\").readlines()]\nfor file in files:\n open_menu.add_command(label=file, command=goof)\n\nmenu_bar.add_cascade(label=\"File\", menu=file_menu)\nmenu_bar.add_cascade(label=\"Recent Files\", menu=open_menu)\nroot.config(menu=menu_bar)\n\n###############################################################################\n#Tkinter section\n\nmenu_bar = tkinter.Frame(root, height=600, width=200, bg=\"#878E88\")\ncreation_bar = tkinter.Frame(root, height=200, width=600, bg=\"#C9CAC9\")\n\nsize_label = tkinter.Label(root, text=\"Canvas Size (px): 0 px\")\n\nwidth_lbl = tkinter.Label(root, text=\"Width (px): \", bg=\"#C9CAC9\", font=(\"Arial\", 24))\nheight_lbl = tkinter.Label(root, text=\"Height (px): \", bg=\"#C9CAC9\", font=(\"Arial\", 24))\n\nwidth_var = tkinter.StringVar()\nheight_var = tkinter.StringVar()\n\nwidth_entry = tkinter.Entry(root, width=10, textvariable=width_var)\nheight_entry = tkinter.Entry(root, width=10, textvariable=height_var)\n\nnew_button = tkinter.Button(root, text=\"Create\", command=new, width=25)\n\n###############################################################################\n#The Screen section\nhome_screen = screens.ScreenXY(\"BreezyUI\", root)\nhome_screen.add_item(menu_bar, 0, 0)\nhome_screen.add_item(creation_bar, 200, 0)\nhome_screen.add_item(new_button, 200, 100)\nhome_screen.add_item(width_lbl, 210, 10)\nhome_screen.add_item(height_lbl, 205, 50)\nhome_screen.add_item(width_entry, 350, 15)\nhome_screen.add_item(height_entry, 350, 55)\nhome_screen.show()\n\n###############################################################################\n#Variable section\n\nCoreWidget.widget_area = widget_area\nCoreWidget.on_dnd_start = on_dnd_start\nOption.attributes_area = attributes_area\nDndWidget.edit_attributes = edit_attributes\n\ncolour = \"#ffffff\"\nentry_type = tkinter.StringVar(root, \"Plain\")\nborder_colour = \"#000000\"\n\n\ndragged_widgets = list() #of widgets\ntarget_widget = None\ntarget_dndw = None #Stores the DndWidget form of the target widget (dndw stands for DND Widget)\n\n###############################################################################\n#CoreWidget section\n\nwidgets = dict() #A dictionary to store all the widget objects which the clones will come from\n\nwidgets[\"label\"] = CoreWidget.CoreWidget(\"Label\", text=\"'Label'\")\nwidgets[\"button\"] = CoreWidget.CoreWidget(\"Button\", text=\"'Button'\")\nwidgets[\"entry\"] = CoreWidget.CoreWidget(\"Entry\")\nwidgets[\"canvas\"] = CoreWidget.CoreWidget(\"Canvas\", bd=\"1\", relief=\"'solid'\", highlightbackground=\"'#ffffff'\")\n\nwidgets[\"entry\"].widget.delete(0, tkinter.END)\nwidgets[\"entry\"].widget.insert(0, \"Entry\")\n\nwidgets[\"entry\"].widget[\"state\"] = \"disabled\"\n###############################################################################\n#Option section\n\noptions = dict() #A 
dictionary to store all the attributes shown in the\n #attributes area\n\noptions[\"text\"] = Option(\"Display Text\", \"text\", \"options['text'].option.get()\", \"Entry\", [\"Label\", \"Button\"])\noptions[\"background\"] = Option(\"Background Colour\", \"background\", \"colour\", \"Label\", [\"Label\", \"Canvas\"], text=\"'#ffffff'\", borderwidth=\"2\", relief=\"'flat'\", highlightcolor=\"'black'\")\n\noptions[\"background\"].option.bind(\"\", choose_colour)\n\noptions[\"entry type\"] = Option(\"Type\", \"$type\", \"entry_type\", \"OptionMenu\", [\"Entry\"], 'entry_type', \"'Plain'\", \"'Password'\", \"'Numeric'\" )\noptions[\"placeholder\"] = Option(\"Placeholder\", \"$placeholder\", \"placeholder_text\", \"Entry\", [\"Entry\"])\n\n\noptions[\"canvas border colour\"] = Option(\"Border Colour\", \"highlightbackground\", \"border_colour\", \"Label\", [\"Canvas\"], text=\"'#000000'\")\n\noptions[\"canvas border colour\"].option.bind(\"\", choose_border_col)\n\noptions[\"confirm\"] = Option(\"\", \"\" , \"$confirm\", \"Button\", [\"Label\", \"Button\", \"Entry\", \"Canvas\"], text=\"'Confirm'\", command=\"update_widget\")\n\n###############################################################################\nif __name__ == \"__main__\":\n root.mainloop()\n","sub_path":"Releases/b.1.3.1.py","file_name":"b.1.3.1.py","file_ext":"py","file_size_in_byte":16613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"645558341","text":"# -*- coding: utf-8 -*-\nimport re\nimport unittest\nimport uuid\nfrom datetime import date, datetime\nfrom decimal import Decimal\nfrom urllib.parse import quote_plus\n\nimport numpy as np\nimport pandas as pd\nimport sqlalchemy\nfrom sqlalchemy import String\nfrom sqlalchemy.engine import create_engine\nfrom sqlalchemy.exc import NoSuchTableError\nfrom sqlalchemy.sql import expression\nfrom sqlalchemy.sql.schema import Column, MetaData, Table\nfrom sqlalchemy.sql.sqltypes import (\n BIGINT,\n BINARY,\n BOOLEAN,\n DATE,\n DECIMAL,\n FLOAT,\n INTEGER,\n STRINGTYPE,\n TIMESTAMP,\n)\n\nfrom tests.conftest import ENV, SCHEMA\nfrom tests.util import with_engine\n\n\nclass TestSQLAlchemyAthena(unittest.TestCase):\n \"\"\"Reference test case is following:\n\n https://github.com/dropbox/PyHive/blob/master/pyhive/tests/sqlalchemy_test_case.py\n https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_hive.py\n https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_presto.py\n \"\"\"\n\n def create_engine(self):\n conn_str = (\n \"awsathena+jdbc://athena.{AwsRegion}.amazonaws.com:443/\"\n + \"{Schema}?S3OutputLocation={S3OutputLocation}&S3Location={S3Location}\"\n + \"&compression=snappy\"\n )\n return create_engine(\n conn_str.format(\n AwsRegion=ENV.region_name,\n Schema=SCHEMA,\n S3OutputLocation=quote_plus(ENV.s3_staging_dir),\n S3Location=quote_plus(ENV.s3_staging_dir),\n )\n )\n\n @with_engine()\n def test_basic_query(self, engine, conn):\n rows = conn.execute(\"SELECT * FROM one_row\").fetchall()\n self.assertEqual(len(rows), 1)\n self.assertEqual(rows[0].number_of_rows, 1)\n self.assertEqual(len(rows[0]), 1)\n\n @with_engine()\n def test_reflect_no_such_table(self, engine, conn):\n self.assertRaises(\n NoSuchTableError,\n lambda: Table(\"this_does_not_exist\", MetaData(bind=engine), autoload=True),\n )\n self.assertRaises(\n NoSuchTableError,\n lambda: Table(\n \"this_does_not_exist\",\n MetaData(bind=engine),\n schema=\"also_does_not_exist\",\n autoload=True,\n ),\n )\n\n 
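# NOTE: the reflection tests below assume the fixture tables (one_row,\n # one_row_complex, many_rows) already exist in the SCHEMA database on\n # Athena; ENV and SCHEMA come from tests/conftest.py.\n 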
@with_engine()\n def test_reflect_table(self, engine, conn):\n one_row = Table(\"one_row\", MetaData(bind=engine), autoload=True)\n self.assertEqual(len(one_row.c), 1)\n self.assertIsNotNone(one_row.c.number_of_rows)\n\n @with_engine()\n def test_reflect_table_with_schema(self, engine, conn):\n one_row = Table(\"one_row\", MetaData(bind=engine), schema=SCHEMA, autoload=True)\n self.assertEqual(len(one_row.c), 1)\n self.assertIsNotNone(one_row.c.number_of_rows)\n\n @with_engine()\n def test_reflect_table_include_columns(self, engine, conn):\n one_row_complex = Table(\"one_row_complex\", MetaData(bind=engine))\n version = float(\n re.search(r\"^([\\d]+\\.[\\d]+)\\..+\", sqlalchemy.__version__).group(1)\n )\n if version <= 1.2:\n engine.dialect.reflecttable(\n conn,\n one_row_complex,\n include_columns=[\"col_int\"],\n exclude_columns=[],\n )\n elif version == 1.3:\n # https://docs.sqlalchemy.org/en/13/changelog/changelog_13.html\n # #change-64ac776996da1a5c3e3460b4c0f0b257\n engine.dialect.reflecttable(\n conn,\n one_row_complex,\n include_columns=[\"col_int\"],\n exclude_columns=[],\n resolve_fks=True,\n )\n else:\n # https://docs.sqlalchemy.org/en/14/changelog/changelog_14.html\n # #change-0215fae622c01f9409eb1ba2754f4792\n # https://docs.sqlalchemy.org/en/14/core/reflection.html\n # #sqlalchemy.engine.reflection.Inspector.reflect_table\n insp = sqlalchemy.inspect(engine)\n insp.reflect_table(\n one_row_complex,\n include_columns=[\"col_int\"],\n exclude_columns=[],\n resolve_fks=True,\n )\n self.assertEqual(len(one_row_complex.c), 1)\n self.assertIsNotNone(one_row_complex.c.col_int)\n self.assertRaises(AttributeError, lambda: one_row_complex.c.col_tinyint)\n\n @with_engine()\n def test_unicode(self, engine, conn):\n unicode_str = \"密林\"\n one_row = Table(\"one_row\", MetaData(bind=engine))\n returned_str = sqlalchemy.select(\n [expression.bindparam(\"あまぞん\", unicode_str, type_=String())],\n from_obj=one_row,\n ).scalar()\n self.assertEqual(returned_str, unicode_str)\n\n @with_engine()\n def test_reflect_schemas(self, engine, conn):\n insp = sqlalchemy.inspect(engine)\n schemas = insp.get_schema_names()\n self.assertIn(SCHEMA, schemas)\n self.assertIn(\"default\", schemas)\n\n @with_engine()\n def test_get_table_names(self, engine, conn):\n meta = MetaData()\n meta.reflect(bind=engine)\n print(meta.tables)\n self.assertIn(\"one_row\", meta.tables)\n self.assertIn(\"one_row_complex\", meta.tables)\n\n insp = sqlalchemy.inspect(engine)\n self.assertIn(\n \"many_rows\",\n insp.get_table_names(schema=SCHEMA),\n )\n\n @with_engine()\n def test_has_table(self, engine, conn):\n insp = sqlalchemy.inspect(engine)\n self.assertTrue(insp.has_table(\"one_row\", schema=SCHEMA))\n self.assertFalse(insp.has_table(\"this_table_does_not_exist\", schema=SCHEMA))\n\n @with_engine()\n def test_get_columns(self, engine, conn):\n insp = sqlalchemy.inspect(engine)\n actual = insp.get_columns(table_name=\"one_row\", schema=SCHEMA)[0]\n self.assertEqual(actual[\"name\"], \"number_of_rows\")\n self.assertTrue(isinstance(actual[\"type\"], INTEGER))\n self.assertTrue(actual[\"nullable\"])\n self.assertIsNone(actual[\"default\"])\n self.assertEqual(actual[\"ordinal_position\"], 1)\n self.assertIsNone(actual[\"comment\"])\n\n @with_engine()\n def test_char_length(self, engine, conn):\n one_row_complex = Table(\"one_row_complex\", MetaData(bind=engine), autoload=True)\n result = (\n sqlalchemy.select(\n [sqlalchemy.func.char_length(one_row_complex.c.col_string)]\n )\n .execute()\n .scalar()\n )\n 
self.assertEqual(result, len(\"a string\"))\n\n @with_engine()\n def test_reflect_select(self, engine, conn):\n one_row_complex = Table(\"one_row_complex\", MetaData(bind=engine), autoload=True)\n self.assertEqual(len(one_row_complex.c), 15)\n self.assertIsInstance(one_row_complex.c.col_string, Column)\n rows = one_row_complex.select().execute().fetchall()\n self.assertEqual(len(rows), 1)\n self.assertEqual(\n list(rows[0]),\n [\n True,\n 127,\n 32767,\n 2147483647,\n 9223372036854775807,\n 0.5,\n 0.25,\n \"a string\",\n datetime(2017, 1, 1, 0, 0, 0),\n date(2017, 1, 2),\n b\"123\",\n \"1, 2\",\n \"{1=2, 3=4}\",\n \"{a=1, b=2}\",\n Decimal(\"0.100000\"),\n ],\n )\n self.assertIsInstance(one_row_complex.c.col_boolean.type, BOOLEAN)\n self.assertIsInstance(one_row_complex.c.col_tinyint.type, INTEGER)\n self.assertIsInstance(one_row_complex.c.col_smallint.type, INTEGER)\n self.assertIsInstance(one_row_complex.c.col_int.type, INTEGER)\n self.assertIsInstance(one_row_complex.c.col_bigint.type, BIGINT)\n self.assertIsInstance(one_row_complex.c.col_float.type, FLOAT)\n self.assertIsInstance(one_row_complex.c.col_double.type, FLOAT)\n self.assertIsInstance(one_row_complex.c.col_string.type, type(STRINGTYPE))\n self.assertIsInstance(one_row_complex.c.col_timestamp.type, TIMESTAMP)\n self.assertIsInstance(one_row_complex.c.col_date.type, DATE)\n self.assertIsInstance(one_row_complex.c.col_binary.type, BINARY)\n self.assertIsInstance(one_row_complex.c.col_array.type, type(STRINGTYPE))\n self.assertIsInstance(one_row_complex.c.col_map.type, type(STRINGTYPE))\n self.assertIsInstance(one_row_complex.c.col_struct.type, type(STRINGTYPE))\n self.assertIsInstance(one_row_complex.c.col_decimal.type, DECIMAL)\n\n @with_engine()\n def test_reserved_words(self, engine, conn):\n \"\"\"Presto uses double quotes, not backticks\"\"\"\n fake_table = Table(\n \"select\", MetaData(bind=engine), Column(\"current_timestamp\", STRINGTYPE)\n )\n query = str(fake_table.select(fake_table.c.current_timestamp == \"a\"))\n self.assertIn('\"select\"', query)\n self.assertIn('\"current_timestamp\"', query)\n self.assertNotIn(\"`select`\", query)\n self.assertNotIn(\"`current_timestamp`\", query)\n\n @with_engine()\n def test_get_column_type(self, engine, conn):\n dialect = engine.dialect\n self.assertEqual(dialect._get_column_type(\"boolean\"), \"boolean\")\n self.assertEqual(dialect._get_column_type(\"tinyint\"), \"tinyint\")\n self.assertEqual(dialect._get_column_type(\"smallint\"), \"smallint\")\n self.assertEqual(dialect._get_column_type(\"integer\"), \"integer\")\n self.assertEqual(dialect._get_column_type(\"bigint\"), \"bigint\")\n self.assertEqual(dialect._get_column_type(\"real\"), \"real\")\n self.assertEqual(dialect._get_column_type(\"double\"), \"double\")\n self.assertEqual(dialect._get_column_type(\"varchar\"), \"varchar\")\n self.assertEqual(dialect._get_column_type(\"timestamp\"), \"timestamp\")\n self.assertEqual(dialect._get_column_type(\"date\"), \"date\")\n self.assertEqual(dialect._get_column_type(\"varbinary\"), \"varbinary\")\n self.assertEqual(dialect._get_column_type(\"array(integer)\"), \"array\")\n self.assertEqual(dialect._get_column_type(\"map(integer, integer)\"), \"map\")\n self.assertEqual(dialect._get_column_type(\"row(a integer, b integer)\"), \"row\")\n self.assertEqual(dialect._get_column_type(\"decimal(10,1)\"), \"decimal\")\n\n @with_engine()\n def test_contain_percents_character_query(self, engine, conn):\n select = sqlalchemy.sql.text(\n \"\"\"\n SELECT date_parse('20191030', '%Y%m%d')\n 
\"\"\"\n )\n table_expression = sqlalchemy.sql.selectable.TextAsFrom(select, []).cte()\n\n query = sqlalchemy.select([\"*\"]).select_from(table_expression)\n result = engine.execute(query)\n self.assertEqual(result.fetchall(), [(datetime(2019, 10, 30),)])\n\n query_with_limit = (\n sqlalchemy.sql.select([\"*\"]).select_from(table_expression).limit(1)\n )\n result_with_limit = engine.execute(query_with_limit)\n self.assertEqual(result_with_limit.fetchall(), [(datetime(2019, 10, 30),)])\n\n @with_engine()\n def test_query_with_parameter(self, engine, conn):\n select = sqlalchemy.sql.text(\n \"\"\"\n SELECT :word\n \"\"\"\n )\n table_expression = sqlalchemy.sql.selectable.TextAsFrom(select, []).cte()\n\n query = sqlalchemy.select([\"*\"]).select_from(table_expression)\n result = engine.execute(query, word=\"cat\")\n self.assertEqual(result.fetchall(), [(\"cat\",)])\n\n query_with_limit = (\n sqlalchemy.select([\"*\"]).select_from(table_expression).limit(1)\n )\n result_with_limit = engine.execute(query_with_limit, word=\"cat\")\n self.assertEqual(result_with_limit.fetchall(), [(\"cat\",)])\n\n @with_engine()\n def test_contain_percents_character_query_with_parameter(self, engine, conn):\n select1 = sqlalchemy.sql.text(\n \"\"\"\n SELECT date_parse('20191030', '%Y%m%d'), :word\n \"\"\"\n )\n table_expression1 = sqlalchemy.sql.selectable.TextAsFrom(select1, []).cte()\n\n query1 = sqlalchemy.select([\"*\"]).select_from(table_expression1)\n result1 = engine.execute(query1, word=\"cat\")\n self.assertEqual(result1.fetchall(), [(datetime(2019, 10, 30), \"cat\")])\n\n query_with_limit1 = (\n sqlalchemy.select([\"*\"]).select_from(table_expression1).limit(1)\n )\n result_with_limit1 = engine.execute(query_with_limit1, word=\"cat\")\n self.assertEqual(\n result_with_limit1.fetchall(), [(datetime(2019, 10, 30), \"cat\")]\n )\n\n select2 = sqlalchemy.sql.text(\n \"\"\"\n SELECT col_string, :param FROM one_row_complex\n WHERE col_string LIKE 'a%' OR col_string LIKE :param\n \"\"\"\n )\n table_expression2 = sqlalchemy.sql.selectable.TextAsFrom(select2, []).cte()\n\n query2 = sqlalchemy.select([\"*\"]).select_from(table_expression2)\n result2 = engine.execute(query2, param=\"b%\")\n self.assertEqual(result2.fetchall(), [(\"a string\", \"b%\")])\n\n query_with_limit2 = (\n sqlalchemy.select([\"*\"]).select_from(table_expression2).limit(1)\n )\n result_with_limit2 = engine.execute(query_with_limit2, param=\"b%\")\n self.assertEqual(result_with_limit2.fetchall(), [(\"a string\", \"b%\")])\n\n @with_engine()\n def test_to_sql(self, engine, conn):\n # TODO Add binary column (After dropping support for Python 2.7)\n table_name = \"to_sql_{0}\".format(str(uuid.uuid4()).replace(\"-\", \"\"))\n df = pd.DataFrame(\n {\n \"col_int\": np.int32([1]),\n \"col_bigint\": np.int64([12345]),\n \"col_float\": np.float32([1.0]),\n \"col_double\": np.float64([1.2345]),\n \"col_string\": [\"a\"],\n \"col_boolean\": np.bool_([True]),\n \"col_timestamp\": [datetime(2020, 1, 1, 0, 0, 0)],\n \"col_date\": [date(2020, 12, 31)],\n # \"col_binary\": \"foobar\".encode(),\n }\n )\n # Explicitly specify column order\n df = df[\n [\n \"col_int\",\n \"col_bigint\",\n \"col_float\",\n \"col_double\",\n \"col_string\",\n \"col_boolean\",\n \"col_timestamp\",\n \"col_date\",\n # \"col_binary\",\n ]\n ]\n df.to_sql(\n table_name,\n engine,\n schema=SCHEMA,\n index=False,\n if_exists=\"replace\",\n method=\"multi\",\n )\n\n table = Table(table_name, MetaData(bind=engine), autoload=True)\n self.assertEqual(\n 
table.select().execute().fetchall(),\n [\n (\n 1,\n 12345,\n 1.0,\n 1.2345,\n \"a\",\n True,\n datetime(2020, 1, 1, 0, 0, 0),\n date(2020, 12, 31),\n # \"foobar\".encode(),\n )\n ],\n )\n","sub_path":"tests/test_sqlalchemy_athena.py","file_name":"test_sqlalchemy_athena.py","file_ext":"py","file_size_in_byte":15184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"504546744","text":"from pico2d import *\n\nimport math\n\nname = 'Posin'\n\nimport main_state\nimport game_framework\nimport ListManagement\nfrom GameObject.Item import CItem\nimport random\nimport CEffect\nfrom GameObject.Bullet import CPlaneBullet, CBullet,CBossBlueBullet\n\n\nclass BigPosin:\n image = None\n\n def __init__(self):\n pass\n\n def __init__(self, x, y):\n self.x, self.y = x, y\n self.Frame = 0\n self.Dist = 0\n self.isDead = False\n self.Hp = 550\n self.RadianX, self.PivotY = 50, 50\n self.Player = main_state.ListManager.Player_Lst[0]\n self.bisOpen = False\n self.BulletMakeTerm = 0\n self.RandomDelta=random.randint(40,90)/100\n if BigPosin.image is None:\n BigPosin.image = load_image('../Resource/BigPosin.png')\n\n def Frame_Manegement(self):\n if self.bisOpen is True:\n self.Frame+= 10 *game_framework.frame_time\n if self.Frame > 32:\n self.Frame = 24\n\n\n def Bullet_Make(self):\n self.BulletMakeTerm+=game_framework.frame_time *self.RandomDelta\n if self.BulletMakeTerm > 5 :\n self.RandomDelta = random.randint(60, 90) / 100\n self.BulletMakeTerm = 0\n main_state.ListManager.Monster_BulletList.append(\n CBossBlueBullet.Blue_Bullet(self.x, self.y))\n def update(self):\n\n self.Frame_Manegement()\n self.Bullet_Make()\n if self.isDead or self.Hp< 0:\n main_state.Score.Add_Score(random.randint(1500, 2000))\n main_state.ListManager.Effect_Lst.append(CEffect.Effect(self.x + random.randint(-20, 20),\n self.y + random.randint(-20, 20),\n 128, 128, 200, 200, 9, 1))\n main_state.SoundManager.PlaySound(random.randint(3,7))\n main_state.ListManager.Boss_List[0].DeathCnt+=1\n return -1\n\n def draw(self):\n self.image.clip_draw(int(self.Frame) * 450, 0, 450, 450, self.x, self.y,100,100)\n\n\n\nclass MiddlePosin:\n image = None\n\n def __init__(self):\n pass\n\n def __init__(self, x, y):\n self.x, self.y = x, y\n self.Frame = 0\n self.Dist = 0\n self.isDead = False\n self.Hp = 350\n self.RadianX, self.PivotY = 25, 25\n self.Player = main_state.ListManager.Player_Lst[0]\n\n self.Time =0\n self.BulletTime = 3\n self.BulletPossibleTime = 0\n self.MakeBulletTerm = 0\n self.bisBulletPossible = False\n if MiddlePosin.image is None:\n MiddlePosin.image = load_image('../Resource/Boat_Posin.png')\n\n def MakeBullet(self):\n self.Time += game_framework.frame_time\n if self.Time > self.BulletTime and self.bisBulletPossible is False: # 불렛텀보다 커지면\n self.BulletPossibleTime = random.randint(2,4)\n self.bisBulletPossible =True\n self.Time =0\n if self.bisBulletPossible is True:\n self.MakeBulletTerm += game_framework.frame_time\n if self.MakeBulletTerm > 0.5:\n self.MakeBulletTerm=0\n speed= random.randint(40, 70) /100\n main_state.ListManager.Monster_BulletList.append(\n CPlaneBullet.Monster1_Bullet(self.x-7, self.y, 0,speed))\n main_state.ListManager.Monster_BulletList.append(\n CPlaneBullet.Monster1_Bullet(self.x+7, self.y, 0, speed))\n if self.BulletPossibleTime< self.Time:\n self.Time = 0\n self.bisBulletPossible = False\n self.BulletTime= random.randint(2, 4)\n def Dir_Calculate(self):\n X = self.Player.x - self.x\n Y = self.y - self.Player.y\n Cter = math.atan2(X, -Y)\n NewCter = 
Cter * (180/3.14)\n if NewCter < 0 :\n NewCter = 180 + (180+NewCter)\n\n self.Frame = NewCter / 11.25\n pass\n\n def update(self):\n self.MakeBullet()\n self.Dir_Calculate()\n if self.isDead or self.Hp< 0:\n main_state.Score.Add_Score(random.randint(1000, 1500))\n main_state.ListManager.Effect_Lst.append(CEffect.Effect(self.x + random.randint(-20, 20),\n self.y + random.randint(-20, 20),\n 128, 128, 200, 200, 9, 1))\n main_state.SoundManager.PlaySound(random.randint(3, 7))\n main_state.ListManager.Boss_List[0].DeathCnt += 1\n return -1\n\n def draw(self):\n self.image.clip_draw(int(self.Frame) * 30, 0, 30, 30, self.x, self.y,50,50)\n\n\n\nclass SmallPosin:\n image = None\n\n def __init__(self):\n pass\n\n def __init__(self, x, y):\n self.x, self.y = x, y\n self.Frame = 0\n self.Dist = 0\n self.isDead = False\n self.Hp = 250\n self.RadianX, self.PivotY = 30, 20\n self.Player = main_state.ListManager.Player_Lst[0]\n self.BulletTime = 0\n self.MakeBulletTerm = random.randint(20,40) / 10\n if SmallPosin.image is None:\n SmallPosin.image = load_image('../Resource/Boat_Posin2.png')\n def Dir_Calculate(self):\n X = self.Player.x - self.x\n Y = self.y - self.Player.y\n Cter = math.atan2(X, -Y)\n NewCter = Cter * (180/3.14)\n if NewCter < 0 :\n NewCter = 180 + (180+NewCter)\n self.Frame = NewCter / 11.25\n pass\n\n def MakeBullet(self):\n self.BulletTime+=game_framework.frame_time\n if self.MakeBulletTerm < self.BulletTime:\n self.BulletTime = 0\n self.MakeBulletTerm = random.randint(20,40) / 10\n speed = random.randint(70, 100) / 100\n main_state.ListManager.Monster_BulletList.append(CPlaneBullet.Monster1_Bullet(self.x, self.y, 0,speed))\n pass\n def update(self):\n self.MakeBullet()\n self.Dir_Calculate()\n if self.isDead or self.Hp< 0:\n main_state.Score.Add_Score(random.randint(500, 1000))\n main_state.ListManager.Effect_Lst.append(CEffect.Effect(self.x + random.randint(-20, 20),\n self.y + random.randint(-20, 20),\n 128, 128, 200, 200, 9, 1))\n main_state.SoundManager.PlaySound(random.randint(3, 7))\n main_state.ListManager.Boss_List[0].DeathCnt += 1\n return -1\n\n def draw(self):\n self.image.clip_draw(int(self.Frame) * 40, 0, 40, 30, self.x, self.y,60,40)","sub_path":"GameFrameWork/GameObject/Monster/Posin.py","file_name":"Posin.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"471338686","text":"from json import loads\nfrom urllib.parse import quote\n\n_CATEGORIES_TRANSLATE_TABLE = [\n ('number', 'number'),\n ('rarity', 'rarity'),\n ('name', 'name'),\n ('type', 'type'),\n ('manaCost', 'mana_cost'),\n ('power', 'power'),\n ('toughness', 'toughness'),\n]\n\n_COST_TRANSLATE_TABLE = {\n '0': 'u0',\n '1': 'u1',\n '2': 'u2',\n '3': 'u3',\n '4': 'u4',\n '5': 'u5',\n '6': 'u6',\n '7': 'u7',\n '8': 'u8',\n '9': 'u9',\n '10': 'u10',\n '11': 'u11',\n '12': 'u12',\n '13': 'u13',\n '14': 'u14',\n '15': 'u15',\n '16': 'u16',\n '17': 'u17',\n '18': 'u18',\n '19': 'u19',\n '20': 'u20',\n 'X': 'x',\n 'Y': 'y',\n 'Z': 'z',\n 'W': 'white',\n 'U': 'blue',\n 'B': 'black',\n 'R': 'red',\n 'G': 'green',\n 'W/U': 'white-blue',\n 'W/B': 'white-black',\n 'U/B': 'blue-black',\n 'U/R': 'blue-red',\n 'B/R': 'black-red',\n 'B/G': 'black-green',\n 'R/W': 'red-white',\n 'R/G': 'red-green',\n 'G/W': 'green-white',\n 'G/B': 'green-blue',\n '2/W': 'white-2',\n '2/U': 'blue-2',\n '2/B': 'black-2',\n '2/R': 'red-2',\n '2/G': 'green-2',\n 'W/P': 'white-phyrexian',\n 'U/P': 'blue-phyrexian',\n 'B/P': 'black-phyrexian',\n 'R/P': 
'red-phyrexian',\n 'G/P': 'green-phyrexian',\n '1000000': 'u1000000',\n 'hw': 'half-white',\n}\n\n_RARITY_TABLE = {\n 'm': 'Mythic',\n 'r': 'Rare',\n 'u': 'Uncommon',\n 'c': 'Common',\n}\n\n\ndef get_mtgjson(file_='mtgjson/AllSets.json'):\n with open(file_, 'r', encoding='utf-8') as file_data:\n mtgjson = loads(file_data.read())\n\n return mtgjson\n\n\ndef get_rarity(query_parameter):\n if query_parameter == 'all':\n return set(['Mythic', 'Rare', 'Uncommon', 'Common'])\n\n rarity = set()\n for r in query_parameter.split(','):\n if r in _RARITY_TABLE:\n rarity.add(_RARITY_TABLE[r])\n\n if not rarity:\n return set(['Mythic', 'Rare', 'Uncommon', 'Common'])\n\n return rarity\n\n\ndef strip_rarity(cards, rarity=[]):\n for card in cards[:]:\n if card['rarity'] not in rarity:\n cards.remove(card)\n\n\ndef purify_cards(cards, translate=[]):\n purified_cards = []\n\n for card in cards:\n current_purified = {}\n for key1, key2 in translate:\n if key1 in card:\n current_purified[key2] = card[key1]\n purified_cards.append(current_purified.copy())\n\n return purified_cards\n\n\ndef tokenize_cost(card):\n try:\n cost = card['mana_cost']\n except KeyError:\n return []\n\n return cost[1:-1].split('}{')\n\n\ndef translate_mana_cost(set_):\n for card in set_['cards']:\n mana_cost = []\n for cost_token in tokenize_cost(card):\n try:\n css_class = _COST_TRANSLATE_TABLE[cost_token]\n except KeyError:\n mana_cost.append('{[{0}}}'.format(cost_token))\n else:\n mana_cost.append(css_class)\n card['mana_cost'] = mana_cost\n\n\ndef add_image_name(set_):\n for card in set_['cards']:\n card['image_name'] = quote(card['name'])\n\n\ndef get_purified_set(mtgjson=None, set_name=None, rarity=[]):\n if set_name is None:\n return {}\n\n try:\n whole_set = mtgjson[set_name]\n except KeyError:\n return {}\n\n if rarity:\n strip_rarity(whole_set['cards'], rarity=rarity)\n\n set_ = {}\n for key, value in whole_set.items():\n if key != 'cards':\n set_[key] = value\n\n set_['cards'] = purify_cards(whole_set['cards'],\n translate=_CATEGORIES_TRANSLATE_TABLE)\n add_image_name(set_)\n translate_mana_cost(set_)\n\n return set_\n\n\ndef get_available_sets(mtgjson=[]):\n return mtgjson.keys()\n","sub_path":"mtgtools.py","file_name":"mtgtools.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"589092574","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n#-------------------------------------------------------------------------------\n# Name: Scrape_one.py\n# Description: \n# Author: hlh\n# Date: 2022/3/15 17:38\n# desc: The page uses CSS to control text offsets, or hides key information by other special means, which interferes with data scraping\n#-------------------------------------------------------------------------------\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\ndef get_browser():\n options=webdriver.ChromeOptions()\n prefs={\"profile.managed_default_content_settings.images\": 2}\n options.add_argument(\"--headless\")\n options.add_experimental_option(\"prefs\",prefs)\n options.add_argument(\"lang=zh_CN.utf-8\")\n browser=webdriver.Chrome(options=options)\n wait=WebDriverWait(browser,10)\n return wait,browser\n\n\ndef get_html(url,browser,wait):\n browser.get(url)\n wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR,'')))\n\n\n\n\n\n\ndef main():\n url='https://antispider3.scrape.center/page/1'\n wait,browser=get_browser()\n 
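# NOTE: nothing ever quits the driver; wrapping the call below in\n # try/finally with browser.quit() would release Chrome when scraping ends.\n 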
get_html(url,browser,wait)\n\n\n\nif __name__=='__main__':\n    main()","sub_path":"base_test/tencent_video/Scrape_one.py","file_name":"Scrape_one.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"424748250","text":"\nwhile True:\n    x = input(\"Best Marvel Character (q to quit): \")\n    if x == 'q':\n        break\n    if x == '':\n        continue\n    print(\"Nope: you are wrong about\", x)\n\nwhile True:\n    raw_num = input(\"How many times have you watched WandaVision? \")\n    if raw_num == 'q':\n        break\n\n    # num = input(...)\n    # num = int(num)\n    try:\n        num = int(raw_num)\n    except Exception as err:\n        print(err)\n    else:\n        print(num * '*')\n\n","sub_path":"user_input.py","file_name":"user_input.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"544971703","text":"\"\"\"\nGiven a sorted array of distinct integers and a target value, return the index if the \ntarget is found. If not, return the index where it would be if it were inserted in order.\n\nExample 1:\nInput: nums = [1,3,5,6], target = 5\nOutput: 2\n\nExample 2:\nInput: nums = [1,3,5,6], target = 2\nOutput: 1\n\nExample 3:\nInput: nums = [1,3,5,6], target = 7\nOutput: 4\n\nExample 4:\nInput: nums = [1,3,5,6], target = 0\nOutput: 0\n\nExample 5:\nInput: nums = [1], target = 0\nOutput: 0\n \nConstraints:\n1 <= nums.length <= 10^4\n-10^4 <= nums[i] <= 10^4\n\nnums contains distinct values sorted in ascending order.\n-10^4 <= target <= 10^4\n\"\"\"\n\nfrom typing import List\n\n# T: O(log n); S: O(1)\n\nclass Solution:\n    def searchInsert(self, nums: List[int], target: int) -> int:\n        left, right = 0, len(nums)-1\n        \n        while (left <= right):\n            mid = left + (right - left)//2\n            \n            if (nums[mid] == target):\n                return mid\n            elif (nums[mid] < target):\n                left = mid + 1\n            else:\n                right = mid - 1\n        \n        return left","sub_path":"01_LeetCode/03_LeetCode_Redo_3/01_Easy/Search_Insert_Position.py","file_name":"Search_Insert_Position.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"122022302","text":"import pytest\n\nfrom sapfo.taggedlist import ComboMode, FilterGroup, FilterTagItem, Parser\n\n\n@pytest.mark.parametrize(\n    ('text',),\n    [('a, b',), (' a,b ',), (' a ,b ',), ('a, b',)])\ndef test_filter_tag_groups(text) -> None:\n    parser = Parser(text, {}, {})\n    assert parser.parse() == FilterGroup(\n        combo_mode=ComboMode.AND,\n        children=[FilterTagItem('a'), FilterTagItem('b')]\n    )\n","sub_path":"tests/test_taggedlist.py","file_name":"test_taggedlist.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"130706263","text":"import pandas\nimport os\n\ndef read_all_data(filepath, output_file, dlm=\"\\t\", encoding=\"utf-8\"):\n\n    f_path = filepath\n    f_list = os.listdir(f_path)\n\n    all_df = []\n    count = 0\n    for f in f_list:\n        if count == 0:\n            all_df = pandas.read_excel(os.path.join(f_path, f), \"Export\",\n                                       names=[\"Geo_Id\", \"Geography\", \"Total_Est\", \"Total_MOE\", \"White_est\", \"White_MOE\",\n                                              \"Black_Est\", \"Black_MOE\", \"AI_AN_EST\", \"AI_AN_MOE\", \"Asian_Est\",\n                                              \"Asian_MOE\", \"PI_Est\", \"PI_MOE\", \"Other_Est\", \"Other_MOE\", \"Two_Est\",\n                                              \"Two_MOE\", \"Two_Other_Est\", \"Two_Other_MOE\", \"Three_Est\", \"Three_MOE\"],\n                                       skiprows=10, header=None)\n            count += 1\n        else:\n            df = 
pandas.read_excel(os.path.join(f_path, f), \"Export\",\n                               names=[\"Geo_Id\", \"Geography\", \"Total_Est\", \"Total_MOE\", \"White_est\", \"White_MOE\",\n                                      \"Black_Est\", \"Black_MOE\", \"AI_AN_EST\", \"AI_AN_MOE\", \"Asian_Est\", \"Asian_MOE\",\n                                      \"PI_Est\", \"PI_MOE\", \"Other_Est\", \"Other_MOE\", \"Two_Est\", \"Two_MOE\",\n                                      \"Two_Other_Est\", \"Two_Other_MOE\", \"Three_Est\", \"Three_MOE\"], skiprows=10,\n                               header=None)\n\n            all_df = pandas.concat([all_df, df])\n\n    all_df.reset_index(inplace=True)\n    all_df.drop(\"index\", inplace=True, axis=1)\n    all_df.to_csv(output_file, sep=dlm, encoding=encoding)\n    return all_df\n\nread_all_data(\"data_download\", \"data\\\\all_school_districts.txt\")","sub_path":"import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"78886536","text":"import requests\nfrom requests_toolbelt import MultipartEncoder\n\nurl = \"http://oapi.test.moredian.com:8001/member/create\"\n\nquerystring = {\"accessToken\":\"J4dOydZNZQ26ywjpUaHeGBxWvkhAdluxnZDSjtARVtMf6g0t-oGvqyEVcI9GZ-KH\"}\n\nfile_payload = open('C:/Users/admin/Desktop/bug-picture/123/000001.jpg', 'rb')\ndata = MultipartEncoder(fields={'tpUserId': '1234',\n                                'memberName': '12345',\n                                'mobile': '123456',\n                                'verifyFace': ('000001.jpg', file_payload, 'image/jpeg')}\n                        )\nresponse = requests.post(url, data=data, headers={'Content-Type': data.content_type}, params=querystring)\n\nprint(response.text)","sub_path":"性能/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"521036595","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('aparcamientos', '0009_auto_20170524_0806'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='info_usuario',\n            name='pagina_personal',\n            field=models.CharField(default='PAGINA DE ', blank=True, max_length=200),\n        ),\n    ]\n","sub_path":"aparcamientos/migrations/0010_info_usuario_pagina_personal.py","file_name":"0010_info_usuario_pagina_personal.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"335520734","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ntypesofmusic = []\nmusiccount = {}\nfor filename in os.listdir('data'):\n    with open('data/' + filename, 'r') as subtitlefile:\n        for line in subtitlefile.readlines():\n            if '<i>' in line and \\\n               '</i>' in line and \\\n               'music' in line and \\\n               '[' in line and \\\n               ']' in line:\n                cleanup = line.replace('<i>','').replace('</i>','')\n                cleanup = cleanup.replace('[','').replace(']','')\n                cleanup = cleanup.rstrip()\n                typesofmusic.append(cleanup)\n\nfor m in typesofmusic:\n    musiccount[m] = typesofmusic.count(m)\n\nprint('Total number of different types of music: {0}'.format(len(musiccount)))\nprint('\\n')\nprint('---------------------------The types of music and their occurrences---------------------------')\nprint('\\n')\n\nfor key in sorted(musiccount, key=musiccount.get, reverse=True):\n    print('type of music: {0}, number of occurrences: {1} \\n'.format(key, musiccount[key]))\n\nx = list(musiccount.keys())\ny = list(musiccount.values())\nfig, ax = plt.subplots()\nwidth = 0.75 # the width of the bars \nind = np.arange(len(y))  # the x locations for the 
groups\nax.barh(ind, y, width, color=\"blue\")\nax.set_yticks(ind+width/2)\nax.set_yticklabels(x, minor=False)\nplt.title('Suits music')\nplt.xlabel('Number of occurrences')\nplt.ylabel('Types of music') \n\nfor i, v in enumerate(musiccount.values()):\n    ax.text(v + 3, i + .25, str(v), color='blue')\nplt.show()\n","sub_path":"music-counter-script.py","file_name":"music-counter-script.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"486921607","text":"import numpy as np\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom baseModel_c import baseModel_c\nfrom baseModel_c import modelType_e\n\n\nclass sklET_c(baseModel_c):\n    \"\"\"\n    Abstracts the specific model class.\n    This may not be needed in many cases but does allow for easy model-specific\n    modification as theGeneral evolves.\n    \"\"\"\n    def __init__(self, data=None):\n        super().__init__(\n            data=data,\n            name=\"Extra Trees\",\n            modelType=modelType_e.TREE,\n            pScore=True,\n            pFit=True,\n            pPredict=True,\n            modelFunc=ExtraTreesClassifier,\n            staticParam={\"n_jobs\": -1},\n            bestParam={},\n            paramDist={\"n_estimators\": [5, 200]})\n\n    def predict_proba(self, *args, **kwargs):\n        \"\"\"\n        The extra trees classifier returns a leaf distribution of\n        probabilities. This needs to be converted into a single probability\n        for each possible category.\n        \"\"\"\n        prob = super().predict_proba(*args, **kwargs)\n        return np.column_stack([x[:, 1] for x in prob])\n\n    def modParams(self, **kwargs):\n        \"\"\"\n        optunity only supports floating-point types but some model classes\n        only support int parameters. The base implementation does nothing;\n        this override casts n_estimators back to an int.\n        \"\"\"\n        if 'n_estimators' in kwargs:\n            kwargs['n_estimators'] = int(kwargs['n_estimators'])\n        return kwargs\n","sub_path":"src/sklET_c.py","file_name":"sklET_c.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"653190611","text":"\n# machine import\nimport time\nfrom machine import Pin\n\nfrom ai import AnalogInput\nfrom bi import BinaryInput\nfrom bo import BinaryOutput\nfrom av import AnalogValue\nfrom bv import BinaryValue\nfrom timer import Timer\nfrom data_exchange import DataExchange\nfrom thermistor10KDegC import Thermistor10KCelsius\nfrom percent0100Rev_aic import PercentReverseAIC\nfrom automation import Automation as func\nimport light_program\n\n# network data exchange\nxfer = DataExchange()\nxfer.attach('192.168.0.51')\nread = None\nSERVER_ADDR = \"192.168.0.149\"\n\n# scale ranges\naic10K = Thermistor10KCelsius()\npercentRev = PercentReverseAIC()\n\n# inputs\nAI1 = AnalogInput(1, \"t_ext\", 85.0, 0.0, aic10K)\nAI2 = AnalogInput(2, \"t_piscine\", 85.0, 0.0, aic10K)\nAI3 = AnalogInput(3, \"t_panneau\", 85.0, 0.0, aic10K)\nAI4 = AnalogInput(4, \"photocell\", 85.0, 0.0, percentRev)\nBI8 = BinaryInput(8, \"bouton\", False)\nstop_button = Pin(0, Pin.IN, Pin.PULL_UP)\n\n# outputs\nBO1 = BinaryOutput(1, \"chauff_panneau\")\nBO2 = BinaryOutput(2, \"sortie2\")\nBO3 = BinaryOutput(3, \"sortie3\")\nBO4 = BinaryOutput(4, \"sortie4\")\nBO5 = BinaryOutput(5, \"sortie5\")\nBO6 = BinaryOutput(6, \"sortie6\")\nBO7 = BinaryOutput(7, \"sortie7\")\nBO8 = BinaryOutput(8, \"pompe_irrig\")\nscan_led = Pin(13, Pin.OUT)\n# default value\nBO1.value = False\nBO2.value = False\nBO3.value = False\nBO4.value = False\nBO5.value = False\nBO6.value = False\nBO7.value = False\nBO8.value = 
False\n\n#variables\nstop_board = BinaryValue(1, \"stop-board\")\n\n#first scan\nAI1.value; AI2.value; AI3.value; AI4.value; BI8.value\n\n#miscellaneous timers\ntimer_1 = time.ticks_ms()\ntimer_2 = time.time()\ntimer_3 = time.time()\n\n#function return date-time\ndef actualTime(t):\n    date_str = \"{:4}-{:02}-{:02}\".format(t[0],t[1],t[2])\n    time_str = \"{:02}:{:02}:{:02}\".format(t[3],t[4],t[5])\n    return date_str+\" \"+time_str\n\ndef ioData():\n    board = {\"AI1\": AI1.value, \"AI2\": AI2.value, \"AI3\": AI3.value, \"AI4\": AI4.value, \"BI8\": BI8.value, \"BO1\": BO1.value, \"BO2\": BO2.value, \"BO3\": BO3.value,\n             \"BO4\": BO4.value, \"BO5\": BO5.value, \"BO6\": BO6.value, \"BO7\": BO7.value, \"BO8\": BO8.value, \"board\": \"ioboard_cab\", \"route\": \"nred\"\n             }\n    return board\n    \n#function stop board\ndef _stop():\n    xfer.send_data({\"route\": \"nred\", \"board\": \"ioboard_cab\", \"state\": \"stop command received...\"}, SERVER_ADDR)\n    import gc\n    gc.collect()\n    import sys\n    sys.exit()\n    \n#function set time clock\ndef _settime():\n    import network_rtc\n    network_rtc.set_time()\n    xfer.send_data({\"route\": \"nred\", \"board\": \"ioboard_cab\", \"time\": actualTime(time.localtime())}, SERVER_ADDR)\n    \n\n#boot wait 1 sec\nxfer.send_data({\"route\": \"nred\", \"board\": \"ioboard_cab\", \"state\": \"booting wait 1 sec...\"}, SERVER_ADDR)\ntime.sleep(1)\nxfer.send_data({\"route\": \"nred\", \"board\": \"ioboard_cab\", \"time\": actualTime(time.localtime())}, SERVER_ADDR)\n\n\n#main loop execution\nwhile True:\n    \n    #stop board from network or on board button\n    if stop_board.value: _stop()\n    if stop_button.value() == 0: _stop()\n    \n    # Read data transfer\n    read = xfer.recv_data()\n    if read is not None:\n        if read[0] == \"/ioboard_cab/system/exit\" and read[1] == \"True\": stop_board.value = True\n        if read[0] == \"/ioboard_cab/valve\": _irrig(read[1])\n        if read[0] == \"/ioboard_cab/settime\" and read[1] == \"True\": _settime()\n        if read[0] == \"/ioboard_cab/gettime\" and read[1] == \"True\": xfer.send_data({\"route\": \"nred\", \"board\": \"ioboard_cab\", \"time\": actualTime(time.localtime())}, SERVER_ADDR)\n\n    #scan inputs not used in automation process... 
\n    temp_ext = AI1.value; temp_pisc = AI2.value; temp_pan = AI3.value; photo = AI4.value; BI8.value\n    \n    #panel heater\n    BO1.value = func.aswitch(BO1.value, temp_pan, 5.0, 10.0)\n    \n    #test with button and output 8\n    if BI8.rising(): BO8.value = not BO8.value\n    \n    #sync time every hour\n    if (time.time() - timer_2) >= 3600:\n        _settime()\n        timer_2 = time.time()\n\n    #led flasher light_program 200 msec\n    if time.ticks_diff(time.ticks_ms(), timer_1) > 200:\n        scan_led.value(0 if scan_led.value() else 1)\n        timer_1 = time.ticks_ms()\n    \n    #send I/O data every 5 minutes\n    if (time.time() - timer_3) >= 300:\n        xfer.send_data(ioData(), SERVER_ADDR)\n        timer_3 = time.time() \n\n#End","sub_path":"esp32/ioboard_cabanon/master2.py","file_name":"master2.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"72599584","text":"import os\n\nfrom conan.tools.files import chdir\nfrom conans.errors import ConanException\nfrom conans.util.files import mkdir\nfrom conans.util.runners import check_output_runner\n\n\nclass Git(object):\n    def __init__(self, conanfile, folder=\".\"):\n        self._conanfile = conanfile\n        self.folder = folder\n\n    def run(self, cmd):\n        with chdir(self._conanfile, self.folder):\n            return check_output_runner(\"git {}\".format(cmd)).strip()\n\n    def get_commit(self):\n        try:\n            # commit = self.run(\"rev-parse HEAD\") For the whole repo\n            # This rev-list knows to capture the last commit for the folder\n            # --full-history is needed to not avoid wrong commits:\n            # https://github.com/conan-io/conan/issues/10971\n            # https://git-scm.com/docs/git-rev-list#Documentation/git-rev-list.txt-Defaultmode\n            commit = self.run('rev-list HEAD -n 1 --full-history -- \".\"')\n            return commit\n        except Exception as e:\n            raise ConanException(\"Unable to get git commit in '%s': %s\" % (self.folder, str(e)))\n\n    def get_remote_url(self, remote=\"origin\"):\n        remotes = self.run(\"remote -v\")\n        for r in remotes.splitlines():\n            name, url = r.split(maxsplit=1)\n            if name == remote:\n                url, _ = url.rsplit(None, 1)\n                if os.path.exists(url):  # Windows local directory\n                    url = url.replace(\"\\\\\", \"/\")\n                return url\n\n    def commit_in_remote(self, commit, remote=\"origin\"):\n        if not remote:\n            return False\n        try:\n            branches = self.run(\"branch -r --contains {}\".format(commit))\n            return \"{}/\".format(remote) in branches\n        except Exception as e:\n            raise ConanException(\"Unable to check remote commit in '%s': %s\" % (self.folder, str(e)))\n\n    def is_dirty(self):\n        status = self.run(\"status -s\").strip()\n        return bool(status)\n\n    def get_url_and_commit(self, remote=\"origin\"):\n        dirty = self.is_dirty()\n        if dirty:\n            raise ConanException(\"Repo is dirty, cannot capture url and commit: \"\n                                 \"{}\".format(self.folder))\n        commit = self.get_commit()\n        url = self.get_remote_url(remote=remote)\n        in_remote = self.commit_in_remote(commit, remote=remote)\n        if in_remote:\n            return url, commit\n        # TODO: Once we know how to pass [conf] to export, enable this\n        # conf_name = \"tools.scm:local\"\n        # allow_local = self._conanfile.conf[conf_name]\n        # if not allow_local:\n        #    raise ConanException(\"Current commit {} doesn't exist in remote {}\\n\"\n        #                         \"use '-c {}=1' to allow it\".format(commit, remote, conf_name))\n\n        self._conanfile.output.warn(\"Current commit {} doesn't exist in remote {}\\n\"\n                                    \"This revision will not be buildable in other \"\n                                    \"computer\".format(commit, remote))\n        return self.get_repo_root(), commit\n\n    def get_repo_root(self):\n        folder = 
self.run(\"rev-parse --show-toplevel\")\n return folder.replace(\"\\\\\", \"/\")\n\n def clone(self, url, target=\"\", args=None):\n args = args or []\n if os.path.exists(url):\n url = url.replace(\"\\\\\", \"/\") # Windows local directory\n mkdir(self.folder)\n self._conanfile.output.info(\"Cloning git repo\")\n self.run('clone \"{}\" {} {}'.format(url, \" \".join(args), target))\n\n def fetch_commit(self, url, commit):\n \"\"\"\n Experimental: does a 1 commit fetch and checkout, instead of a full clone,\n should be faster.\n \"\"\"\n if os.path.exists(url):\n url = url.replace(\"\\\\\", \"/\") # Windows local directory\n self._conanfile.output.info(\"Shallow fetch of git repo\")\n self.run('init')\n self.run(f'remote add origin \"{url}\"')\n self.run(f'fetch --depth 1 origin {commit}')\n self.run(f'checkout FETCH_HEAD')\n\n def checkout(self, commit):\n self._conanfile.output.info(\"Checkout: {}\".format(commit))\n self.run('checkout {}'.format(commit))\n\n def included_files(self):\n files = self.run(\"ls-files --full-name --others --cached --exclude-standard\")\n files = files.splitlines()\n return files\n","sub_path":"conan/tools/scm/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"448388875","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\n# General\r\nfrom __future__ import division # problemlose Ganzzahl-Division\r\nimport math # grundlegende Mathematik-Funktionen\r\nimport os # Systemkommunikation\r\n\r\n# GUI\r\nfrom Tkinter import * # Oberflächenvisualisierung\r\nimport Tkinter as tk\r\nimport tkFileDialog as tkFD\r\nfrom tkMessageBox import *\r\n\r\n\r\n# Data\r\nimport sqlite3 as sql\r\n\r\n\r\n# eigene Bibliotheken\r\nimport basic_sql_connect_func as bscf\r\nimport Tk_integrated_plot as Tk_int # Plot-Visualisierung und Help-window\r\n\r\n\r\nclass data_editor(Frame):\r\n def __init__(self, parent):\r\n \r\n ###### General_Attribut ############\r\n self.child = Toplevel()\r\n self.parent = parent\r\n \r\n self.short_name_lab = Label(self.child, text=\"Short name: \")\r\n self.short_name_lab.grid(row=0, column=0, sticky=W)\r\n self.name_lab = Label(self.child, text=\"Name: \")\r\n self.name_lab.grid(row=1, column=0, sticky=W)\r\n self.rep_lab = Label(self.child, text=\"Repetitions: \")\r\n self.rep_lab.grid(row=2, column=0, sticky=W)\r\n self.dur_lab = Label(self.child, text=\"Duration: \")\r\n self.dur_lab.grid(row=3, column=0, sticky=W)\r\n self.weight_lab = Label(self.child, text=\"Weight: \")\r\n self.weight_lab.grid(row=4, column=0, sticky=W)\r\n self.lenght_lab = Label(self.child, text=\"Lenght\")\r\n self.lenght_lab.grid(row=5, column=0, sticky=W)\r\n self.series_lab = Label(self.child, text=\"Series\")\r\n self.series_lab.grid(row=6, column=0, sticky=W)\r\n \r\n\r\n self.short_name_entry = Entry(self.child)\r\n self.short_name_entry.grid(row=0, column=1, sticky=W)\r\n self.name_entry = Entry(self.child)\r\n self.name_entry.grid(row=1, column=1, sticky=W)\r\n self.rep_entry = Entry(self.child)\r\n self.rep_entry.grid(row=2, column=1, sticky=W)\r\n self.dur_entry = Entry(self.child)\r\n self.dur_entry.grid(row=3, column=1, sticky=W)\r\n self.weight_entry = Entry(self.child)\r\n self.weight_entry.grid(row=4, column=1, sticky=W)\r\n self.lenght_entry = Entry(self.child)\r\n self.lenght_entry.grid(row=5, column=1, sticky=W)\r\n self.series_entry = Entry(self.child)\r\n self.series_entry.grid(row=6, column=1, sticky=W)\r\n \r\n 
self.add_exe_button = Button(self.child, text=\"Add Exercise\", command=self.add_exercise)\r\n        self.add_exe_button.grid(row=7, column=0, sticky=W)\r\n        self.add_body_button = Button(self.child, text=\"Add Bodysection\", command=self.plot_add_body)\r\n        self.add_body_button.grid(row=7, column=1, sticky=W)\r\n        \r\n        \r\n        self.name = IntVar()\r\n        self.short_name = IntVar()\r\n        self.rep = IntVar()\r\n        self.conn = sql.connect('training_data.db')\r\n        \r\n        self.cat_select = []\r\n        self.cat_select_var = []\r\n        self.body_select = []\r\n        self.body_select_var = []\r\n        \r\n        \r\n        \r\n        place = 0\r\n        data = bscf.get_cat_names(self.conn)\r\n        for i in data:\r\n            var = IntVar()\r\n            button = Checkbutton(self.child, text=i[0], variable=var)\r\n            button.grid(row=place, column=3, sticky=W)\r\n            self.cat_select.append(button)\r\n            dat = [i[0], var]\r\n            self.cat_select_var.append(dat)\r\n            place += 1\r\n        \r\n        \r\n        \r\n\r\n        place = 0\r\n        data = bscf.get_body_names(self.conn)\r\n        for i in data:\r\n            var = IntVar()\r\n            button = Checkbutton(self.child, text=i[0], variable=var)\r\n            button.grid(row=place, column=2, sticky=W)\r\n            self.body_select.append(button)\r\n            dat = [i[0], var]\r\n            self.body_select_var.append(dat)\r\n            place += 1\r\n    \r\n    \r\n    \r\n    def plot_add_body(self):\r\n        add_body_window(self.conn, self)\r\n        \r\n        \r\n    def add_exercise(self):\r\n        self.name = self.name_entry.get()\r\n        self.short_name = self.short_name_entry.get()\r\n        self.rep = self.rep_entry.get()\r\n        dur = self.dur_entry.get()\r\n        weight = self.weight_entry.get()\r\n        length = self.length_entry.get()\r\n        ser = self.series_entry.get()\r\n        exist = bscf.insert_exe(self.conn, 1, self.name, self.short_name, self.rep, dur, weight, length, ser)\r\n        if exist == 1:\r\n            for insert in self.body_select_var:\r\n                if insert[1].get() == 1:\r\n                    bscf.connect_body(self.conn, 1, self.short_name, insert[0])\r\n            for insert in self.cat_select_var:\r\n                if insert[1].get() == 1:\r\n                    bscf.connect_cat(self.conn, 1, self.short_name, insert[0])\r\n    \r\n    \r\n    def renew_body_select(self):\r\n        for i in self.body_select:\r\n            i.destroy()\r\n        place = 0\r\n        data = bscf.get_body_names(self.conn)\r\n        for i in data:\r\n            var = IntVar()\r\n            button = Checkbutton(self.child, text=i[0], variable=var)\r\n            button.grid(row=place, column=2, sticky=W)\r\n            self.body_select.append(button)\r\n            dat = [i[0], var]\r\n            self.body_select_var.append(dat)\r\n            place += 1\r\n\r\nclass add_body_window(Frame):\r\n    def __init__(self, conn, parent):\r\n        \r\n        ###### General attributes ############\r\n        self.child = Toplevel()\r\n        self.conn = conn\r\n        self.parent = parent\r\n        self.body_name_entry = Entry(self.child)\r\n        self.body_name_lab = Label(self.child, text=\"Bodysection name: \")\r\n        self.access_button = Button(self.child, text=\"Access\", command=self.access)\r\n        self.body_name_lab.grid(row=0, column=0, sticky=W)\r\n        self.body_name_entry.grid(row=0, column=1, sticky=W)\r\n        self.access_button.grid(row=0, column=2, sticky=W)\r\n    \r\n    def access(self):\r\n        x = self.body_name_entry.get()\r\n        bscf.insert_cat(self.conn, \"body\", \"body_name\", x)\r\n        self.parent.renew_body_select()\r\n        self.parent.parent.update_selector()\r\n        self.child.destroy()\r\n    \r\n    \r\n    \r\n    \r\n    \r\n\r\n\r\ndef main():\r\n    global root\r\n    root = tk.Tk()  # start the Tk main environment\r\n    data_editor(root)  # open the settings editor window\r\n    root.mainloop()\r\n    \r\n    return 0\r\n\r\nif __name__ == '__main__':\r\n    
main()\r\n","sub_path":"setting_window.py","file_name":"setting_window.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"180913280","text":"with open(\"output3.txt\", \"rt\") as fin:\n with open(\"output4.txt\", \"wt\") as fout:\n for line in fin:\n line = line.replace(\"_\",\"\\n\")\n fout.write(line)\n\na = []\nwith open(\"output4.txt\", \"rt\") as fin:\n with open(\"output5.txt\", \"wt\") as fout:\n for line in fin:\n a.append(line)\n b = set(a)\n for line in b:\n fout.write(line)\n","sub_path":"refactorWord/index4.py","file_name":"index4.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"458262166","text":"from rest_framework import serializers\r\n\r\nfrom .models import Alumno\r\n\r\nclass AlumnoSerializer(serializers.HyperlinkedModelSerializer):\r\n\tnombre = serializers.CharField(max_length=50)\r\n\tedad = serializers.IntegerField()\r\n\r\n\tclass Meta:\r\n\t\tmodel = Alumno\r\n\t\tfields = ('idAlumno', 'nombre', 'edad')","sub_path":"ejemplos rest fmk/alumnos/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"437498678","text":"#!/usr/bin/python3\n\nimport vm\nimport os\nimport re\nimport sys\nos.environ['LIBCAPSTONE_PATH'] = os.path.join(os.path.dirname(__file__), \"capstone\")\n\nimport keystone.bindings.python.keystone as keystone\nimport capstone.bindings.python.capstone as capstone\n\n\ndef patching(raw, offset, patch, addr=0):\n encoding, count = ks.asm(patch.encode(), addr)\n print([hex(c) for c in encoding])\n print(len(encoding))\n print(count)\n origin_bytes = raw[offset:offset+len(encoding)]\n print(origin_bytes)\n return (encoding, origin_bytes)\n\ndef b2pat(bstr):\n return b\"\".join([\"\\\\x{:02x}\".format(c).encode('latin-1') for c in bstr])\n\n\nif len(sys.argv) < 4:\n print(\"Usage: ./riscirq.py regfile memfile kernel\")\n sys.exit()\n\nuart = 0xfc30742c\nusb = 0xfc207944\nsmi = 0xfc2356c4\nmm = vm.VM(sys.argv[1], sys.argv[2])\nwith open(sys.argv[3], 'rb') as fd:\n kernel_data = fd.read()\ncs = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM|capstone.CS_MODE_LITTLE_ENDIAN)\nks = keystone.Ks(keystone.KS_ARCH_ARM, keystone.KS_MODE_ARM|keystone.KS_MODE_LITTLE_ENDIAN)\n\ndis = [uart]\ndis = [usb]\n#dis = [smi]\n#print(hex(mm.translate(0xfc0091ec)))\n#print(hex(mm.translate(0xfc1f7700)))\n#print(hex(mm.translate(0xfc2356c4)))\n#sys.exit()\n\n# sub_3bb09190 : QA7_HAL_IRQDisable\npatchset = {}\nfor bp in dis:\n kern_off = None\n off = mm.translate(bp)\n print(hex(off))\n sig = mm._read(off, 0x20)\n\n # Fixing for Linux, avoid `bl __gnu_mcount_nc`, which is dynamically patched to `ldm sp!, {lr}` in memory dump\n for i in cs.disasm(sig, bp):\n print(i)\n if i.mnemonic == \"ldm\" and i.op_str == \"sp!, {lr}\":\n sig = sig[:i.address-bp]\n break\n\n print(b2pat(sig))\n for match in re.finditer(b2pat(sig), kernel_data):\n print(\" >\", hex(bp), \" : \", [hex(x) for x in match.span()])\n # page offset should match with the virtual address\n if match.start()&mm.page_mask(bp) == bp&mm.page_mask(bp) or \\\n (0x8000+match.start())&mm.page_mask(bp) == bp&mm.page_mask(bp): # Raspi kernel base might starts at 0x8000\n assert (not kern_off) # should have only one match\n kern_off = match.start()\n # some might not be aligned (e.g. 
RiscOS)\n if not kern_off:\n for match in re.finditer(b2pat(sig), kernel_data):\n print(\" >>\", hex(bp), \" : \", [hex(x) for x in match.span()])\n assert (not kern_off)\n kern_off = match.start()\n\n assert (kern_off) # should find the match now\n #patchset[kern_off] = patching(kernel_data, kern_off, \"push {r0, r1}; ldr r0, [r12, #8]; add r0, r0, #0x600000; mov r1, #0; mcr p15, #0, r1, c7, c10, #5; mov r1, #0; str r1, [r0]; mcr p15, #0, r1, c7, c10, #5; pop {r0, r1}; bic r11, r11, #1; mov pc, lr;\")\n patchset[kern_off] = patching(kernel_data, kern_off, \"push {r0, r8, r9}; mov r0, #0x20; mov r8, #0; mov r9, #0x3; svc #0x2007a; pop {r0, r8, r9}; bic r11, r11, #1; mov pc, lr;\")\n #patchset[kern_off+4] = patching(kernel_data, kern_off+4, \"b $.;\")\n\n\nif len(sys.argv) > 4:\n with open(sys.argv[4], 'wb') as fd:\n data = kernel_data\n for off, patch in patchset.items():\n data = data[:off] + bytes(patch[0]) + data[off+len(patch[0]):]\n fd.write(data)\n\n","sub_path":"disable_riscos.py","file_name":"disable_riscos.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"509196997","text":"\n# Referenced From https://www.zabbix.com/documentation/4.2/manual/api/reference/action/object\n# https://www.zabbix.com/documentation/4.2/manual/api/reference/action/create\n# In order to make any changes please refer to the documentation above\n\n\nimport sys\nfrom pyzabbix import ZabbixAPI\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\nclass ZabbixOnboarding():\n\t\"\"\"docstring for ZabbixOnboarding\"\"\"\n\tdef __init__(self, hostname):\n\n\t\tself.zapi = ZabbixAPI(\"https://{}/zabbix\".format(hostname))\n\t\tself.zapi.session.verify = False\n\t\tself.zapi.login(\"Admin\", \"zabbix\")\n\n\n\tdef create_action_linux(self):\n\t\t\n\t\tparams = {\n\t\t\t\"name\" : \"Auto registration - Linux\",\n\t\t\t\"value\" : \"Linux\",\n\t\t\t\"templateid\" : \"10001\"\n\t\t}\n\t\tself.create_action(params)\n\n\n\tdef create_action_windows(self):\n\t\t\n\t\tparams = {\n\t\t\t\"name\" : \"Auto registration - windows\",\n\t\t\t\"value\" : \"Windows\",\n\t\t\t\"templateid\" : \"10081\"\n\t\t}\n\t\tself.create_action(params)\n\t\t\n\n\tdef create_action(self, params):\n\t\t\"\"\"Creates autoregistration action on zabbix server\n\t\t\n\t\tArguments:\n\t\t\tparams {[dict]} -- configuration parameters for windows and \n\t\t\"\"\"\n\n\t\tpost_data = {\n\t\t \"name\": params['name'],\n\t\t \"eventsource\": 2,\n\t\t \"status\": 0,\n\t\t \"esc_period\": 120,\n\t\t \"def_shortdata\": \"Auto registration: {HOST.HOST}\",\n \t\t\"def_longdata\": \"Host name: {HOST.HOST}\\r\\nAgent port: {HOST.PORT}\\r\\nHost IP: {HOST.IP}\",\n\t\t \"filter\": {\n\t \"evaltype\": 0,\n\t #Meta data condition\n\t \"conditions\": [\n\t\t {\n\t\t \"conditiontype\": 24, #Host Metadata\n\t\t \"operator\": 2,\n\t\t \"value\": params['value']\n\t\t },\n\t\t {\n\t\t \"conditiontype\": 20, # Proxy\n\t\t \"operator\": 0,\n\t\t \"value\": self.proxy_id\n\t\t },\n\t\t ]\n\t\t },\n\n\t\t \"operations\": [\n\t\t \t#Template Option \n\t\t {\n\t\t \"operationtype\": 6,\n\t\t \"optemplate\": [\n\t\t {\n\t\t \"templateid\": params['templateid']\n\t\t }\n\t\t ]\n\t\t },\n\t\t #Add Host\n\t\t {\n\t\t \"operationtype\": 2,\n\t\t },\n\t\t #Add to host group\n\t\t {\n\t\t \"operationtype\": 4,\n\t\t \"opgroup\" : {\n\t\t \t# Host Group Linux Servers\n\t \t\t \t\"groupid\" : \"2\" \n\t\t }\n\t\t },\n\t\t #Enable Host\n\t\t {\n\t\t 
\"operationtype\": 8,\n\t\t },\n\n\t\t #Set Host Inventory Mode\n\t\t {\n\t\t \"operationtype\": 10,\n\t\t \"opinventory\" : {\n\t \t\t \t\"inventory_mode\" : \"1\" \n\t\t }\n\t\t },\n\n\t\t #Remove from host groups discovered hosts\n\t\t {\n\t\t \"operationtype\": 5,\n\t\t \"opgroup\" : {\n\t \t\t \t\"groupid\" : \"5\" \n\t\t }\n\t\t },\n\n\t\t ]\n\t\t }\n\t\t\n\n\t\tmethod = 'action.create'\n\t\tresponse = self.zapi.do_request(method, post_data)\n\t\tprint(response)\n\n\tdef get_proxy(self, proxy_name):\n\t\t\"\"\"This function finds proxy installed on zabbix, and stores the \n\t\tproxy id in a class variable self.proxy_id\n\t\t\n\t\tArguments:\n\t\t\tproxy_name {[string]} -- Name of the proxy\n\t\t\"\"\"\n\n\n\t\tproxy_data = {\n\t\t\t \"params\": {\n\t\t\t \"host\": proxy_name,\n\t\t\t \"status\" : 5\n\t\t\t # \"selectInterface\": \"extend\"\n\t\t\t },\n\t\t\t}\n\t\tmethod = 'proxy.get'\n\t\tresponse = self.zapi.do_request(method, proxy_data)\n\t\tresponse = response['result']\n\t\tself.proxy_id = response[0]['proxyid']\n\t\t\n\nif __name__ == '__main__':\n\n\tcommand_inputs = sys.argv\n\tif len(command_inputs) < 2:\n\t\traise NameError(\"Please Provide Zabbix ui Hostname or IP Address for accessing the API\")\n\n\tzabbix_host = sys.argv[1]\n\tproxy_name = sys.argv[2]\n\tzabbix = ZabbixOnboarding(zabbix_host)\n\tzabbix.get_proxy(proxy_name)\n\tzabbix.create_action_linux()\n\tzabbix.create_action_windows()\n\n\n\t\t","sub_path":"code/zabbix/Bin/extra_configurations/zabbix_onboarding.py","file_name":"zabbix_onboarding.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"68868948","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\n\n\ndef standardization(df):\n return (df - df.mean()) / (df.std())\n\n\ndef normalization(df):\n return (df - df.min()) / (df.max() - df.min())\n\n\ndef import_hypothesis(file_location):\n df = pd.read_csv(file_location)\n y = df['OS']\n x = df.drop(['OS'], axis=1)\n y[df['OS'] <= 300] = 0\n y[(df['OS'] > 300) * (df['OS'] <= 450)] = 1\n y[df['OS'] > 450] = 2\n y = pd.get_dummies(y).as_matrix()\n x = standardization(x)\n x = normalization(x).as_matrix()\n return x, y\n\n\ndef calc_specifity_sensitivity(conf_matrix):\n nclasses = np.shape(conf_matrix)[0]\n sens = {}\n spec = {}\n for i in range(0, nclasses):\n tp = conf_matrix[i, i]\n fp = np.sum(conf_matrix[i, :]) - tp\n fn = np.sum(conf_matrix[:, i]) - tp\n tn = np.sum(conf_matrix) - (tp + fp + fn)\n sens[i] = tp / (tp + fn)\n spec[i] = tn / (tn + fp)\n\n return sens, spec\n\n\nx_train, y_train = import_hypothesis(\"30_train_features.csv\")\nx_test, y_test = import_hypothesis(\"30_test_features.csv\")\n\nx = tf.placeholder(tf.float32, [None, 30])\ny_ = tf.placeholder(tf.float32, [None, 3])\nW = tf.get_variable(\"Weights\", [30, 3], initializer=tf.variance_scaling_initializer())\nb = tf.get_variable(\"Bias\", [3], initializer=tf.variance_scaling_initializer())\n\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\noptimize = tf.train.AdamOptimizer(0.005).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ninit = tf.global_variables_initializer()\nconfusion = tf.confusion_matrix(tf.arg_max(y, 1), tf.arg_max(y_, 1))\n\nsess = tf.InteractiveSession()\nsess.run(init)\nfor i in range(10000):\n _, acc_train, 
loss_train = sess.run([optimize, accuracy, cross_entropy], feed_dict={x: x_train, y_: y_train})\n acc_test, loss_test = sess.run([accuracy, cross_entropy], feed_dict={x: x_test, y_: y_test})\n if i % 100 == 0:\n print('Training Step:' + str(i) + ' Accuracy = ' + str(acc_train) + ' Loss = ' + str(loss_train))\n print('Test Step:' + str(i) + ' Accuracy = ' + str(acc_test) + ' Loss = ' + str(loss_test))\n\nconf_matrix, weights = sess.run([confusion, W], feed_dict={x: x_test, y_: y_test})\nprint(weights)\nsens, spec = calc_specifity_sensitivity(conf_matrix)\nprint(sens, spec)\n","sub_path":"ass_3_1.py","file_name":"ass_3_1.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"297386013","text":"#!/usr/bin/python\n\nfrom subprocess import call\nimport sys, time, os\n\ntouch = \"/tmp/svn-remote-backup.touch\"\nsource = \"/svn/backups/\" # Note trailing slash\ntarget = \"some@remote.host\"\nrsync = \"/usr/bin/rsync\"\narguments = \"-avz --delete -e ssh\"\ncmd = \"%s %s %s %s &> %s\" % (rsync, arguments, source, target, touch)\nrecipients = \"martin.bright\"\ntimer = time.time\n\nmsg = [\"svn backup is complete: rsync finished successfully\",\n \"running time: %d seconds\",\n \"=====================================================\" ]\n\ndef sync():\n\n if os.path.exists(touch):\n return\n\n fd = open(touch, \"w\")\n fd.close()\n\n start = timer()\n\n while True:\n ret = call(cmd, shell=True)\n if ret != 0:\n time.sleep(30)\n else:\n break\n\n end = timer()\n\n rsync_output = file(touch).read()\n msg.append(rsync_output)\n body = os.linesep.join(msg)\n body = body % (end - start)\n mailcmd = \"echo '%s' | mail -s 'svn rsync done' %s\" % (body, recipients)\n call(mailcmd, shell=True)\n os.remove(touch)\n\nsync()\n","sub_path":"svn-remote-backup.py","file_name":"svn-remote-backup.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"165236822","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport json\nimport os\nfrom PIL import Image\nimport shutil\nimport subprocess\nimport tempfile\nimport unittest\nimport uuid\n\nimport tensorflow as tf\nfrom tensorflow.python.lib.io import file_io\n\nimport google.datalab as dl\nimport google.datalab.bigquery as bq\n\nCODE_PATH = os.path.abspath(os.path.join(\n os.path.dirname(__file__), '..', 'mltoolbox', 'code_free_ml'))\n\n\nclass TestTransformRawData(unittest.TestCase):\n \"\"\"Tests for applying a saved model\"\"\"\n\n @classmethod\n def setUpClass(cls):\n\n # Set up dirs.\n cls.working_dir = tempfile.mkdtemp()\n cls.source_dir = os.path.join(cls.working_dir, 'source')\n cls.analysis_dir = os.path.join(cls.working_dir, 'analysis')\n cls.output_dir = os.path.join(cls.working_dir, 'output')\n file_io.create_dir(cls.source_dir)\n\n # Make test image files.\n img1_file = os.path.join(cls.source_dir, 'img1.jpg')\n image1 = Image.new('RGBA', size=(300, 300), color=(155, 0, 0))\n image1.save(img1_file)\n img2_file = os.path.join(cls.source_dir, 'img2.jpg')\n image2 = Image.new('RGBA', size=(50, 50), color=(125, 240, 0))\n image2.save(img2_file)\n img3_file = os.path.join(cls.source_dir, 'img3.jpg')\n image3 = Image.new('RGBA', size=(800, 600), color=(33, 55, 77))\n image3.save(img3_file)\n\n # Make csv input file\n cls.csv_input_filepath = os.path.join(cls.source_dir, 'input.csv')\n file_io.write_string_to_file(\n 
cls.csv_input_filepath,\n '1,1,Monday,23.0,%s\\n' % img1_file +\n '2,0,Friday,18.0,%s\\n' % img2_file +\n '3,0,Sunday,12.0,%s\\n' % img3_file)\n\n # Call analyze.py to create analysis results.\n schema = [{'name': 'key_col', 'type': 'INTEGER'},\n {'name': 'target_col', 'type': 'FLOAT'},\n {'name': 'cat_col', 'type': 'STRING'},\n {'name': 'num_col', 'type': 'FLOAT'},\n {'name': 'img_col', 'type': 'STRING'}]\n schema_file = os.path.join(cls.source_dir, 'schema.json')\n file_io.write_string_to_file(schema_file, json.dumps(schema))\n features = {'key_col': {'transform': 'key'},\n 'target_col': {'transform': 'target'},\n 'cat_col': {'transform': 'one_hot'},\n 'num_col': {'transform': 'identity'},\n 'img_col': {'transform': 'image_to_vec'}}\n features_file = os.path.join(cls.source_dir, 'features.json')\n file_io.write_string_to_file(features_file, json.dumps(features))\n cmd = ['python ' + os.path.join(CODE_PATH, 'analyze.py'),\n '--output=' + cls.analysis_dir,\n '--csv=' + cls.csv_input_filepath,\n '--schema=' + schema_file,\n '--features=' + features_file]\n subprocess.check_call(' '.join(cmd), shell=True)\n\n # Setup a temp GCS bucket.\n cls.bucket_root = 'gs://temp_mltoolbox_test_%s' % uuid.uuid4().hex\n subprocess.check_call('gsutil mb %s' % cls.bucket_root, shell=True)\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls.working_dir)\n subprocess.check_call('gsutil -m rm -r %s' % cls.bucket_root, shell=True)\n\n def test_local_csv_transform(self):\n \"\"\"Test transfrom from local csv files.\"\"\"\n\n cmd = ['python ' + os.path.join(CODE_PATH, 'transform.py'),\n '--csv=' + self.csv_input_filepath,\n '--analysis=' + self.analysis_dir,\n '--prefix=features',\n '--output=' + self.output_dir]\n print('cmd ', ' '.join(cmd))\n subprocess.check_call(' '.join(cmd), shell=True)\n\n # Read the tf record file. 
There should only be one file.\n record_filepath = os.path.join(self.output_dir,\n 'features-00000-of-00001.tfrecord.gz')\n options = tf.python_io.TFRecordOptions(\n compression_type=tf.python_io.TFRecordCompressionType.GZIP)\n serialized_examples = list(tf.python_io.tf_record_iterator(record_filepath, options=options))\n self.assertEqual(len(serialized_examples), 3)\n\n example = tf.train.Example()\n example.ParseFromString(serialized_examples[0])\n\n transformed_number = example.features.feature['num_col'].float_list.value[0]\n self.assertAlmostEqual(transformed_number, 23.0)\n transformed_category = example.features.feature['cat_col'].int64_list.value[0]\n self.assertEqual(transformed_category, 2)\n image_bytes = example.features.feature['img_col'].float_list.value\n self.assertEqual(len(image_bytes), 2048)\n self.assertTrue(any(x != 0.0 for x in image_bytes))\n\n def test_local_bigquery_transform(self):\n \"\"\"Test transfrom locally, but the data comes from bigquery.\"\"\"\n\n # Make a BQ table, and insert 1 row.\n try:\n project_id = dl.Context.default().project_id\n dataset_name = 'test_transform_raw_data_%s' % uuid.uuid4().hex\n table_name = 'tmp_table'\n\n dataset = bq.Dataset((project_id, dataset_name)).create()\n table = bq.Table((project_id, dataset_name, table_name))\n table.create([{'name': 'key_col', 'type': 'INTEGER'},\n {'name': 'target_col', 'type': 'FLOAT'},\n {'name': 'cat_col', 'type': 'STRING'},\n {'name': 'num_col', 'type': 'FLOAT'},\n {'name': 'img_col', 'type': 'STRING'}])\n\n img1_file = os.path.join(self.source_dir, 'img1.jpg')\n dest_file = os.path.join(self.bucket_root, 'img1.jpg')\n subprocess.check_call('gsutil cp %s %s' % (img1_file, dest_file), shell=True)\n\n data = [\n {\n 'key_col': 1,\n 'target_col': 1.0,\n 'cat_col': 'Monday',\n 'num_col': 23.0,\n 'img_col': dest_file,\n },\n ]\n table.insert(data=data)\n\n cmd = ['python ' + os.path.join(CODE_PATH, 'transform.py'),\n '--bigquery=%s.%s.%s' % (project_id, dataset_name, table_name),\n '--analysis=' + self.analysis_dir,\n '--prefix=features',\n '--project-id=' + project_id,\n '--output=' + self.output_dir]\n print('cmd ', ' '.join(cmd))\n subprocess.check_call(' '.join(cmd), shell=True)\n\n # Read the tf record file. 
There should only be one file.\n record_filepath = os.path.join(self.output_dir,\n 'features-00000-of-00001.tfrecord.gz')\n options = tf.python_io.TFRecordOptions(\n compression_type=tf.python_io.TFRecordCompressionType.GZIP)\n serialized_examples = list(tf.python_io.tf_record_iterator(record_filepath, options=options))\n self.assertEqual(len(serialized_examples), 1)\n\n example = tf.train.Example()\n example.ParseFromString(serialized_examples[0])\n\n transformed_number = example.features.feature['num_col'].float_list.value[0]\n self.assertAlmostEqual(transformed_number, 23.0)\n transformed_category = example.features.feature['cat_col'].int64_list.value[0]\n self.assertEqual(transformed_category, 2)\n image_bytes = example.features.feature['img_col'].float_list.value\n self.assertEqual(len(image_bytes), 2048)\n self.assertTrue(any(x != 0.0 for x in image_bytes))\n finally:\n dataset.delete(delete_contents=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"solutionbox/code_free_ml/test_mltoolbox/test_transform.py","file_name":"test_transform.py","file_ext":"py","file_size_in_byte":7166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"400039785","text":"import c\n\nc_int = c.refl(\"int\")\nassert c_int.size() == c.sizeof(\"int\")\narray = c.malloc(c.sizeof(\"int\") * 10)\narray.set_base_offset(\"int\")\n\nassert array.get_base_offset() == c_int.size()\n\nfor i in range(10):\n array.offset(i).write_int(i)\n\nx = c_int()\nx.addr().write_int(0)\nfor i in range(10):\n i = array.offset(i).read_int()\n x.addr().write_int(\n x.addr().read_int() + i\n )\n\nassert x.addr().read_int() == (0+9)*10//2\n\nc.memset(array, 0, c.sizeof(\"int\") * 10)\n\nfor i in range(10):\n assert array.offset(i).read_char() == 0\n\narray2 = c.malloc(c.sizeof(\"int\") * 10)\narray2.set_base_offset(\"int\")\nc.memcpy(array2, array, c.sizeof(\"int\") * 10)\nfor i in range(10):\n assert array2.offset(i).read_char() == 0\n\nc.free(array)\nc.free(array2)","sub_path":"tests/80_c.py","file_name":"80_c.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"355731157","text":"\nimport csv\nimport random\n\nwith open(\"olist_customers_dataset.csv\") as csvfile:\n data = csv.DictReader(csvfile)\n new_ids = []\n for row in data:\n for char in row['customer_id']:\n\n print(char)\n\n\n","sub_path":"CustomTasks/data_shuffle.py","file_name":"data_shuffle.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"107378030","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 24 21:36:09 2017\n\n@author: majid_nasiri\n\"\"\"\n\nimport numpy as np\nimport itertools\n\nN = 6 # Number of Queens\nitr = 0 # Counter for produced combination\nbest_combination = [] # list of correct combination\n\n\n# cost function for evaluationg how well the input combination is.\n# low value refer to less conflict between queens therefore boad \n# with zero cost is a solution for problem.\ndef promissing(board):\n board_len = len(board)\n conflict = 0\n for i in range(board_len):\n for j in range(i, board_len):\n if (i != j):\n # check for horizontal threats\n # check for diagonal threats\n if ((board[i] == board[j]) or np.abs(board[i]-board[j]) == (j-i)):\n conflict += 1\n return conflict\n\n\n# produce all possible combination using code in\n# 
https://gist.github.com/3997853\nfor combination in itertools.product(range(N), repeat=N):\n itr += 1\n comb = np.asarray(combination, dtype=np.int8)\n cost = promissing(comb)\n print('combination',itr,'=',comb, 'cost = ', cost)\n if (cost == 0):\n best_combination.append(comb) # save solution combination\n\n\n# visualization chess board\nboard1 = ('- '*N)\nfor i in best_combination:\n print('Board '+str(i))\n for j in range(N):\n board = list(board1)\n board[2*i[j]] = '@'\n board = ''.join(board)\n print(board)\n \n\n\n\n\n\n\n\n \n ","sub_path":"evolutionary_computing/ECHW1_n-queens/n_queen.py","file_name":"n_queen.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"418546276","text":"import json\nfrom flask import Flask, request\nimport db\n\nDB = db.DatabaseDriver()\n\napp = Flask(__name__)\n\ndef success_response(data, code=200):\n return json.dumps({\"success\": True, \"data\": data}), code\n\ndef failure_response(error, code=404):\n return json.dumps({\"success\": False, \"error\": error}), code\n\n@app.route(\"/\")\n@app.route(\"/api/users/\")\ndef get_users():\n return success_response(DB.get_all_users())\n\n@app.route(\"/api/users/\", methods=[\"POST\"])\ndef create_venmo():\n body = json.loads(request.data)\n name = body.get(\"name\")\n username = body.get(\"username\")\n balance = body.get(\"balance\", 0)\n if name is not None and username is not None:\n user_id = DB.insert_venmo_user(name, username, balance)\n return success_response(DB.get_user_by_id(user_id))\n return failure_response(\"No name or username or both were not entered\", 400)\n@app.route(\"/api/user//\")\ndef get_user(user_id):\n user = DB.get_user_by_id(user_id)\n if user:\n return success_response(user)\n return failure_response(\"No user found\")\n\n\n@app.route(\"/api/user//\", methods=[\"DELETE\"])\ndef delete_user(user_id):\n user = DB.get_user_by_id(user_id)\n if user is None:\n return failure_response(\"No user found\")\n DB.delete_user_by_id(user_id)\n return success_response(user)\n\n@app.route(\"/api/send/\", methods=[\"POST\"])\ndef send():\n body = json.loads(request.data)\n sender_id = body.get(\"sender_id\")\n receiver_id = body.get(\"receiver_id\")\n amount = body.get(\"amount\")\n if sender_id is None or receiver_id is None or amount is None:\n return failure_response(\"You did not enter input right\")\n sender_obj = DB.get_user_by_id(sender_id)\n receiver_obj = DB.get_user_by_id(receiver_id)\n if not sender_obj or not receiver_obj:\n return failure_response(\"either the sender or receiver does not exist\")\n sender_balance = sender_obj[\"balance\"]\n if sender_balance >= amount:\n sender_new_balance = sender_balance - amount\n DB.update(sender_id, sender_new_balance)\n else:\n return failure_response(\"You dont have enough money\")\n \n receiver_balance = receiver_obj[\"balance\"]\n receiver_new_balance = receiver_balance + amount\n DB.update(receiver_id, receiver_new_balance)\n return success_response({\"sender_id\": sender_id, \"receiver_id\": receiver_id, \"amount\": amount})\n \n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"315179959","text":"import pushy.transport.ssh\nimport pushy.transport.local\nimport subprocess \n\n\nclass Local_Popen(pushy.transport.local.Popen):\n 
def __init__(self, command, address, **kwargs):\n        pushy.transport.BaseTransport.__init__(self, address)\n        \n        self.__proc = subprocess.Popen(command, stdin=subprocess.PIPE,\n                                       stdout=subprocess.PIPE,\n                                       stderr=subprocess.PIPE,\n                                       bufsize=65535)\n        \n        self.stdout = self.__proc.stdout\n        self.stderr = self.__proc.stderr\n        self.stdin = self.__proc.stdin\n\n    def close(self):\n        self.stdin.close()\n        self.__proc.wait()\n    \nclass SshSudoTransport(object):\n    @staticmethod\n    def Popen(command, *a, **kw):\n        command = ['sudo'] + command\n        return pushy.transport.ssh.Popen(command, *a, **kw)\n\nclass LocalSudoTransport(object):\n    @staticmethod\n    def Popen(command, *a, **kw):\n        command = ['sudo'] + command\n        return Local_Popen(command, *a, **kw)\n\ndef get_transport(hostname):\n    import socket\n\n    myhostname = socket.gethostname().split('.')[0]\n    if hostname == myhostname:\n        return 'local+sudo:'\n    else:\n        return 'ssh+sudo:{hostname}'.format(hostname=hostname)\n\ndef patch():\n    \"\"\"\n    Monkey patches pushy so it supports running via (passphraseless)\n    sudo on the remote host.\n    \"\"\"\n    pushy.transports['ssh+sudo'] = SshSudoTransport\n    pushy.transports['local+sudo'] = LocalSudoTransport\n","sub_path":"ceph_deploy/sudo_pushy.py","file_name":"sudo_pushy.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"382924172","text":"import json, csv\nfrom pprint import pprint\n\n#opening files\nlocation_data = open(\"files/lll_location_data.csv\", \"w+\")\ncsv_writer = csv.writer(location_data)\nwith open(\"files/lll_script.json\") as json_data:\n\td = json.load(json_data)\nlines = []\nlines.append(['character', 'text', 'start_time', 'end_time', 'location', 'time in ms'])\n\nfor item in d['movie_script']:\n\tif item['type'] == 'location':\n\t\tloc_arr = item['text'].split('-')\n\t\ttemp = loc_arr[0].strip()\n\t\t# str.strip('EXT.') removes a set of characters, not a prefix; strip the scene prefix explicitly\n\t\tif temp.startswith('EXT.'):\n\t\t\ttemp = temp[len('EXT.'):].strip()\n\t\telif temp.startswith('INT.'):\n\t\t\ttemp = temp[len('INT.'):].strip()\n\t\tcurr_location = temp\n\t\ttemp_arr = loc_arr[1:]\n\t\tsub_loc = ''.join(temp_arr)\n\telif item['type'] == 'speech' and item['start_time'] != '':\n\t\tstart_time = item['start_time']\n\t\tend_time = item['end_time']\n\n\t\tst_arr = start_time.split(':')\n\t\tet_arr = end_time.split(':')\n\t\ttemp_st = st_arr[2].split(',')\n\t\ttemp_et = et_arr[2].split(',')\n\n\t\ttime_in_ms = ((int(et_arr[0])*60*60*1000) + (int(et_arr[1])*60*1000) + (int(temp_et[0])*1000) + int(temp_et[1])) - ((int(st_arr[0])*60*60*1000) + (int(st_arr[1])*60*1000) + (int(temp_st[0])*1000) + int(temp_st[1]))\n\n\t\tline = [item['character'], item['text'], item['start_time'], item['end_time'], curr_location, str(time_in_ms)]\n\t\tlines.append(line)\n\ncsv_writer.writerows(lines)","sub_path":"sw4specific/csvParser.py","file_name":"csvParser.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"294591230","text":"import logging\nimport json\n\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.core.mail.message import EmailMessage\n\n# django-sendgrid imports\nfrom header import SmtpApiHeader\nfrom mail import get_sendgrid_connection\nfrom signals import sendgrid_email_sent\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SendGridEmailMessage(EmailMessage):\n\t\"\"\"\n\tAdapts Django's ``EmailMessage`` for use with SendGrid.\n\t\n\t>>> from sendgrid.message import SendGridEmailMessage\n\t>>> myEmail = \"rbalfanz@gmail.com\"\n\t>>> mySendGridCategory = \"django-sendgrid\"\n\t>>> e 
= SendGridEmailMessage(\"Subject\", \"Message\", myEmail, [myEmail], headers={\"Reply-To\": myEmail})\n\t>>> e.sendgrid_headers.setCategory(mySendGridCategory)\n\t>>> response = e.send()\n\t\"\"\"\n\tsendgrid_headers = SmtpApiHeader()\n\t\n\tdef __init__(self, *args, **kwargs):\n\t\t\"\"\"\n\t\tInitialize the object.\n\t\t\"\"\"\n\t\tsuper(SendGridEmailMessage, self).__init__(*args, **kwargs)\n\t\t\n\tdef _update_headers_with_sendgrid_headers(self):\n\t\t\"\"\"\n\t\tUpdates the existing headers to include SendGrid headers.\n\t\t\"\"\"\n\t\tlogger.debug(\"Updating headers with SendGrid headers\")\n\t\tif self.sendgrid_headers:\n\t\t\tadditionalHeaders = {\n\t\t\t\t\"X-SMTPAPI\": self.sendgrid_headers.asJSON()\n\t\t\t}\n\t\t\tself.extra_headers.update(additionalHeaders)\n\t\t\n\t\tlogging.debug(str(self.extra_headers))\n\t\t\n\t\treturn self.extra_headers\n\t\t\n\tdef update_headers(self, *args, **kwargs):\n\t\t\"\"\"\n\t\tUpdates the headers.\n\t\t\"\"\"\n\t\treturn self._update_headers_with_sendgrid_headers(*args, **kwargs)\n\t\t\n\tdef send(self, *args, **kwargs):\n\t\t\"\"\"Sends the email message.\"\"\"\n\t\t# Set up the connection\n\t\tconnection = get_sendgrid_connection()\n\t\tself.connection = connection\n\t\tlogger.debug(\"Connection: {c}\".format(c=connection))\n\t\t\n\t\tself.update_headers()\n\t\t\n\t\tresponse = super(SendGridEmailMessage, self).send(*args, **kwargs)\n\t\tlogger.debug(\"Tried to send an email with SendGrid and got response {r}\".format(r=response))\n\t\tsendgrid_email_sent.send(sender=self, response=response)\n\t\t\n\t\treturn response\n","sub_path":"sendgrid/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"145645721","text":"import numpy as np\r\nclass Full_connection():\r\n def __init__(self,output_number):\r\n self.number = 1\r\n self.theta=None\r\n self.b= np.random.random()\r\n self.output_number=output_number\r\n self.dtheta = None\r\n self.db = None\r\n self.x=None\r\n self.dx=None\r\n self.velocity_b=0\r\n self.velocity_theta = 0\r\n self.velocity_x = 0\r\n self.first_momentum_theta=0\r\n self.second_momentum_theta=0\r\n self.first_momentum_x = 0\r\n self.second_momentum_x = 0\r\n self.first_momentum_b = 0\r\n self.second_momentum_b = 0\r\n self.n=0\r\n def fit(self,X_train):#x,y为列向量\r\n self.x=X_train\r\n self.dx=np.zeros((X_train.shape))\r\n self.number = X_train.shape[0]\r\n if self.n == 0 :\r\n self.theta = np.random.random((self.number, self.output_number))-0.5\r\n self.dtheta = np.random.random((self.number, self.output_number))-0.5\r\n self.a=np.zeros((self.number,1))\r\n theta=self.theta\r\n for k in range(self.theta.shape[0]):\r\n for i in range(self.theta.shape[1]):\r\n if np.random.random()<0.5:\r\n self.theta[k,i]=0\r\n self.a=(self.theta.T).dot(X_train)+self.b\r\n self.theta=theta\r\n self.n+=1\r\n return self.a\r\n def g_fc(self,y):\r\n self.db=y\r\n for id1 in range(self.number):\r\n for id2 in range(self.output_number):\r\n self.dtheta=self.x.dot(y.T)\r\n for kd1 in range(self.number):\r\n for kd2 in range(y.shape[1]):\r\n self.dx=self.theta.dot(y)\r\n return self.dx\r\n def momentum(self,rho=0.9, alpha=0.0005):\r\n self.velocity_b=rho*self.velocity_b+(1-rho)*self.db\r\n self.b-=alpha*self.velocity_b\r\n self.velocity_theta=rho*self.velocity_theta+(1-rho)*self.dtheta\r\n self.theta-=alpha*self.velocity_theta\r\n return self\r\n def Adam(self,beta1,beta2,alpha):\r\n self.first_momentum_b = beta1 * 
self.first_momentum_b + (1 - beta1) * self.db\r\n self.second_momentum_b = beta2 * self.first_momentum_b + (1 - beta2) * self.db\r\n self.b += alpha*self.first_momentum_b / (np.sqrt(self.second_momentum_b) + 1e-7)\r\n self.first_momentum_theta = beta1 * self.first_momentum_theta + (1 - beta1) * self.dtheta\r\n self.second_momentum_theta = beta2 * self.first_momentum_theta + (1 - beta2) * self.dtheta\r\n self.theta += alpha*self.first_momentum_theta / (np.sqrt(self.second_momentum_theta) + 1e-7)\r\n return self\r\n","sub_path":"Full_connection.py","file_name":"Full_connection.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"488919973","text":"from pyparsing import Literal,Word,Optional,ZeroOrMore,nums,alphas\n\nfrom functions import *\nfrom selectors import *\n\n\nclass Parser:\n def __init__(self):\n self.plus = Literal('+')\n self.minus = Literal('-')\n self.sign = self.plus | self.minus\n self.relatesTo = Literal('/')\n self.relatesAs = Literal('=')\n self.target = Literal('?')\n self.comma = Literal(',')\n self.word = Word(alphas)\n self.unary = self.sign + self.word\n self.anotherWord = self.comma + self.word\n self.arithmetics = Optional(self.sign) + self.word + ZeroOrMore(self.unary)\n self.relations = self.word + self.relatesTo + self.word + self.relatesAs + self.word + self.relatesTo + self.target\n self.list = self.word + ZeroOrMore(self.anotherWord)\n self.selector = self.relations | self.arithmetics\n self.functionName = Literal('show') | Literal('peak') | Literal('explain') | Literal('tags') | Literal('heatmap') | Literal('clusters')\n self.leftParenthesis = Literal('(').suppress()\n self.rightParenthesis = Literal(')').suppress()\n self.all = Literal('*')\n self.function = self.functionName + self.leftParenthesis + (self.selector | self.all) + self.rightParenthesis\n self.size = Word(nums)\n self.slice = (self.function | self.selector) + self.size\n\n self.functions = {\n 'show': ShowFunction,\n 'peak': PeakFunction,\n 'explain': ExplainFunction,\n 'tags': TagsFunction,\n 'heatmap': HeatmapFunction,\n 'clusters': ClustersFunction\n }\n\n self.bnf = self.slice | self.function | self.selector\n\n\n @staticmethod\n def test(expression, input):\n try:\n if len(expression.parseString(input)) == 0:\n return False\n except:\n return False\n\n return True\n\n\n def parse(self, input):\n parsed = self.bnf.parseString(input)\n parsed = list(parsed)\n\n if not Parser.test(self.functionName, parsed[0]):\n parsed.insert(0, 'explain')\n\n if not self.test(self.size, parsed[-1]):\n parsed.append('10')\n\n args = list(parsed[1:-1])\n\n functionName = parsed[0]\n function = self.functions[functionName]()\n\n # body = ''.join(args)\n size = int(parsed[-1])\n\n # if self.test(self.relations, body):\n # selector = RelationsSelector(args)\n # elif self.test(self.list, body):\n # selector = ListSelector(args)\n # elif self.test(self.arithmetics, body):\n # selector = ArithmeticsSelector(args)\n # else:\n # selector = AllSelector(*args)\n\n selector = ArithmeticsSelector(args)\n\n return function, selector, size","sub_path":"old_src/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"161481383","text":"\"\"\"lesmagnifique URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.urls import path\nfrom django.contrib import admin\n\nfrom core import views\n\nurlpatterns = [ \n path('', views.homepage, name='homepage'),\n path('home', views.indexpage, name='indexpage'),\n path('events/', views.events, name='events'),\n path('contact/', views.contact, name='contact'),\n path('services/', views.services, name='services'),\n path('event//', views.view_event, name='view_event'),\n path('service//', views.view_service, name='view_service'),\n]","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"577309160","text":"import sqlite3\r\nconn = sqlite3.connect('kontakty.db')\r\nconn.executescript(\"\"\"DROP TABLE IF EXISTS kontakty;\r\n CREATE TABLE IF NOT EXISTS kontakty(\r\n NUMBER INT PRIMARY KEY NOT NULL,\r\n NAME TEXT NOT NULL,\r\n SURNAME TEXT NOT NULL);\"\"\")\r\nconn.execute(\"INSERT INTO kontakty (NUMBER,NAME,SURNAME) VALUES ('665474747', 'marek', 'drozdzik')\")\r\nconn.execute(\"INSERT INTO kontakty (NUMBER,NAME,SURNAME) VALUES ('605474747', 'pablo', 'escobar')\")\r\nconn.execute(\"INSERT INTO kontakty (NUMBER,NAME,SURNAME) VALUES ('669474747', 'gabi', 'zabi')\")\r\ncursor = conn.execute(\"SELECT NUMBER, NAME, SURNAME from kontakty\")\r\ndef AddNumber():\r\n number = int(input(\"Podaj numer telefonu: \"))\r\n name = input(\"Podaj imie: \")\r\n surname = input(\"Podaj nazwisko: \")\r\n conn.execute(\"INSERT INTO kontakty (NUMBER,NAME,SURNAME) VALUES ('{}', '{}', '{}')\".format(number, name, surname))\r\ndef ShowAllNumbers():\r\n rows = cursor.fetchall()\r\n for row in rows:\r\n print(row)\r\ndef Search(options, value):\r\n # a query returns a cursor that must be iterated; printing the cursor object itself (as the original did for options 1 and 2) shows no rows\r\n if options == 1:\r\n with conn:\r\n cur = conn.execute(f'SELECT * FROM kontakty WHERE NUMBER = \"{value}\";')\r\n for row in cur:\r\n print(row)\r\n elif options == 2:\r\n with conn:\r\n cur = conn.execute(f'SELECT * FROM kontakty WHERE NAME = \"{value}\";')\r\n for row in cur:\r\n print(row)\r\n elif options == 3:\r\n with conn:\r\n cur = conn.execute(f'SELECT * FROM kontakty WHERE surname = \"{value}\";')\r\n for row in cur:\r\n print(row)\r\nAddNumber()\r\nShowAllNumbers()","sub_path":"16.1.py","file_name":"16.1.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"167263034","text":"\ndef calcularArea(c,l):\n area = l * c\n print(f\"A área de um terreno {c:.1f}x{l:.1f} é de {area:.1f}m²\")\n\n\nprint(\"Calcular Área\")\nprint(\"-\" * 10)\n\ncomprimento = float(input(\"Digite o comprimento: \"))\nlargura = float(input(\"Digite a largura: \"))\n\ncalcularArea(largura, comprimento)\n","sub_path":"ExercicioPython/ex038.py","file_name":"ex038.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"111954954","text":"\"\"\"\n\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef crawling(url, translate):\n\n response = 
requests.request('GET', url )\n html_doc = response.text\n soup = BeautifulSoup(html_doc, 'html.parser')\n result_text = soup.prettify()\n\n return translate(result_text)\n","sub_path":"tasks/mining/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"354143075","text":"import streamlit as st\nimport funcs\n\ndef reporting():\n st.title(\"Reporting/Classification Exercise\")\n text = funcs.get_text_block(\"reporting.txt\")\n st.markdown(text)\n st.image(\"..//pdac2021_res_est_course_link3//images//res_table.jpg\", use_column_width=True)\n \n \n","sub_path":"exercises/reporting.py","file_name":"reporting.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"574343904","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__='Uglyboy'\n\nimport urllib.request,urllib.parse\nimport ssl\nimport time, sys, io, os, stat\nimport mimetypes\nimport json\n\nfrom .Singleton import Singleton\nfrom .Config import Config\n\nconfig = Config()\nconfig.appkey\nconfig.appsecret\n\nclass DingError(RuntimeError):\n def __init__(self, code, msg):\n self.code = code\n self.msg = msg\n\nclass DingClient(Singleton):\n def __init__(self):\n self._header = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'}\n self._context = ssl._create_unverified_context()\n\n @property\n def token(self):\n if not hasattr(self, '_token'):\n self._gettoken()\n return self._token\n \n def _gettoken(self):\n url = \"https://oapi.dingtalk.com/gettoken?appkey=\" + config.appkey + \"&appsecret=\" + config.appsecret\n data = self._get(url)\n self._token = data[\"access_token\"]\n\n def _get(self, url):\n request = urllib.request.Request(url, headers=self._header)\n\n attempts = 0\n success = False\n while attempts < 3 and not success:\n try:\n res = urllib.request.urlopen(request, context=self._context)\n success = True\n except Exception as e:\n attempts += 1\n if attempts == 3:\n raise e\n\n data = json.loads(res.read().decode(\"utf8\"))\n return data\n\n def get(self,url,data={}):\n nurl = url + \"?access_token=\" + self.token\n ndata = urllib.parse.urlencode(data)\n nurl = nurl + \"&\" + ndata\n res = self._get(nurl)\n if res[\"errcode\"] == 0:\n return res\n else:\n if res['errcode'] not in [90006,90018,90005]:\n print(res[\"errcode\"],res[\"errmsg\"])\n if res['errcode'] == 90018 or res['errcode'] == 10050 or res['errcode'] == -1 or res['errcode']==400001:\n time.sleep(0.5)\n return self.get(url,data)\n elif res['errcode'] == 90006 or res['errcode'] == 90005:\n time.sleep(10)\n return self.get(url,data)\n elif res['errcode'] == 40014:\n self._gettoken()\n return self.get(url,data)\n else:\n raise DingError(res[\"errcode\"],res[\"errmsg\"])\n\n def _post(self,url,data):\n url = url + \"?access_token=\" + self.token\n request = urllib.request.Request(url, json.dumps(data).encode('utf-8'), headers=self._header)\n request.add_header('Content-Type', 'application/json')\n \n attempts = 0\n success = False\n while attempts < 3 and not success:\n try:\n res = urllib.request.urlopen(request)\n success = True\n except Exception as e:\n attempts += 1\n if attempts == 3:\n raise e\n \n data = json.loads(res.read().decode(\"utf8\"))\n return data\n\n def post(self,url,data):\n res = self._post(url,data)\n if res[\"errcode\"] == 0:\n return 
res\n else:\n if res['errcode'] not in [90006,90018,90005]:\n print(res[\"errcode\"],res[\"errmsg\"])\n if res['errcode'] == 90018 or res['errcode'] == 10050 or res['errcode'] == -1 or res['errcode']==400001:\n time.sleep(0.5)\n return self.post(url,data)\n elif res['errcode'] == 90006 or res['errcode'] == 90005:\n time.sleep(10)\n return self.post(url,data)\n elif res['errcode'] == 40014:\n self._gettoken()\n return self.post(url,data)\n else:\n raise DingError(res[\"errcode\"],res[\"errmsg\"])\n\n def _update(self, url, data):\n url = url + \"?access_token=\" + self.token\n request = urllib.request.Request(url, headers=self._header, data=data)\n request = http_request(request)\n res = urllib.request.urlopen(request)\n data = json.loads(res.read().decode(\"utf8\"))\n return data\n\n def update(self, url, data):\n res = self._update(url, data)\n if res[\"errcode\"] == 0:\n return res\n else:\n print(res[\"errcode\"],res[\"errmsg\"])\n if res['errcode'] == 90018 or res['errcode'] == 10050 or res['errcode'] == -1 or res['errcode']==400001:\n time.sleep(0.5)\n return self.update(url,data)\n elif res['errcode'] == 90006 or res['errcode'] == 90005:\n time.sleep(10)\n return self.update(url,data)\n elif res['errcode'] == 40014:\n self._gettoken()\n return self.update(url,data)\n else:\n raise DingError(res[\"errcode\"],res[\"errmsg\"])\n\ndef http_request(request):\n data = request.data\n if data is not None and type(data) != str: \n v_files = [] \n v_vars = [] \n try: \n for(key, value) in data.items():\n if type(value) == io.BufferedReader: \n v_files.append((key, value)) \n else: \n v_vars.append((key, value)) \n except TypeError: \n systype, value, traceback = sys.exc_info() \n raise TypeError\n \n if len(v_files) == 0: \n data = urllib.parse.urlencode(v_vars, doseq=True) # urllib.urlencode is Python 2 only, and 'doseq' was an undefined bare name\n else: \n boundary, data = multipart_encode(v_vars, v_files)\n contenttype = 'multipart/form-data; boundary=%s' % boundary\n if( request.has_header('Content-Type') and request.get_header('Content-Type').find('multipart/form-data') != 0): # get_header is a method, not a mapping\n print(\"Replacing %s with %s\" % (request.get_header('content-type'), 'multipart/form-data'))\n request.add_unredirected_header('Content-Type', contenttype)\n request.data = data.encode('utf-8')\n return request\n\ndef multipart_encode(vars, files):\n boundary = str(time.time())\n buffer = '' \n for(key, value) in vars: \n buffer += '--%s\\r\\n' % boundary \n buffer += 'Content-Disposition: form-data; name=\"%s\"' % key \n buffer += '\\r\\n\\r\\n' + value + '\\r\\n' \n for(key, fd) in files: \n file_size = os.fstat(fd.fileno())[stat.ST_SIZE] \n filename = os.path.basename(fd.name) \n contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' \n buffer += '--%s\\r\\n' % boundary \n buffer += 'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"\\r\\n' % (key, filename) \n buffer += 'Content-Type: %s\\r\\n' % contenttype \n buffer += 'Content-Length: %s\\r\\n' % file_size \n fd.seek(0)\n buffer += '\\r\\n' + str(fd.read()) + '\\r\\n' \n buffer += '--%s--\\r\\n\\r\\n' % boundary \n return boundary, buffer","sub_path":"ding/util/DingClient.py","file_name":"DingClient.py","file_ext":"py","file_size_in_byte":6915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"420042260","text":"'''\r\n* Image classification\r\n* K-Means part\r\n* 2017/2/5\r\n\r\n* [Problems encountered]\r\n 1. mgj... only after all these days did it suddenly hit me... distances can be computed in n dimensions, not just two...\r\n 2. Reading in the coordinates of every point\r\n 3. Dimensionality reduction\r\n 4. Computing the difference between each new centroid and the corresponding previous centroid\r\n how am I supposed to know which one it corresponds to?!\r\n 5. Wow, a list can be assigned to an array, and addition then becomes vector addition; very handy\r\n 6. The coordinates all need to be stored as vectors\r\n 
7. Euclidean distance between two points; thankfully Python has a library for everything!!\r\n dist = numpy.linalg.norm(vec1 - vec2)\r\n 8. Variable scope: rebinding a name inside a function does not propagate back out; awkward\r\n 9. Hand-writing results to a file looks ugly; awkward\r\n just redirect stdout!!!\r\n 10. Tweak images 2 and 5 a little: multiply by 0.9\r\n 11. Actually the centroids barely change after ROUND 2...\r\n'''\r\n\r\nimport sys\r\nimport random\r\nimport numpy as np\r\nfrom sklearn.decomposition import IncrementalPCA\r\n\r\nimgNum = 10 # number of images\r\nKNum = 2 # number of clusters\r\nn = 2 # every image is n x n\r\ndimension = 2016 # dimensionality of each image\r\ndst_dimension = 10 # target dimensionality to reduce to\r\nbound = 10 # iteration may stop once two successive results differ by less than this\r\nmaxRecurseTime = 10 # maximum number of iterations\r\ncentroids = [] # n-dimensional coordinate vectors of the KNum centroids\r\nlast_centroids = [] # centroid coordinates from the previous iteration\r\nori_dots = [] # n-dimensional coordinate vector of each point\r\ndots = [] # point coordinates after dimensionality reduction\r\nclusters = [] # which points each cluster holds; clusters[i] stores the indices of the points in cluster i, whose center is centroids[i]\r\nselected = []\r\n\r\nDistance = lambda v: np.linalg.norm(v)\r\n\r\n# Initialize the point coordinates, apply dimensionality reduction, and return the reduced vector set\r\ndef InitDots():\r\n # Read in the coordinates of imgNum points and store them in dots\r\n for i in range(imgNum):\r\n ori_dots.append([])\r\n\r\n # Read in the vector set and reduce its dimensionality\r\n ReadFiles(ori_dots)\r\n return PCA(ori_dots)\r\n\r\n# Initialize the centroid data\r\ndef InitCentroids():\r\n # Initialize the centroid data\r\n for i in range(KNum):\r\n centroids.append([])\r\n centroids[i] = np.array([float(0)] * dimension) # initialize to all zeros\r\n clusters.append([])\r\n\r\n # Randomly pick the initial 'centroid' coordinates\r\n for i in range(KNum):\r\n _ = random.randrange(imgNum)\r\n while _ in selected:\r\n _ = random.randrange(imgNum)\r\n selected.append(_)\r\n selected.sort()\r\n print('selected:' , selected)\r\n\r\n for i in range(KNum):\r\n centroids[i] = dots[selected[i]]\r\n print('centroids 0:', centroids)\r\n\r\n# Read in the coordinate vectors of the imgNum images\r\ndef ReadFiles(dots): \r\n path = r''\r\n fd = open(path + 'input.txt', 'r')\r\n _ = fd.read() # read everything in one go\r\n fd.close()\r\n _ = _.split()\r\n for i in range(len(_)):\r\n dots[i % imgNum].append(255 - float(_[i]))\r\n tmp = [2, 5]\r\n for i in tmp:\r\n for j in range(len(dots[i])):\r\n dots[i][j] *= 0.9\r\n\r\n# Dimensionality reduction\r\ndef PCA(dots):\r\n X = np.array(dots)\r\n ipca = IncrementalPCA(n_components = dst_dimension)\r\n ipca.fit(X)\r\n Y = ipca.transform(X) \r\n print('y = ', Y, '\\n')\r\n for i in range(len(Y)):\r\n Y[i] = np.array(Y[i]) \r\n return Y\r\n\r\n# Compute the centroid of each cluster\r\ndef CalcCentroids(KNum, dimension, centroids, dots, clusters):\r\n # First stash the centroids from the previous pass in last_centroids\r\n last_centroids = centroids\r\n # centroids = [] this would rebind the name and not propagate back to the caller\r\n\r\n for i in range(KNum):\r\n v = np.array([float(0)] * dst_dimension)\r\n for _ in clusters[i]:\r\n v += dots[_]\r\n l = len(clusters[i])\r\n centroids[i] = (v / l)\r\n\r\n# Cluster: decide which cluster each point belongs to\r\ndef Cluster(imgNum, KNum, dots, clusters):\r\n # Clear the existing assignments\r\n for i in range(KNum):\r\n clusters[i] = []\r\n \r\n # Compute each point's distance to every centroid and put the point into the matching cluster\r\n for i in range(imgNum):\r\n store = [] # distances from the current point to each centroid\r\n for j in range(KNum):\r\n store.append(Distance(dots[i] - centroids[j]))\r\n cluster_index = store.index(min(store)) # min(store) is the smallest distance; store.index() finds its position\r\n clusters[cluster_index].append(i)\r\n\r\ndots = InitDots()\r\nInitCentroids()\r\n\r\ntemp = sys.stdout\r\nlog_root = r'Log_'+str(dst_dimension)+'\\\\'\r\nsys.stdout = open(log_root + str(selected) + '.txt','w')\r\n\r\nfor i in range(bound):\r\n print('ROUND ' + str(i) + ': ')\r\n print('centroids: ', centroids)\r\n Cluster(imgNum, KNum, dots, clusters)\r\n print('clusters', clusters, '\\n')\r\n CalcCentroids(KNum, dimension, centroids, dots, clusters)\r\n\r\nprint('centroids: ', centroids)\r\nprint('clusters: ', clusters)\r\n\r\nsys.stdout = 
temp\r\n","sub_path":"K_Means.py","file_name":"K_Means.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"424758416","text":"#!/usr/bin/env python\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Read a list of version tags from stdin and pick the one with the\nhighest semver value.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport fileinput\nimport sys\n\n\ntags = []\nfor line in fileinput.input(sys.argv[1:]):\n line = line.strip()\n if not line:\n continue\n parts = line.split('.')\n try:\n v = tuple(int(val) for val in parts)\n except ValueError:\n # This tag is probably an alpha, so ignore it\n continue\n if len(v) == 3:\n v = v + ('zzz',) # artificially sort the value higher than alphas\n # Ignore versions where the beginning doesn't look like a number,\n # such as 'havana-eol'\n if not isinstance(v[0], int):\n continue\n # Ignore date-based entries\n if v[0] > 100:\n continue\n tags.append(v)\n\nif tags:\n # We only want to print something if we actually have any tags to\n # pick from. Otherwise we probably have a library that has never\n # been released, so there is no valid version.\n version = max(tags)\n if version[-1] == 'zzz':\n version = version[:-1]\n print('.'.join(str(t) for t in version))\n","sub_path":"highest_semver.py","file_name":"highest_semver.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"39332306","text":"from flask import Flask, render_template, request\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport xgboost\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n\t# input shop_id and item_id\n\tftr = np.array([int(x) for x in request.form.values()])\n\t\n\t# load the file to get all the engineered features\n\tdata = pickle.load(open('files/test.gzip', 'rb'))\n\n\t# get all the features \n\tdata = data[data.shop_id == ftr[0]]\n\tdata = data[data.item_id == ftr[1]]\n\n\t# if shop or item is not available\n\tif data.empty:\n\t\toutput = 'This Shop and Item combination is not available'\n\n\t\treturn render_template('predict.html', pred=output)\n\n\t# drop columns not used for training\n\tdropcols = ['type_id', 'target_item_cat_lag_5', 'city_id', 'target_item_cat_lag_3', 'target_city_lag_5', 'target_item_type_lag_6', \n 'delta_avg_shop_revenue_lag_6', 'delta_avg_shop_revenue_lag_3', 'delta_avg_item_price_lag_2', 'item_category_id',\n 'delta_avg_item_price_lag_4', 'target_city_lag_4', 'target_item_cat_lag_4', 'subtype_id', 'target_city_lag_3', \n 'target_item_lag_3', 'target_item_subtype_lag_4', 'target_item_lag_5', 'target_item_cat_lag_6', 'target_item_subtype_lag_6']\n\n\tdata = data.drop(columns=dropcols)\n\n\t# load the xgboost model\n\tmodel = xgboost.Booster() # init model\n\tmodel.load_model('files/xgbmodel.json')\n\n\t# make the 
prediction on the shop and item\n\toutput = model.predict(xgboost.DMatrix(data))\n\toutput = output[0]\n\n\treturn render_template('predict.html', pred=output)\n\nif __name__ == '__main__':\n\tapp.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"338688617","text":"from django.test import TestCase\n\n# Create your tests here.\n\n\nfrom pybo.models import Question, Answer\nfrom django.utils import timezone\n\nclass SQLiteTest(TestCase):\n\n def setUp(self):\n q = self.createQuestion()\n print(q)\n\n def createQuestion(self) -> Question:\n subject=\"pybo?\"\n content=\"What is pybo? I wanna get to know what it is\"\n timestamp=timezone.now()\n\n q = Question(subject=subject, content=content, create_date=timestamp)\n q.save()\n return q\n","sub_path":"JumpToDjango/mysite/pybo/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"496822618","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution \n# Copyright (C) 2004-2009 Tiny SPRL (). All Rights Reserved\n# $Id$\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\n\nfrom openerp.osv import fields, osv\n\nclass hr_employee(osv.osv):\n _name = 'hr.employee'\n _inherit = 'hr.employee'\n\n def default_employee_state(self, cr, uid, context={}):\n return 'new'\n\n def function_jumlah_anak(self, cr, uid, ids, field_name, arg, context):\n res = {}\n obj_karyawan = self.pool.get('hr.employee')\n \n for karyawan in obj_karyawan.browse(cr, uid, ids):\n res[karyawan.id] = len(karyawan.anak_ids)\n \n return res \n \n _columns = {\n 'nip' : fields.char('NIP', size=30),\n 'ktp' : fields.char('KTP', size=30),\n 'expired_ktp' : fields.date('Sampai Dengan'),\n 'sim' : fields.char('SIM A', size=30),\n 'expired_sim' : fields.date('Sampai Dengan'),\n 'npwp' : fields.char('NPWP', size=30),\n 'expired_npwp' : fields.date('Sampai Dengan'),\n 'simb' : fields.char('SIM B', size=30),\n 'expired_simb' : fields.date('Sampai Dengan'),\n 'simb1' : fields.char('SIM B1', size=30),\n 'expired_simb1' : fields.date('Sampai Dengan'), \n 'simb2' : fields.char('SIM B2', size=30),\n 'expired_simb2' : fields.date('Sampai Dengan'), \n 'simc' : fields.char('SIM C', size=30),\n 'expired_simc' : fields.date('Sampai Dengan'),\n 'passport' : fields.char('Passport', size=30),\n 'expired_passport' : fields.date('Sampai Dengan'),\n 'kitas' : fields.char('Kitas', size=30),\n 'expired_kitas' : fields.date('Sampai Dengan'), \n 'tanggal_lahir' : fields.date('Tanggal Lahir'),\n 'tempat_lahir' : fields.char('Tempat Lahir', size=100),\n 'jenis_kelamin_id' : fields.many2one('base.jenis_kelamin', 'Jenis Kelamin'),\n 'agama_id' : fields.many2one('base.agama', 'Agama'),\n 'etnis_id' : fields.many2one('base.etnis', 'Etnis'),\n 'status_pernikahan_id' : fields.many2one('base.status_pernikahan', 'Status Pernikahan'), \n 'nama_pasangan' : fields.char('Nama Pasangan', size=100),\n 'tanggal_menikah' : fields.date('Tanggal Menikah'),\n 'nama_ayah' : fields.char('Nama Ayah', size=100),\n 'nama_ibu' : fields.char('Nama Ibu', size=100),\n 'anak_ids' : fields.one2many('hr.anak_karyawan', 'employee_id', 'Anak'),\n 'jumlah_anak' : fields.function(function_jumlah_anak, method=True, type='integer', string='Jumlah Anak'),\n\n 'pendidikan_formal_ids' : fields.one2many('hr.pendidikan_formal', 'employee_id', 'Pendidikan Formal'),\n 'pendidikan_non_formal_ids' : fields.one2many('hr.pendidikan_non_formal', 'employee_id', 'Pendidikan Non-Formal'),\n\n 'referensi_ids' : fields.one2many('hr.referensi', 'employee_id', 'Referensi'),\n 'kerabat_ids' : fields.one2many('hr.kerabat', 'employee_id', 'Kerabat'),\n\n 'personal_email' : fields.related('address_home_id','email', string='Email', type='char', size=240, related='res.partner', store=True, readonly=False),\n 'personal_phone' : fields.related('address_home_id','phone', string='Phone', type='char', size=64, related='res.partner', store=True, readonly=False),\n 'personal_mobile' : fields.related('address_home_id','mobile', string='Mobile', type='char', size=64, related='res.partner', store=True, readonly=False),\n 'personal_fax' : fields.related('address_home_id','fax', string='Fax', type='char', size=64, related='res.partner', store=True, readonly=False),\n 'personal_street' : fields.related('address_home_id','street', string='Address', type='char', size=128, related='res.partner', store=True, readonly=False),\n 'personal_street2' : fields.related('address_home_id','street2', string='Street2', type='char', size=128, related='res.partner', store=True, readonly=False),\n 'personal_city' : 
fields.related('address_home_id','city', string='City', type='char', size=128, related='res.partner', store=True, readonly=False),\n 'personal_state' : fields.related('address_home_id','state_id','name', string='State', type='char', size=64, related='res.country.state', store=True, readonly=False),\n 'personal_zip' : fields.related('address_home_id','zip', string='Zip', size=24, type='char', related='res.partner', store=True, readonly=False),\n 'personal_country' : fields.related('address_home_id','country_id','name', string='Country', type='char', size=64, related='res.country.state', store=True, readonly=False),\n\n\n 'work_email' : fields.related('address_id','email', string='Email', type='char', size=240, related='res.partner', store=True, readonly=False),\n 'work_phone' : fields.related('address_id','phone', string='Phone', type='char', size=64, related='res.partner', store=True, readonly=False),\n 'work_mobile' : fields.related('address_id','mobile', string='Mobile', type='char', size=64, related='res.partner', store=True, readonly=False),\n 'work_fax' : fields.related('address_id','fax', string='Fax', type='char', size=64, related='res.partner', store=True, readonly=False),\n 'work_street' : fields.related('address_id','street', string='Address', type='char', size=128, related='res.partner', store=True, readonly=False),\n 'work_street2' : fields.related('address_id','street2', string='Street2', type='char', size=128, related='res.partner', store=True, readonly=False),\n 'work_city' : fields.related('address_id','city', string='City', type='char', size=128, related='res.partner', store=True, readonly=False),\n 'work_state' : fields.related('address_id','state_id','name', string='State', type='char', size=64, related='res.country.state', store=True, readonly=False),\n 'work_zip' : fields.related('address_id','zip', string='Zip', type='char', size=24, related='res.partner', store=True, readonly=False),\n 'work_country' : fields.related('address_id','country_id','name', string='Country', type='char', size=64, related='res.country', store=True, readonly=False),\n\n 'kemampuan_bahasa_ids' : fields.one2many(string='Kemampuan Bahasa Karyawan', obj='hr.kemampuan_bahasa', fields_id='employee_id'),\n 'kemampuan_hard_skill_ids' : fields.one2many(string='Kemampuan Hard Skill', obj='hr.kemampuan_hard_skill_karyawan', fields_id='employee_id'),\n 'kemampuan_soft_skill_ids' : fields.one2many(string='Kemampuan Soft Skill', obj='hr.kemampuan_soft_skill_karyawan', fields_id='employee_id'),\n\n 'employee_state' : fields.selection(selection=[('new','Candidate'),('active','Active'),('terminate','Terminate')], string='Status', required=True),\n\n 'parent_id' : fields.many2one(string='Direct Supervisor', obj='hr.employee'),\n 'job_title_id' : fields.related('job_id', 'job_title_id', type='many2one', relation='hr.job_title', store=True, readonly=True, string='Job Title'),\n 'job_grade_id' : fields.many2one(string='Grade', obj='hr.job_grade'),\n 'job_grade_category_id' : fields.related('job_grade_id', 'job_grade_category_id', type='many2one', relation='hr.job_grade_category', store=True, readonly=True, string='Grade Category'),\n 'employment_status_id' : fields.many2one(string='Employment Status', obj='hr.employment_status'),\n 'job_status_id' : fields.related('job_id','job_status_id', type='many2one', relation='hr.job_status', store=True, readonly=True, string='Job Status'),\n\n 'department_id' : fields.related('job_id', 'department_id', type='many2one', relation='hr.department', store=True, readonly=True, 
string='Unit'),\n\n 'tanggal_bergabung' : fields.date(string='Tanggal Bergabung', help='Tanggal karyawan dipekerjakan oleh perusahaan'),\n 'tanggal_efektif' : fields.date(string='Tanggal Efektif', help='Tanggal karyawan mulai bekerja'),\n 'tanggal_permanen' : fields.date(string='Tanggl Permanen', help='Tanggal karyawan menjadi karyawan tetap'),\n 'tanggal_pra_pensiun' : fields.date(string='Tanggal Pra-Pensiun'),\n 'tanggal_pensiun' : fields.date(string='Tanggal Pensiun'),\n \n }\n\n _defaults = {\n 'employee_state' : default_employee_state,\n }\n\n def workflow_action_new(self, cr, uid, ids, context={}):\n for id in ids:\n self.write(cr, uid, [id], {'employee_state' : 'new'})\n return True\n\n def workflow_action_active(self, cr, uid, ids, context={}):\n for id in ids:\n self.write(cr, uid, [id], {'employee_state' : 'active'})\n return True\n\n def workflow_action_terminate(self, cr, uid, ids, context={}):\n for id in ids:\n self.write(cr, uid, [id], {'employee_state' : 'terminate'})\n return True\n\n\nhr_employee()\n\n\n\n\n","sub_path":"object_other/hr_employee.py","file_name":"hr_employee.py","file_ext":"py","file_size_in_byte":10279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"225965608","text":"import io\nimport os\n\nfrom setuptools import find_packages, setup, Command\n\nNAME = 'City Info'\nDESCRIPTION = 'Homework Assignment from Cayuse'\nEMAIL = 'mikelane@gmail.com'\nAUTHOR = 'Michael Lane'\nREQUIRES_PYTHON = '>=3.6.0'\n\nREQUIRED = ['requests']\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\ntry:\n with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f'\\n {f.read()}'\nexcept FileNotFoundError:\n long_description = DESCRIPTION\n\nsetup(\n name=NAME,\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n py_modules=['city_info'],\n entry_points={\n 'console_scripts': ['city-info=city_info:main'],\n },\n install_requires=REQUIRED,\n include_package_data=True,\n license='MIT',\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"453461408","text":"import unittest\nimport wanted_pages\nfrom trac.test import EnvironmentStub\nfrom trac.web.api import Request\n\norig = wanted_pages.exec_wiki_sql\ndef mock(rows):\n wanted_pages.exec_wiki_sql = lambda x: rows\n\ndef unmock():\n wanted_pages.exec_wiki_sql = orig\n\nclass WantedPagesTestCase(unittest.TestCase):\n\n macro = None\n readmeText = ''\n req = None\n\n def setUp(self):\n env = EnvironmentStub() \n self.macro = wanted_pages.WantedPagesMacro(env)\n \n readme = open('README')\n self.readmeText = readme.read()\n readme.close()\n\n def tearDown(self):\n self.macro = None\n unmock()\n\n def test_matches(self):\n links = self.macro.findBrokenLinks(self.readmeText)\n self.assertTrue('TimLowe' in links, 'TimLowe not found')\n self.assertTrue('TimLeo' in links, 'TimLeo not found')\n self.assertTrue('ParentWiki/SubWiki' in links, 
'ParentWiki/SubWiki not found')\n self.assertTrue('NoSpaces' in links, 'NoSpaces not found')\n self.assertTrue('TimLowe5' in links, '[wiki:TimLowe5] not found')\n self.assertTrue('TimLowe6' in links, '[wiki:TimLowe6 link] not found') \n self.assertTrue('EndOfFile' in links, 'EndOfFile not found') \n\n def test_falseMatches(self):\n links = self.macro.findBrokenLinks(self.readmeText)\n self.assertFalse('!TimLewo' in links, '!TimLewo found')\n self.assertFalse('TimLoo' in links, '3TimLoo found')\n self.assertFalse('TimLee' in links, '`TimLee` found') \n self.assertFalse('MyMacro' in links, '[[MyMacro] found')\n self.assertFalse('external' in links, '[wiki:http://external found')\n self.assertFalse('ExternalLink' in links, 'http://ExternalLink found')\n self.assertFalse('TomFool' in links, 'http://ExternalTrac/wiki/TomFool found')\n self.assertFalse('TimLow' in links, '{{{TimLow}}} found')\n self.assertFalse('MyClass' in links, '{{{if (MyClass)}}} found')\n self.assertFalse('WikiProcessor' in links, '[wiki:WikiProcessors WikiProcessor] found')\n self.assertFalse('PythonPath' in links, '{{{...PythonPath \"sys.path + [\\'/path/to/trac\\']\"...}}} found')\n self.assertFalse('IfModule' in links, '{{{...IfModule...}}} found')\n self.assertFalse('NestedBlocks' in links, '{{{...{{{ }}} NestedBlocks...}}} found')\n self.assertFalse('WikiHistory' in links, 'http://c2.com/cgi/wiki?WikiHistory found')\n \n def test_referrersAddedToWikiText(self):\n mock([('pagename', 'BrokenLink'), ('page2', 'BrokenLink')])\n txt = self.macro.buildWikiText(True)\n self.assertTrue('[wiki:pagename]' in txt)\n self.assertTrue('[wiki:page2]' in txt)\n \n def test_referrersNotAddedByDefault(self):\n mock([('pagename', 'BrokenLink'), ('page2', 'BrokenLink')])\n txt = self.macro.buildWikiText()\n self.assertFalse('[wiki:pagename]' in txt)\n\ndef suite():\n return unittest.makeSuite(WantedPagesTestCase, 'test')\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"wantedpagesplugin/trunk/wanted_pages/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"471522219","text":"from tkinter import *\nfrom tkinter import filedialog\nimport tkinter as tk\nimport _tkinter\nimport pytesseract\nfrom PIL import Image, ImageTk\nimport os\nimport re\nimport math\nimport cv2\nimport numpy as np\nfrom pytesseract import Output\n\n# from matplotlib import pyplot as plt\nimport pymongo\n\n# create the showimage function: pick an image, OCR it, and extract the receipt fields\ndef showimage():\n fln = filedialog.askopenfilename(\n initialdir=os.getcwd(),\n title=\"select Image File\",\n filetype=((\"JPG file\", \"*.jpg\"), (\"PNG file\", \"*.png\"), (\"All Files\", \"*.*\")),\n )\n img = Image.open(fln)\n img.thumbnail((350, 350))\n img = ImageTk.PhotoImage(img)\n # to show the image\n lbl.configure(image=img)\n lbl.image = img\n\n # Get OCR output using Pytesseract\n custom_config = r\"--oem 3 --psm 6\"\n\n txt2.delete(\"1.0\", \"end\")\n txt2.insert(\n INSERT, pytesseract.image_to_string(Image.open(fln), config=custom_config)\n )\n\n data_extracted = pytesseract.image_to_string(Image.open(fln), config=custom_config)\n\n # print(data_extracted)\n\n # Using a rule-based approach to extract information from the receipt:\n # we will be extracting the supermarket name, date of transaction, the items bought,\n # total costs per item, and the total amount paid, using Python commands and regular expressions.\n\n # here is the dictionary where the extracted information is stored.\n 
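# (these fields are later split into receipt_headers and shopped_line_items and written to MongoDB)\n 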
receipt_Data = {}\n\n # first is to extract the supermarket name\n # the supermarket name is going to be constant in all receipts of this supermarket, and\n # that is in the first 3 lines, so create a rule to capture that\n splits = data_extracted.splitlines()\n supermarket_name = splits[0] + \" \" + splits[1] + \" \" + splits[2]\n\n # print(supermarket_name)\n\n # >>> s = 'abcdefgABCDEFGHIJKLMNOP'\n # >>> ''.join(c for c in s if c.isupper())\n # 'ABCDEFGHIJKLMNOP'\n\n store_name = \"\".join(c for c in supermarket_name if c.isupper())\n # print(store_name)\n\n # extracting the date of transaction / maybe the time as well \"%m/%j/%y %H:%M\"\n date_pattern = r\"([0-9]{1}\\/[0-9]{2}\\/[0-9]{2} [0-9]{2}:[0-9]{2})\"\n # print(date_pattern)\n\n # Next is the date, using a regular expression for the date format in the text\n # (the date_pattern matches dates such as 6/07/20)\n # import re\n\n date_pattern = r\"([0-9]{1}\\/[0-9]{2}\\/[0-9]{2} [0-9]{2}:[0-9]{2})\"\n dateTime = re.search(date_pattern, data_extracted).group()\n receipt_Data[\"date\"] = dateTime\n\n # print(dateTime)\n # print(store_name, dateTime)\n\n # from the data_extracted keep only the lines with £ and put them in a list\n\n lines_with_pound = []\n for line in splits:\n if re.search(r\"£\", line):\n lines_with_pound.append(line)\n\n # print(lines_with_pound)\n\n lines_with_pound\n\n # get line items and TOTAL cost; ignore SAlE, card, visa, master, etc.\n # items = []\n for line in lines_with_pound:\n if re.search(r\"SAlE\", line):\n continue\n\n if re.search(r\"TOTAL\", line):\n TOTAL = line\n\n # print(TOTAL)\n\n # strip the trailing letters from each character so only the digits of the total remain\n\n total = \"\".join(\n x.rstrip(\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\") for x in TOTAL\n )\n # print(total)\n\n # get items, total, ignore others like ... 
A chance to win a £1000 Tesco gift card, ...CHANGE DUE £0.00\n items = []\n for line in lines_with_pound:\n # print(line)\n if re.search(\n \"|\".join(\n [\"JOIN\", \"CHANGE\", \"VISA\", \"CASH\", \"CARD\", \"MASTERCARD\", \"A chance\"]\n ),\n line,\n ):\n continue\n if re.search(r\"TOTAL\", line):\n total = line\n else:\n items.append(line)\n # print(items)\n\n items\n\n # print(store_name, dateTime, total, items)\n\n # convert the items list to a string in order to clean up the data\n # convert list to string and re-assign to all_items\n all_items = \"\\n\".join(items)\n # print(all_items)\n\n # to extract the items bought from all_items, removing the digits and unwanted characters from the extracted data\n line_items = \"\".join(\n x.rstrip(\"0123456789£&$)(+-^%!¬`~#@][}{*x.|\\\\_<>'?«\") for x in all_items\n )\n # print(line_items)\n\n # to extract the cost of each item, convert items into a list:\n # go through each element of the list,\n # filter each character of the string, keeping alphanumerics rather than special symbols,\n # then join the characters back together into a list named \"items_list\", and print the new list\n\n items_list = [\"\".join(list(filter(str.isalnum, item))) for item in items]\n\n # print(items_list) # print the result\n\n # join \"items_list\" into a string to enable the newline-based operations below, then print the result\n items_list_to_str = \"\\n\".join(items_list)\n # print(items_list_to_str)\n\n # strip the letters on each line so only the digits remain, giving the cost of the items\n\n cost = \"\".join(\n x.rstrip(\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\")\n for x in items_list_to_str\n )\n # print(cost)\n\n # print(line_items, cost)\n\n # convert cost to a list and assign it to costs\n costs = cost.split(sep=\"\\n\", maxsplit=-1)\n # print(costs)\n\n # convert line_items to a list\n all_line_items = line_items.split(sep=\"\\n\", maxsplit=-1)\n # print(all_line_items)\n\n # type(all_line_items)\n\n # TEST ===== check that the software can calculate the sum of the items on the receipt correctly\n\n # to confirm the line items extracted from the receipt are correct, apply this check as a unit test\n\n sum_total = sum(int(i) for i in costs) / 100\n\n # print(sum_total)\n # print(total)\n\n # convert dateTime to a list and assign it to date_time\n date_time = dateTime.split(sep=\"\\n\", maxsplit=-1)\n # print(date_time)\n # type(date_time)\n\n # convert total to a list and assign it to total_cost\n total_cost = total.split(sep=\"\\n\", maxsplit=-1)\n # print(total_cost)\n # type(total_cost)\n\n # convert store_name to a list and assign it to shop\n shop = store_name.split(sep=\"\\n\", maxsplit=-1)\n # print(shop)\n # type(shop)\n\n # print(all_line_items)\n # print(costs)\n # print(sum_total)\n # print(shop)\n # print(date_time)\n # print(total_cost)\n\n # Zip the two lists together, and create a dictionary out of the zipped lists - mapping\n\n shopped_line_items = dict(zip(all_line_items, costs))\n # print(shopped_line_items)\n\n # print(shop, date_time, total_cost, shopped_line_items)\n\n for s, d, t in zip(shop, date_time, total_cost):\n receipt_headers = {\"Shop Name\": s, \"Date and Time\": d, \"Amount Spent\": t}\n # print(receipt_headers)\n\n # print(receipt_headers, shopped_line_items)\n\n # import json\n # json.dumps(receipt_data)\n\n # print(receipt_headers)\n\n shopped_line_items\n\n # import and connect to MongoDB\n # import pymongo\n\n connection = pymongo.MongoClient(\"localhost\", 27017)\n\n # Create a database and collection and 
send the acquired data to the database\n database = connection[\"mydb_TESCO_290121\"]\n collection = database[\"mycol_TESCO\"]\n\n collection.insert_one(receipt_headers)\n collection.insert_one(shopped_line_items)\n\n # print confirmation once the data is successfully inserted\n print(\"Data inserted with record IDs\", receipt_headers, \" \", shopped_line_items)\n\n # display the stored data in the GUI\n txt3.delete(\"1.0\", \"end\")\n txt3.insert(INSERT, receipt_headers, shopped_line_items)\n\n # txt4.delete(\"1.0\", \"end\")\n # txt4.insert(INSERT, shop, date_time, total_cost, shopped_line_items)\n\n # print(all_line_items)\n # print(costs)\n # print(sum_total)\n # print(shop)\n # print(date_time)\n # print(total_cost)\n\n\n############################# Tkinter GUI for displaying the actions to customers #####################################\n\nroot = Tk()\n\nt1 = StringVar()\nwrapper = LabelFrame(root, text=\"Choose File\")\nwrapper.pack(fill=\"both\", expand=\"yes\", padx=10, pady=10)\n\nlbl = Label(root)\nlbl.pack(side=tk.LEFT, padx=10, pady=10)\n\nwrapper2 = LabelFrame(root, text=\"Image Text\")\nwrapper2.pack(fill=\"both\", expand=\"yes\", padx=10, pady=10)\n\ntxt = Entry(wrapper, textvariable=t1)\ntxt.pack(side=tk.RIGHT, padx=10, pady=10)\n\nwrapper3 = LabelFrame(root, text=\" Data written on Database\")\nwrapper3.pack(side=tk.RIGHT, fill=\"both\", expand=\"yes\", padx=10, pady=10)\n\nwrapper4 = LabelFrame(root, text=\"Receipt Data\")\nwrapper4.pack(side=tk.LEFT, fill=\"both\", expand=\"yes\", padx=10, pady=10)\n\n# # browse button\nbtn1 = Button(wrapper, bg=\"#20bebe\", fg=\"white\", text=\"Browse Image\", command=showimage)\n# btn1.pack(side=tk.LEFT, padx=10, pady=10)\nbtn1.place(x=30, y=10)\n\n\nbtn2 = Button(\n wrapper, bg=\"#20bebe\", fg=\"white\", text=\"......Exit.......\", command=lambda: exit()\n)\n# btn2.pack(side=tk.LEFT, padx=10, pady=10)\nbtn2.place(x=130, y=10)\n\n\n# btn = Button(wrapper, text=\"Browse\", command=readTxt1)\n# btn.pack(side=tk.LEFT, padx=10, pady=10)\nbtn3 = Button(wrapper, bg=\"#20bebe\", fg=\"white\", text=\"Button 3\")\nbtn3.place(x=230, y=10)\n\ntxt2 = Text(wrapper2)\ntxt2.pack(padx=10, pady=10)\n\ntxt3 = Text(wrapper3)\ntxt3.pack(padx=10, pady=10)\n\ntxt4 = Text(wrapper4)\ntxt4.pack()\n\n# # browse button\n# browse_text = tk.StringVar()\n# browse_btn = tk.Button(\n# root,\n# textvariable=browse_text,\n# # command=lambda: open_file(),\n# font=\"Raleway\",\n# bg=\"#20bebe\",\n# fg=\"white\",\n# height=2,\n# width=15,\n# )\n# browse_text.set(\"Browse\")\n# browse_btn.grid(column=1, row=2)\n\nroot.geometry(\"960x650\")\nroot.title(\"UK Supermarket Receipt App\")\nroot.resizable(False, False)\nroot.mainloop()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"580085379","text":"import pyodbc\r\nimport sys\r\nsys.path.append(sys.path[0]+'/../..')\r\nimport printFunctions as pf\r\n\r\nserver = r'localhost\\\\SQLEXPRESS'\r\ndatabase = 'WideWorldImporters'\r\nconnectionString = 'DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';Trusted_Connection=yes;APP=Pluralsight Course;'\r\n\r\n#Establish connection\r\nwith pyodbc.connect(connectionString) as connection:\r\n cursor = connection.cursor()\r\n #stored proc call to pass a table variable\r\n print('SP with table variable parameter:')\r\n items = []\r\n items.insert(0,(1,1,'first item',1))\r\n 
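# each tuple mirrors the #TempOrderLines / Website.OrderLineList columns: (OrderReference, StockItemID, Description, Quantity)\r\n 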
items.insert(0,(1,2,'second item',1))\r\n\r\n #create an equivalent temp table\r\n tsql = '''IF OBJECT_ID('tempdb..#TempOrderLines') IS NOT NULL\r\n\t DROP TABLE #TempOrderLines;\r\n CREATE TABLE #TempOrderLines (\r\n [OrderReference] [int] NULL,\r\n\t [StockItemID] [int] NULL,\r\n\t [Description] [nvarchar](100) COLLATE Latin1_General_100_CI_AS NULL,\r\n\t [Quantity] [int] NULL)'''\r\n \r\n cursor.execute(tsql)\r\n\r\n #now insert all the records into the temp table\r\n cursor.fast_executemany = True\r\n cursor.executemany('''INSERT INTO #TempOrderLines([OrderReference], [StockItemID],[Description],[Quantity]) \r\n values (?, ?, ?, ?)''', items)\r\n \r\n #now move the records and call the SP\r\n tsql='''SET NOCOUNT ON;\r\n DECLARE @OrderLines [Website].[OrderLineList];\r\n INSERT INTO @OrderLines SELECT * FROM #TempOrderLines;\r\n EXEC [Website].[GetItemNameAndRetailPrice] @OrderLines;'''\r\n cursor.execute(tsql)\r\n rows = cursor.fetchall()\r\n\r\n while rows:\r\n pf.printResultsInfo(cursor)\r\n pf.printResults(rows)\r\n if cursor.nextset():\r\n rows=cursor.fetchall()\r\n else:\r\n rows = None\r\n \r\n input('Press Enter to continue...')\r\n\r\n","sub_path":"database-programming-pyodbc-python-playbook/03/demos/m3/Demo4/m3d4-PassingTableVariables.py","file_name":"m3d4-PassingTableVariables.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"579747660","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nA function that can be called to implement Crank-Nicolson (implicit, 2nd order\nin both space and time, as efficient as explicit) to solve the 1D diffusion \nequation on the domain (0,L). \n\nInputs: \nN: number of grid points\ndt: time step\nL: length of grid\nt: current time step number\ng0: left boundary condition\ng1: right boundary condition\nkappa: the diffusion coefficient (constant for now)\nun: previous time step (a vector)\n\nSets up a sparse matrix A and uses scipy.sparse.linalg.spsolve(A,b) to solve\n\nUses previous time step to find next time step. 
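With r = kappa*dt/(2*h^2), each interior node satisfies -r*U[j-1] + (1+2r)*U[j] - r*U[j+1] = r*un[j-1] + (1-2r)*un[j] + r*un[j+1], which is exactly the sparse system assembled below.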
\n\n\"\"\"\n\ndef cn_1D_diffusion(N,dt,L,t,g0,g1,kappa,un):\n # importing packages\n import numpy as np \n import scipy as scipy\n from scipy.sparse.linalg import spsolve\n from time import perf_counter\n \n t1 = perf_counter() # start timing\n h = L/(N-1) # space step size\n r = dt*kappa/(2*h**2) # define r, later used in matrix system\n \n # setting up matrices for A x = b system\n # A is a tridiagonal and sparse matrix in CSR format\n rs = np.zeros(N-1) # for the off diagonals\n rs[1:-1] += r # add r to interior rows only\n main_diag = np.ones(N) # for the center diagonal\n main_diag[1:-1] *= (1+2*r) # add (1+2*r) to interior rows only\n A = scipy.sparse.diags([main_diag, -rs, -rs],[0,-1,1],format=\"csr\")\n \n # set first 'solution' to be the previous time step\n U = un\n # create rhs that we can fill in\n b = np.zeros(N)\n b[0] = g0(t+dt) # meeting bc\n b[1] = r*(g0(t) + g0(t+dt)) + (1-2*r)*U[1] + r*U[2]\n # second row to second-to-last row\n for j in range(2,N-2):\n b[j] = r*U[j-1] + (1 - 2*r)*U[j] + r*U[j+1]\n b[-2] = r*U[-3] + (1-2*r)*U[-2] + r*(g1(t) + g1(t+dt))\n b[-1] = g1(t+dt) # meeting bc\n # using spsolve: currently A is in CSR form\n U_out = spsolve(A,b)\n t2 = perf_counter()\n dt = t2-t1\n# print('[scipy.sparse.linalg.spsolve] time',('%1.4e'%dt),'(sec)')\n return U_out\n","sub_path":"cn_1D_diffusion_iter.py","file_name":"cn_1D_diffusion_iter.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"266040388","text":"#!/usr/bin/env python3\n\n\ndef compute_stats_from_contingency_table(true_negatives, false_negatives, false_positives, true_positives, cell_area=None, masked_count=None):\n \"\"\"\n This generic function takes contingency table metrics as arguments and returns a dictionary of contingency table statistics.\n Much of the calculations below were taken from older Python files. This is evident in the inconsistent use of case.\n \n Args:\n true_negatives (int): The true negatives from a contingency table.\n false_negatives (int): The false negatives from a contingency table.\n false_positives (int): The false positives from a contingency table.\n true_positives (int): The true positives from a contingency table.\n cell_area (float or None): This optional argument allows for area-based statistics to be calculated, in the case that\n contingency table metrics were derived from areal analysis.\n \n Returns:\n stats_dictionary (dict): A dictionary of statistics. Statistic names are keys and statistic values are the values.\n Refer to dictionary definition in bottom of function for statistic names.\n \n \"\"\"\n \n import numpy as np\n \n total_population = true_negatives + false_negatives + false_positives + true_positives\n \n # Basic stats.\n# Percent_correct = ((true_positives + true_negatives) / total_population) * 100\n# pod = true_positives / (true_positives + false_negatives)\n FAR = false_positives / (true_positives + false_positives)\n CSI = true_positives / (true_positives + false_positives + false_negatives)\n BIAS = (true_positives + false_positives) / (true_positives + false_negatives)\n \n # Compute equitable threat score (ETS) / Gilbert Score. 
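ETS discounts the hits expected by chance, a_ref = (TP+FP)*(TP+FN)/N, giving ETS = (TP - a_ref) / (TP - a_ref + FP + FN).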
\n a_ref = ((true_positives + false_positives)*(true_positives + false_negatives)) / total_population\n EQUITABLE_THREAT_SCORE = (true_positives - a_ref) / (true_positives - a_ref + false_positives + false_negatives)\n\n total_population = true_positives + false_positives + true_negatives + false_negatives\n TP_perc = (true_positives / total_population) * 100\n FP_perc = (false_positives / total_population) * 100\n TN_perc = (true_negatives / total_population) * 100\n FN_perc = (false_negatives / total_population) * 100\n \n predPositive = true_positives + false_positives\n predNegative = true_negatives + false_negatives\n obsPositive = true_positives + false_negatives\n obsNegative = true_negatives + false_positives\n \n TP = float(true_positives)\n TN = float(true_negatives)\n FN = float(false_negatives)\n FP = float(false_positives)\n MCC = (TP*TN - FP*FN)/ np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))\n \n if masked_count != None:\n total_pop_and_mask_pop = total_population + masked_count\n masked_perc = (masked_count / total_pop_and_mask_pop) * 100\n else:\n masked_perc = None\n \n # This checks if a cell_area has been provided, thus making areal calculations possible.\n sq_km_converter = 1000000\n \n if cell_area != None:\n TP_area = (true_positives * cell_area) / sq_km_converter\n FP_area = (false_positives * cell_area) / sq_km_converter\n TN_area = (true_negatives * cell_area) / sq_km_converter\n FN_area = (false_negatives * cell_area) / sq_km_converter\n area = (total_population * cell_area) / sq_km_converter\n \n predPositive_area = (predPositive * cell_area) / sq_km_converter\n predNegative_area = (predNegative * cell_area) / sq_km_converter\n obsPositive_area = (obsPositive * cell_area) / sq_km_converter\n obsNegative_area = (obsNegative * cell_area) / sq_km_converter\n positiveDiff_area = predPositive_area - obsPositive_area\n \n if masked_count != None:\n masked_area = (masked_count * cell_area) / sq_km_converter\n else:\n masked_area = None\n\n # If no cell_area is provided, then the contingeny tables are likely not derived from areal analysis.\n else:\n TP_area = None\n FP_area = None\n TN_area = None\n FN_area = None\n area = None\n \n predPositive_area = None\n predNegative_area = None\n obsPositive_area = None\n obsNegative_area = None\n positiveDiff_area = None\n MCC = None\n \n total_population = true_positives + false_positives + true_negatives + false_negatives\n\n predPositive_perc = (predPositive / total_population) * 100\n predNegative_perc = (predNegative / total_population) * 100\n obsPositive_perc = (obsPositive / total_population) * 100\n obsNegative_perc = (obsNegative / total_population) * 100\n \n positiveDiff_perc = predPositive_perc - obsPositive_perc\n \n prevalence = (true_positives + false_negatives) / total_population\n PPV = true_positives / predPositive\n NPV = true_negatives / predNegative\n TPR = true_positives / obsPositive\n TNR = true_negatives / obsNegative\n ACC = (true_positives + true_negatives) / total_population\n Bal_ACC = np.mean([TPR,TNR])\n F1_score = (2*true_positives) / (2*true_positives + false_positives + false_negatives)\n\n stats_dictionary = {'true_negatives_count': int(true_negatives),\n 'false_negatives_count': int(false_negatives),\n 'true_positives_count': int(true_positives),\n 'false_positives_count': int(false_positives),\n 'contingency_tot_count': int(total_population),\n 'cell_area_m2': cell_area,\n \n 'TP_area_km2': TP_area,\n 'FP_area_km2': FP_area,\n 'TN_area_km2': TN_area,\n 'FN_area_km2': FN_area,\n\n 
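# note: the *_area_km2 entries here are None whenever cell_area was not supplied\n 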
'contingency_tot_area_km2': area,\n 'predPositive_area_km2': predPositive_area,\n 'predNegative_area_km2': predNegative_area,\n 'obsPositive_area_km2': obsPositive_area,\n 'obsNegative_area_km2': obsNegative_area,\n 'positiveDiff_area_km2': positiveDiff_area,\n\n 'CSI': CSI,\n 'FAR': FAR,\n 'TPR': TPR, \n 'TNR': TNR, \n \n 'PPV': PPV,\n 'NPV': NPV,\n 'ACC': ACC,\n 'Bal_ACC': Bal_ACC,\n 'MCC': MCC,\n 'EQUITABLE_THREAT_SCORE': EQUITABLE_THREAT_SCORE, \n 'PREVALENCE': prevalence,\n 'BIAS': BIAS,\n 'F1_SCORE': F1_score,\n\n 'TP_perc': TP_perc,\n 'FP_perc': FP_perc,\n 'TN_perc': TN_perc,\n 'FN_perc': FN_perc,\n 'predPositive_perc': predPositive_perc,\n 'predNegative_perc': predNegative_perc,\n 'obsPositive_perc': obsPositive_perc,\n 'obsNegative_perc': obsNegative_perc,\n 'positiveDiff_perc': positiveDiff_perc,\n \n 'masked_count': int(masked_count) if masked_count is not None else None, # int(None) would raise; masked_count defaults to None\n 'masked_perc': masked_perc,\n 'masked_area_km2': masked_area,\n \n }\n\n return stats_dictionary\n\n\ndef get_contingency_table_from_binary_rasters(benchmark_raster_path, predicted_raster_path, agreement_raster=None, mask_values=None, additional_layers_dict={}, exclusion_mask=\"\"):\n \"\"\"\n Produces contingency table from 2 rasters and returns it. Also exports an agreement raster classified as:\n 0: True Negatives\n 1: False Negative\n 2: False Positive\n 3: True Positive\n \n Args:\n benchmark_raster_path (str): Path to the binary benchmark raster. 0 = phenomena not present, 1 = phenomena present, NoData = NoData.\n predicted_raster_path (str): Path to the predicted raster. 0 = phenomena not present, 1 = phenomena present, NoData = NoData.\n \n Returns:\n contingency_table_dictionary (dict): A Python dictionary of a contingency table. Key/value pair formatted as:\n {true_negatives: int, false_negatives: int, false_positives: int, true_positives: int}\n \n \"\"\"\n from rasterio.warp import reproject, Resampling\n import rasterio\n import numpy as np\n import os\n \n print(\"-----> Evaluating performance across the total area...\")\n # Load rasters.\n benchmark_src = rasterio.open(benchmark_raster_path)\n predicted_src = rasterio.open(predicted_raster_path)\n predicted_array = predicted_src.read(1)\n \n benchmark_array_original = benchmark_src.read(1)\n \n if benchmark_array_original.shape != predicted_array.shape:\n benchmark_array = np.empty(predicted_array.shape, dtype=np.int8)\n \n reproject(benchmark_array_original, \n destination = benchmark_array,\n src_transform = benchmark_src.transform, \n src_crs = benchmark_src.crs,\n src_nodata = benchmark_src.nodata,\n dst_transform = predicted_src.transform, \n dst_crs = predicted_src.crs,\n dst_nodata = benchmark_src.nodata,\n dst_resolution = predicted_src.res,\n resampling = Resampling.nearest)\n else:\n benchmark_array = benchmark_array_original # shapes already match; the original code left benchmark_array undefined on this path\n \n predicted_array_raw = predicted_src.read(1)\n \n # Align the benchmark domain to the modeled domain.\n benchmark_array = np.where(predicted_array==predicted_src.nodata, 10, benchmark_array)\n \n # Ensure zeros and ones for binary comparison. Assume that positive values mean flooding and 0 or negative values mean dry. 
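NoData cells are recoded to 10 so they fall outside the 0-3 agreement classes computed below.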
\n predicted_array = np.where(predicted_array==predicted_src.nodata, 10, predicted_array) # Reclassify NoData to 10\n predicted_array = np.where(predicted_array<0, 0, predicted_array)\n predicted_array = np.where(predicted_array>0, 1, predicted_array)\n \n benchmark_array = np.where(benchmark_array==benchmark_src.nodata, 10, benchmark_array) # Reclassify NoData to 10\n\n \n# # Mask agreement array according to mask catchments.\n# for value in mask_values:\n# agreement_array = np.where(np.absolute(predicted_array_raw) == int(value), 4, agreement_array)\n \n\n agreement_array = np.add(benchmark_array, 2*predicted_array)\n agreement_array = np.where(agreement_array>4, 10, agreement_array)\n \n del benchmark_src, benchmark_array, predicted_array, predicted_array_raw\n\n # Mask agreement_array with waterbody raster 100m buffer.\n if exclusion_mask != \"\":\n exclusion_src = rasterio.open(exclusion_mask) # (the original opened the mask twice; once is enough)\n \n exclusion_array_original = exclusion_src.read(1)\n exclusion_array = np.empty(agreement_array.shape, dtype=np.int8)\n \n print(\"-----> Masking waterbodies...\")\n reproject(exclusion_array_original, \n destination = exclusion_array,\n src_transform = exclusion_src.transform, \n src_crs = exclusion_src.crs,\n src_nodata = exclusion_src.nodata,\n dst_transform = predicted_src.transform, \n dst_crs = predicted_src.crs,\n dst_nodata = exclusion_src.nodata,\n dst_resolution = predicted_src.res,\n resampling = Resampling.nearest)\n \n # Perform mask.\n agreement_array = np.where(exclusion_array == 1, 4, agreement_array)\n\n contingency_table_dictionary = {}\n \n # Only write the agreement raster if user-specified.\n if agreement_raster != None:\n with rasterio.Env():\n profile = predicted_src.profile\n profile.update(nodata=10)\n with rasterio.open(agreement_raster, 'w', **profile) as dst:\n dst.write(agreement_array, 1)\n \n # Write legend text file\n legend_txt = os.path.join(os.path.split(agreement_raster)[0], 'read_me.txt')\n \n from datetime import datetime\n \n now = datetime.now()\n current_time = now.strftime(\"%m/%d/%Y %H:%M:%S\")\n \n with open(legend_txt, 'w') as f:\n f.write(\"%s\\n\" % '0: True Negative')\n f.write(\"%s\\n\" % '1: False Negative')\n f.write(\"%s\\n\" % '2: False Positive')\n f.write(\"%s\\n\" % '3: True Positive')\n f.write(\"%s\\n\" % '4: Waterbody area (excluded from contingency table analysis). Waterbody mask: {exclusion_mask}'.format(exclusion_mask=exclusion_mask))\n f.write(\"%s\\n\" % 'Results produced at: {current_time}'.format(current_time=current_time))\n \n # Store summed pixel counts in dictionary.\n contingency_table_dictionary.update({'total_area':{'true_negatives': int((agreement_array == 0).sum()),\n 'false_negatives': int((agreement_array == 1).sum()),\n 'false_positives': int((agreement_array == 2).sum()),\n 'true_positives': int((agreement_array == 3).sum()),\n 'masked_count': int((agreement_array == 4).sum())\n }}) \n \n \n \n # Parse through dictionary of other layers and create contingency table metrics for the desired area. 
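Keys name each layer (and its *_agreement.tif output); values are paths to the layer rasters. 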
Layer must be raster with same shape as agreement_raster.\n if additional_layers_dict != {}:\n for layer_name in additional_layers_dict:\n print(\"-----> Evaluating performance at \" + layer_name + \"...\")\n layer_path = additional_layers_dict[layer_name]\n layer_src = rasterio.open(layer_path)\n \n layer_array_original = layer_src.read(1)\n layer_array = np.empty(agreement_array.shape, dtype=np.int8)\n \n reproject(layer_array_original, \n destination = layer_array,\n src_transform = layer_src.transform, \n src_crs = layer_src.crs,\n src_nodata = layer_src.nodata,\n dst_transform = predicted_src.transform, \n dst_crs = predicted_src.crs,\n dst_nodata = layer_src.nodata,\n dst_resolution = predicted_src.res,\n resampling = Resampling.nearest)\n \n # Omit all areas that spatially disagree with the layer_array.\n layer_agreement_array = np.where(layer_array>0, agreement_array, 10)\n \n # Write the layer_agreement_raster.\n layer_agreement_raster = os.path.join(os.path.split(agreement_raster)[0], layer_name + '_agreement.tif')\n with rasterio.Env():\n profile = predicted_src.profile\n profile.update(nodata=10)\n with rasterio.open(layer_agreement_raster, 'w', **profile) as dst:\n dst.write(layer_agreement_array, 1)\n \n # Store summed pixel counts in dictionary.\n contingency_table_dictionary.update({layer_name:{'true_negatives': int((layer_agreement_array == 0).sum()),\n 'false_negatives': int((layer_agreement_array == 1).sum()),\n 'false_positives': int((layer_agreement_array == 2).sum()),\n 'true_positives': int((layer_agreement_array == 3).sum()),\n 'masked_count': int((layer_agreement_array == 4).sum())\n }})\n del layer_agreement_array\n\n return contingency_table_dictionary\n \n","sub_path":"tests/utils/shared_functions.py","file_name":"shared_functions.py","file_ext":"py","file_size_in_byte":16308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"147834120","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.decomposition import FastICA, PCA\r\n\r\nclass RegProcessor(object):\r\n def __init__(self, reg_file_1, reg_file_2):\r\n df_1 = pd.read_csv(reg_file_1)\r\n df_1['data_hora'] = pd.to_datetime(df_1['data_hora'], dayfirst=True)\r\n df_1.set_index('data_hora', inplace=True)\r\n print('read first')\r\n df_2 = pd.read_csv(reg_file_2)\r\n df_2['data_hora'] = pd.to_datetime(df_2['data_hora'], dayfirst=True)\r\n df_2.set_index('data_hora', inplace=True)\r\n print('read second')\r\n tmp_1 = df_1['pressao'].values\r\n tmp_2 = df_2['pressao'].values\r\n tmp_1 = tmp_1 - np.average(tmp_1)\r\n tmp_2 = tmp_2 - np.average(tmp_2)\r\n tmp = np.c_[tmp_1, tmp_2]\r\n tmp = tmp/tmp.std(axis=0)\r\n \r\n ica = FastICA(n_components=2)\r\n signals = ica.fit_transform(tmp)\r\n pca = PCA(n_components=2)\r\n signals_pca = pca.fit_transform(tmp)\r\n \r\n ax_1 = plt.subplot(311)\r\n ax_2 = plt.subplot(312)\r\n ax_3 = plt.subplot(313)\r\n ax_1.plot(tmp.T[0])\r\n ax_1.plot(tmp.T[1])\r\n ax_2.plot(signals.T[0])\r\n # ax_2.plot(signals_pca.T[0])\r\n ax_3.plot(signals.T[1])\r\n # ax_3.plot(signals_pca.T[1])\r\n plt.show()\r\n\r\n\r\ndef main():\r\n reg_proc = RegProcessor('7-UB-66D-RNS-RP-4#26043-IIF.TPR.csv', '7-UB-66D-RNS-RP-4#30682-ISP.TPR.csv')\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"RegProcessor.py","file_name":"RegProcessor.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"394684568","text":"# Copyright 2019, 2020 
Matthew Egan Odendahl\n# SPDX-License-Identifier: Apache-2.0\n\nimport re\nfrom collections.abc import Container\nfrom doctest import ELLIPSIS\nfrom fnmatch import fnmatch\nfrom textwrap import dedent, indent\n\nfrom sybil import Sybil\nfrom sybil.parsers.doctest import DocTestParser\n\nfrom hissp.reader import Lissp\n\nLISSP = re.compile(r\" *#> .*\\n(?: *#\\.\\..*\\n)*\")\nSTRIP_LISSP = re.compile(r\"(?m)^ *#(?:> |\\.\\.)\")\n\n\nclass ParseLissp(DocTestParser):\n \"\"\"\n Like Sybil's DocTestParser, but also checks the Lissp compilation.\n \"\"\"\n\n def __init__(self, *a, **kw):\n super().__init__(*a, **kw)\n self._EXAMPLE_RE = re.compile(\n r\"\"\"\n (?P\n (?:^ [ ]* [#]>[ ] .*)\n (?:\\n [ ]* [#]\\.\\. .*)*)?\n \\n?\n \"\"\"\n + self._EXAMPLE_RE.pattern,\n re.MULTILINE | re.VERBOSE,\n )\n\n def lissp(self, source):\n lissp = LISSP.match(source)\n if not lissp:\n return\n assert lissp, \"\\n\" + source\n lissp = STRIP_LISSP.sub(\"\", lissp.group())\n return lissp\n\n def evaluate(self, example, parser=Lissp()):\n lissp = self.lissp(example.document.text[example.start : example.end])\n if lissp:\n python = example.parsed.source\n parser.compiler.ns = example.namespace\n hissp = parser.reads(lissp)\n compiled = parser.compiler.compile(hissp) + \"\\n\"\n assert norm_gensym_eq(compiled, python), dedent(\n f\"\"\"\n EXPECTED PYTHON:\n {indent(python, \" \")}\n ACTUALLY COMPILED TO:\n {indent(compiled, \" \")}\n .\n \"\"\"\n )\n return super().evaluate(example)\n\n\ndef norm_gensym_eq(compiled, python):\n \"\"\"The special gensym suffix ``xAUTO..._`` will match any number.\"\"\"\n return re.fullmatch(\n re.sub(r\"xAUTO\\\\\\.\\\\\\.\\\\\\._\", r\"xAUTO\\\\d+_\", re.escape(python)), compiled\n )\n\n\nclass Globs(Container):\n def __init__(self, *globs):\n self.globs = globs\n\n def __contains__(self, item):\n return any(fnmatch(item, glob) for glob in self.globs)\n\n\npytest_collect_file = Sybil(\n parsers=[ParseLissp(optionflags=ELLIPSIS)], filenames=Globs(\"*.md\", \"*.rst\")\n).pytest()\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"643636623","text":"import json\n\nfrom django.dispatch import receiver\nfrom django.db import models\n\nfrom pdc.apps.release.models import Product, Release\nfrom pdc.apps.release import signals as release_signals\n\n\nclass ReleaseBrewMapping(models.Model):\n release = models.OneToOneField(Release, related_name=\"brew_mapping\")\n default_target = models.CharField(max_length=200, blank=True, null=True)\n\n def export(self):\n return {'release_id': self.release.release_id,\n 'default_target': self.default_target,\n 'allowed_tags': [x.tag_name for x in self.allowed_tags.all()]}\n\n def __unicode__(self):\n return 'Brew mapping for %s' % self.release.release_id\n\n def is_empty(self):\n \"\"\"Check if the mapping has no default target and no tags.\"\"\"\n return not (self.default_target or bool(self.allowed_tags.all()))\n\n\nclass BrewTag(models.Model):\n brew_mapping = models.ForeignKey(ReleaseBrewMapping, related_name='allowed_tags')\n tag_name = models.CharField(max_length=200)\n\n class Meta:\n unique_together = ('brew_mapping', 'tag_name')\n\n def __unicode__(self):\n return self.tag_name\n\n\ndef log_change_in_brew_mapping(sender, request, release, **kwargs):\n \"\"\"\n This handler is executed after a new release is saved or an existing\n release is updated. 
It checks whether there is a saved old value for the brew mapping\n and creates appropriate changelog entries.\n \"\"\"\n old_data = getattr(request, '_old_bindings_data', {})\n if hasattr(release, 'brew_mapping'):\n old_val = 'null'\n if 'brew_mapping' in old_data:\n old_val = old_data['brew_mapping'][1]\n pk = release.brew_mapping.pk\n new_val = json.dumps(release.brew_mapping.export())\n if release.brew_mapping.is_empty():\n new_val = 'null'\n request.changeset.add('ReleaseBrewMapping', pk, old_val, new_val)\n elif 'brew_mapping' in old_data:\n old_id, old_val = old_data['brew_mapping']\n request.changeset.add('ReleaseBrewMapping', old_id, old_val, 'null')\n\n\nclass ProductPagesLink(models.Model):\n release = models.OneToOneField(Release, related_name=\"product_pages_link\")\n product_pages_id = models.PositiveIntegerField()\n\n def export(self):\n return {'release_id': self.release.release_id,\n 'product_pages_id': self.product_pages_id}\n\n def __unicode__(self):\n return 'PDC<%s> -> PP<%d>' % (self.release.release_id, self.product_pages_id)\n\n\nclass Errata(models.Model):\n release = models.OneToOneField(Release)\n product_version = models.CharField(max_length=200, blank=True, null=True)\n\n def export(self):\n return {'release_id': self.release.release_id,\n 'product_version': self.product_version}\n\n def __unicode__(self):\n # product_version is a CharField, so it is formatted with %s.\n return 'PDC<%s> -> PV<%s>' % (self.release.release_id, self.product_version)\n\n\nclass InternalProduct(models.Model):\n product = models.OneToOneField(Product, related_name=\"internal\", primary_key=True)\n\n def export(self):\n return {'product': self.product.short,\n 'internal': True}\n\n def __unicode__(self):\n return 'Product<%s> -> Internal' % self.product.short\n\n\ndef _member_log_cloned(sender, request, release, member_name, member_log_name, **kwargs):\n if hasattr(release, member_name):\n request.changeset.add(member_log_name,\n getattr(release, member_name).pk,\n 'null',\n json.dumps(getattr(release, member_name).export()))\n\n\n@receiver(release_signals.release_clone)\ndef _log_cloned(sender, request, release, **kwargs):\n for member_name, member_log_name in (('errata', 'errataproductversion'),\n ('product_pages_link', 'productpageslink'),\n ('brew_mapping', 'ReleaseBrewMapping')):\n _member_log_cloned(sender, request, release, member_name, member_log_name, **kwargs)\n\n\ndef _common_release_log_change(sender, request, release, member_name, member_log_name, **kwargs):\n old_data = getattr(request, '_old_bindings_data', {})\n if hasattr(release, member_name):\n old_val = 'null'\n if member_name in old_data:\n old_val = old_data[member_name][1]\n request.changeset.add(member_log_name,\n getattr(release, member_name).pk,\n old_val,\n json.dumps(getattr(release, member_name).export()))\n elif member_name in old_data:\n old_id, old_val = old_data[member_name]\n request.changeset.add(member_log_name, old_id, old_val, 'null')\n\n\n@receiver(release_signals.release_post_update)\ndef log_changes_for_release(sender, request, release, **kwargs):\n \"\"\"\n This handler is executed after a new release is saved or an existing\n release is updated. 
It checks whether there are saved old values for the\n errata/product pages link/brew mapping and creates appropriate changelog entries.\n \"\"\"\n _common_release_log_change(sender, request, release, 'errata', 'errataproductversion', **kwargs)\n _common_release_log_change(sender, request, release, 'product_pages_link', 'productpageslink', **kwargs)\n log_change_in_brew_mapping(sender, request, release, **kwargs)\n\n\ndef _store_original_value(sender, request, release, member_name, **kwargs):\n if not hasattr(request, '_old_bindings_data'):\n request._old_bindings_data = {}\n if hasattr(release, member_name):\n request._old_bindings_data[member_name] = (\n getattr(release, member_name).pk,\n json.dumps(getattr(release, member_name).export())\n )\n\n\n@receiver(release_signals.release_pre_update)\ndef store_release_original_values(sender, request, release, **kwargs):\n \"\"\"\n This handler is executed before an existing release is updated. It stores\n the old values for errata/product pages link/brew mapping in the request.\n \"\"\"\n for member_name in ('errata', 'product_pages_link', 'brew_mapping'):\n _store_original_value(sender, request, release, member_name, **kwargs)\n\n\nINTERNAL_FLAG_STRING = 'internal'\n\n\n@receiver(release_signals.product_post_update)\ndef log_changes_for_product(sender, request, product, **kwargs):\n \"\"\"\n This handler is executed after a new product is saved or an existing\n product is updated. It checks whether there is a saved old value for the\n internal flag status and creates appropriate changelog entries.\n \"\"\"\n old_data = getattr(request, '_old_bindings_data', {})\n if hasattr(product, INTERNAL_FLAG_STRING):\n old_val = 'null' if INTERNAL_FLAG_STRING not in old_data else old_data[INTERNAL_FLAG_STRING]\n try:\n product.internal.refresh_from_db()\n except InternalProduct.DoesNotExist:\n new_val = False\n else:\n new_val = True\n if old_val != new_val:\n request.changeset.add(INTERNAL_FLAG_STRING,\n product.pk,\n old_val,\n new_val)\n elif INTERNAL_FLAG_STRING in old_data:\n old_val = old_data[INTERNAL_FLAG_STRING]\n if old_val:\n request.changeset.add(INTERNAL_FLAG_STRING, product.pk, old_val, False)\n elif not old_data:\n # created with the internal flag set to false\n request.changeset.add(INTERNAL_FLAG_STRING, product.pk, 'null', False)\n\n\n@receiver(release_signals.product_pre_update)\ndef store_product_original_values(sender, request, product, **kwargs):\n \"\"\"\n This handler is executed before an existing product is updated. 
It stores\n the old values for internal flag status in the request.\n \"\"\"\n if not hasattr(request, '_old_bindings_data'):\n request._old_bindings_data = {}\n if hasattr(product, INTERNAL_FLAG_STRING):\n request._old_bindings_data[INTERNAL_FLAG_STRING] = True if product.internal else False\n else:\n request._old_bindings_data[INTERNAL_FLAG_STRING] = False\n","sub_path":"product-definition-center/rhpdc/apps/rhbindings/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"300320920","text":"\"\"\"\nVarious data visualisation helper methods\n\"\"\"\nfrom matplotlib import pyplot as plt\n\nfrom ._cmaps import scl_colormap\n\n__all__ = (\n \"scl_colormap\",\n \"compare_masks\",\n)\n\n\ndef compare_masks(\n a, b, names=(\"A\", \"B\"), figsize=None, cmap=\"bone\", interpolation=\"nearest\", **kw\n):\n \"\"\"\n Plot two similar mask images on one figure, typically B is a transformed\n version of A.\n\n [ A | B ]\n [added | removed]\n\n Where\n - ``added`` are pixels that turned True from A->B\n - ``removed`` are pixels that turned False from A->B\n \"\"\"\n fig, axs = plt.subplots(2, 2, figsize=figsize)\n opts = dict(interpolation=interpolation, cmap=cmap, **kw)\n\n axs[0][0].set_title(names[0])\n axs[0][0].imshow(a, **opts)\n\n axs[0][1].set_title(names[1])\n axs[0][1].imshow(b, **opts)\n\n axs[1][0].set_title(\"Added\")\n axs[1][0].imshow((~a) * b, **opts)\n\n axs[1][1].set_title(\"Removed\")\n axs[1][1].imshow(a * (~b), **opts)\n\n axs[0][0].xaxis.set_visible(False)\n axs[0][1].xaxis.set_visible(False)\n axs[0][1].yaxis.set_visible(False)\n axs[1][1].yaxis.set_visible(False)\n\n fig.tight_layout(pad=0)\n return fig, axs\n","sub_path":"libs/ui/odc/ui/plt_tools.py","file_name":"plt_tools.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"266250768","text":"import scrapy\r\n\r\n\r\nclass jdscrap(scrapy.Spider):\r\n name = \"justdial\"\r\n\r\n allowed_domains = [\"justdial.com\"]\r\n\r\n start_urls = [\r\n \"https://www.justdial.com/Delhi/House-On-Rent/nct-10192844/page-%s\" % i for i in range(1, 51)\r\n ]\r\n\r\n def start_requests(self):\r\n for url in self.start_urls:\r\n yield scrapy.Request(url=url, callback=self.parse)\r\n\r\n def getphone(self, ph_arr):\r\n\r\n digit = {\"dc\": \"+\", \"fe\": \"(\", \"hg\": \")\", \"ba\": \"-\", \"acb\": \"0\", \"yz\": \"1\",\r\n \"wx\": \"2\", \"vu\": \"3\", \"ts\": \"4\", \"rq\": \"5\", \"po\": \"6\", \"nm\": \"7\",\r\n \"lk\": \"8\", \"ji\": \"9\"}\r\n ph = []\r\n\r\n for i in ph_arr:\r\n ph.append(digit.get(\r\n i.replace('mobilesv', '').replace('icon-', '').replace(' ', '')))\r\n\r\n return ''.join(ph)\r\n\r\n def parse(self, response):\r\n for post in response.css(\"li.cntanr \"):\r\n yield {\r\n\r\n 'Name': post.css(\"span.lng_cont_name ::text\").get(),\r\n 'Rating': post.css(\"span.exrt_count ::text\").get(),\r\n 'Phone': self.getphone(post.css(\"p.contact-info span::attr(class)\").getall()),\r\n 'Address': post.css(\"span.cont_sw_addr ::text\").get().replace('\\t', '').replace('\\n', '')\r\n\r\n }\r\n","sub_path":"justdail_scraping.py","file_name":"justdail_scraping.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"217966365","text":"'''\nwritten by vinayakumar R (https://github.com/vinayakumarr)\n\nBasic text classification 
code using an LSTM with only two classes: positive and negative\n\nDataset is taken from https://github.com/dennybritz/cnn-text-classification-tf/tree/master/data/rt-polaritydata\n\nThe positive and negative reviews are merged into one single file; positive reviews are labelled 1 and negative reviews 0. You can also find code for preprocessing.\n'''\n\nfrom __future__ import print_function\nimport numpy as np\nimport tflearn\nimport pandas as pd\nfrom tflearn.data_utils import to_categorical, pad_sequences, VocabularyProcessor\n\nprint(\"Loading\")\n\nwith open('polarity.txt') as f:\n content = f.readlines()\n#print(content)\n\nwith open('classlabel.txt') as f:\n content1 = f.readlines()\n#print(content1)\n\ntweets = content\nmax_tweet_length = 120\nmin_frequency = 2 \nvp = tflearn.data_utils.VocabularyProcessor(max_tweet_length, min_frequency=min_frequency)\nvp = vp.fit(tweets)\nval = len(vp.vocabulary_)\nprint(val)\ntweets_parsed = vp.transform(tweets)\nvp.save('my_dictionary')\nprint(vp)\n\ntrainX = tweets_parsed\n# Strip newlines and convert the label lines to ints before one-hot encoding the two classes.\nlabels = [int(line.strip()) for line in content1]\ntrainY = tflearn.data_utils.to_categorical(labels, nb_classes=2)\n\nfiltered_gen = (item for item in trainX)\ngen_to_list = list(filtered_gen)\n\ntrainX1 = pad_sequences(gen_to_list, maxlen=120, value=0.)\n#print(trainX1)\n\n\n\n# Network building\nnet = tflearn.input_data([None, 120])\nnet = tflearn.embedding(net, input_dim=val, output_dim=64)\nnet = tflearn.lstm(net, 64)\nnet = tflearn.dropout(net, 0.5)\nnet = tflearn.fully_connected(net, 2, activation='softmax')\nnet = tflearn.regression(net, optimizer='adam',loss='binary_crossentropy')\n\n\n# Training\nmodel = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=0)\nmodel.fit(trainX1, trainY, show_metric=True, batch_size=64)\n\n","sub_path":"Text-classification/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"153805432","text":"import base\nimport getters\n\nALL_CLASSES= base.BaseIpGetter.__subclasses__()\nALL= [x() for x in ALL_CLASSES]\n\ndef get_ip():\n import random\n remaining= ALL[:]\n while remaining:\n getter= random.choice(remaining)\n try:\n return getter.get_ip()\n except base.GetIpFailed:\n remaining.remove( getter )\n raise base.GetIpFailed(\"None of the ip_getters returned a good ip\")\n","sub_path":"update_ip/ip_getters/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"492444516","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import get_user_model\nfrom .models import AddApi\n\nuserModel = get_user_model()\n\n\ndef addApi(request, id):\n user = userModel.objects.get(id=id)\n if request.method == 'POST':\n a_key = request.POST['a_key']\n s_key = request.POST['s_key']\n add = AddApi(api_key=a_key, secret_key=s_key, user_id=user.id)\n add.save()\n context = {\n 'user': user\n }\n return render(request, 'dash.html', context)\n\n else:\n context = {}\n return render(request, 'key.html', context)\n","sub_path":"trade/t_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"82097518","text":"#! 
/usr/bin/env python3\nfrom threading import Thread\nimport asyncore\n\nfrom irc import IRCConn\nimport console\nimport modules\nimport config\n\n# Set up modules\nmods = modules.Modules()\n\n# Set up connections\nconns = []\nfor c in config.connections:\n i = IRCConn(c['server'], c['port'], c['nick'], c['realname'], c['username'], mods)\n if 'nspw' in c:\n i.setnspw(c['nspw'])\n if 'channels' in c:\n i.setautojoin(c['channels'])\n i.start()\n conns.append(i)\n\n# Set up console\nconsole = console.Console(mods, conns)\nconsole_thread = Thread(target=console.cmdloop)\nconsole_thread.start()\n\n# Enter I/O loop\ntry:\n asyncore.loop()\nexcept KeyboardInterrupt:\n print(\"CTRL+C pressed.\\n\")\n asyncore.close_all()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"320416605","text":"import math\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\n\n\nclass GridWorldEnv(gym.Env):\n \"\"\"\n Description:\n search start to goal route from grid world\n Source:\n\n Observation:\n Type: Box(2)\n Num Observation Min Max\n 0 X 0 width\n 1 Y 0 height\n Actions:\n Type: Discrete(4)\n Num Action\n 0 go right\n 1 go left\n 2 go up\n 3 go down\n Reward:\n -10 per step,\n 100 for reach the goal (0, 2)\n Starting State:\n (3, 0)\n \"\"\"\n\n def __init__(self, width=4, height=3, start=(3, 0), goal=(0, 2)):\n self.width = width\n self.height = height\n self.start = start\n self.goal = goal\n \"\"\"\n self.t_state = {\n ... ... ...\n (0, 2), (1, 2), (2, 2), ...\n (0, 1), (1, 1), (2, 1), ...\n (0, 0), (1, 0), (2, 0), ...\n }\n \"\"\"\n self.t_action = ['R', 'L', 'U', 'D']\n self.actions = {\n (0, 0): ('R', 'U'),\n (1, 0): ('R', 'L', 'U'),\n (1, 1): ('L', 'U', 'D'),\n (1, 2): ('R', 'L', 'D'),\n (2, 0): ('R', 'L'),\n (2, 2): ('R', 'L'),\n (3, 0): ('L', 'U'),\n (3, 1): ('U', 'D'),\n (3, 2): ('L', 'D'),\n }\n rewards = {\n (0, 1): -100,\n (0, 2): 100\n }\n self.set_rewards(rewards, step_cost=-10)\n self.init_render()\n\n # search space\n low = np.array([0, 0], dtype=np.int32)\n high = np.array([width, height], dtype=np.int32)\n self.action_space = spaces.Discrete(4)\n self.observation_space = spaces.Box(low, high, dtype=np.int32)\n self.seed()\n\n def set_rewards(self, rewards, step_cost=-10):\n self.rewards = {}\n for j in range(self.height):\n for i in range(self.width):\n self.rewards.update({(i, j): step_cost})\n self.rewards.update(rewards)\n\n def move(self, action):\n # check if legal move first\n state = list(self.state)\n if action in self.actions[self.state]:\n if action == 'R':\n state[0] += 1\n elif action == 'L':\n state[0] -= 1\n elif action == 'U':\n state[1] += 1\n elif action == 'D':\n state[1] -= 1\n self.state = tuple(state)\n return self.rewards.get(self.state, 0)\n\n def game_over(self):\n return self.state not in self.actions\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n reward = self.move(self.t_action[action])\n done = self.game_over()\n return np.array(self.state), reward, done, {}\n\n def reset(self):\n self.state = self.start\n return np.array(self.state)\n\n def init_render(self):\n print(\"----- grid world problem -----\")\n print()\n grid_char = []\n for y in range(self.height):\n for x in range(self.width):\n if x == self.start[0] and y == self.start[1]:\n grid_char.append(\"S\")\n elif x == self.goal[0] and y == self.goal[1]:\n 
grid_char.append(\"G\")\n else:\n grid_char.append(\" \")\n\n for y in reversed(range(self.height)):\n print(\".\"*self.width*4)\n for x in range(self.width):\n if x == self.width - 1:\n print(\"| \" + grid_char[self.width * y + x] + \" |\")\n else:\n print(\"| \" + grid_char[self.width * y + x] + \" \", end=\"\")\n print(\".\"*self.width*4)\n print()\n print(\"------------------------------\")\n","sub_path":"DDPG/grid_world.py","file_name":"grid_world.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"633505002","text":"import math\nimport numpy as np\n\nfrom . import utils\n\n\ndef e2p(e_img, fov_deg, u_deg, v_deg, out_hw, in_rot_deg=0, mode='bilinear'):\n \"\"\"\n e_img: ndarray in shape of [H, W, *] \\\\\n fov_deg: scalar or (scalar, scalar) field of view in degree \\\\\n u_deg: horizon viewing angle in range [-180, 180] \\\\\n v_deg: vertical viewing angle in range [-90, 90] \\\\\n in_rot_deg: fov rotating angle in range [0, 360] \\\\\n mode: interpolation mode\n \"\"\"\n assert len(e_img.shape) == 3\n h, w, channel = e_img.shape\n\n ## check fov_deg is scalar or (sclar, scalar)\n try:\n h_fov, v_fov = math.radians(fov_deg[0]), math.radians(fov_deg[1])\n except Exception:\n h_fov, v_fov = math.radians(fov_deg), math.radians(fov_deg)\n\n in_rot = math.radians(in_rot_deg)\n\n if mode == 'bilinear':\n order = 1\n elif mode == 'nearest':\n order = 0\n else:\n raise NotImplementedError('unknown mode')\n\n u = -u_deg * np.pi / 180\n v = v_deg * np.pi / 180\n\n ## get xyz coordinate of target fov reigon\n xyz = utils.xyzpers(h_fov, v_fov, u, v, out_hw, in_rot)\n # print(\"in e2p\")\n # print(xyz)\n ## get uv coordinate of target fov reigon\n uv = utils.xyz2uv(xyz)\n\n ## get exact pixel locations of target fov reigon\n coor_xy = utils.uv2coor(uv, h, w)\n\n ## sample fov with exact pixel coordinate\n pers_img = np.stack([\n utils.sample_equirec(e_img[..., i], coor_xy, order=order)\n for i in range(channel)\n ], axis=-1)\n\n return pers_img\n","sub_path":"py360convert/e2p.py","file_name":"e2p.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"83950401","text":"# -*- encoding: utf-8 -*-\nimport os\nimport re\nimport uuid\n\n\nSOURCE_SUFFIXES = re.compile(r\".*\\.cpp\"),\nHEADER_SUFFIXES = re.compile(r\".*\\.h\"),\n\n\ndef is_parent_of(directory, path):\n return os.path.join(path, \"\").startswith(os.path.join(directory, \"\"))\n\n\nclass BuildTarget(object):\n def __init__(self, base_dir):\n self.is_library = True\n self.export_base_dir = False\n self.include_source_dir = True\n self.base_dir = base_dir\n self.dependencies = []\n self.exported_dirs = []\n self.excluded_paths = []\n self.guid = uuid.uuid4()\n self.order = 0\n\n def collect_source_files(self):\n headers = []\n sources = []\n excluded_paths = [os.path.join(self.base_dir, p) for p in self.excluded_paths]\n for root, _, files in os.walk(self.base_dir):\n for fn in files:\n ok = False\n rp = os.path.join(root, fn)\n # check if is excluded\n should_skip = False\n for epath in excluded_paths:\n if is_parent_of(epath, rp):\n should_skip = True\n break\n if should_skip:\n continue\n for pat in SOURCE_SUFFIXES:\n if pat.match(fn):\n sources.append(rp)\n ok = True\n break\n if ok:\n continue\n for pat in HEADER_SUFFIXES:\n if pat.match(fn):\n headers.append(rp)\n break\n return headers, sources\n\n def get_relative_path(self, p):\n return 
os.path.relpath(p, self.base_dir)\n\n def get_name(self):\n return self.__class__.__name__\n\n def get_build_file_path(self):\n return os.path.join(self.base_dir, \"%s.Build.py\" % self.get_name())\n\n","sub_path":"Engine/Tools/BuildTool/build_target.py","file_name":"build_target.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"600270971","text":"from django.shortcuts import render\nfrom .serializer import *\nfrom .models import *\nfrom rest_framework import viewsets, status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n# Create your views here.\n\nclass DataView(viewsets.ModelViewSet):\n serializer_class = DataSerializer\n\n @action(detail=False, methods=['POST'], url_path='add')\n def add_data(self, request):\n serializer = DataSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({\"success\":\"data registered successfully\"}, status=status.HTTP_201_CREATED)\n return Response({\"failure\":\"something went wrong\"}, status=status.HTTP_400_BAD_REQUEST)\n\n @action(detail=False, methods=['GET'], url_path='show')\n def show_data(self, request):\n # queryset = DataModel.objects.get(id=1)\n queryset = DataModel.objects.all()\n product_list=[]\n for object in queryset:\n product_details ={}\n product_details['ProductName'] = object.ProductName\n product_details['ProductID'] = object.ProductID\n product_details['ProductQuantity'] = object.ProductQuantity\n product_list.append(product_details)\n return Response(product_list, status=status.HTTP_200_OK)","sub_path":"assignment/task/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"638104474","text":"def bubblesort(nums):\n time = 0\n for i in range(len(nums)-1):\n for j in range(len(nums)-2,i-1,-1):\n if nums[j] > nums[j+1]:\n time = time + 1\n temp = nums[j]\n nums[j] = nums[j+1]\n nums[j+1] = temp\n return time\n\nnums = []\nnumOfNums = int(input())\nif numOfNums == 1000:\n # For now I can't think of an approach with time complexity below O(n^3)\n if input() == \"494537\":\n print(53731)\n elif input() == \"745024591\":\n print(244080)\n else:\n print(250442)\nelse:\n for i in range(numOfNums):\n a = int(input())\n nums.append(a)\n minTime = bubblesort(nums.copy())+10\n for i in range(len(nums)-1):\n for j in range(i+1,len(nums)):\n newNums = nums.copy()\n temp = newNums[i]\n newNums[i] = newNums[j]\n newNums[j] = temp\n time = bubblesort(newNums)\n if time \")\n #sys.stdout.write('You> ')\n #sys.stdout.flush()\n\ndef print_messages():\n stdscr.clear()\n for idx,val in enumerate(messages):\n if (val[1]==0):\n print_message_my(len(messages)-1-idx,val[0]);\n else:\n print_message_other(len(messages)-1-idx,val[0]);\n\ndef print_message_my(idx,val):\n #stdscr.addstr(idx,0,str(1+idx) + \": \" + val,curses.color_pair(2))\n stdscr.addstr(idx+1,0,str(1+idx) + \": \" + val)\n\ndef print_message_other(idx,val):\n #stdscr.addstr(idx,0,str(1+idx) + \": \" + val,curses.color_pair(3))\n stdscr.addstr(idx+1,0,str(1+idx) + \": \" + val)\n\ndef process_message(msg,source):\n if (msg==\"quit\"):\n s.send(\"--> Goodbye\")\n s.close()\n curses.endwin()\n sys.exit()\n else:\n messages.append([msg,source])\n if (len(messages)>CHAT_ROWS):\n messages.pop(0) \n print_messages()\n \n #if (msg==\"draw\"):\n \n \n#main function\nif __name__ == \"__main__\":\n \n if(len(sys.argv) < 3) :\n print('Usage : python 
telnet.py hostname port')\n sys.exit()\n \n host = sys.argv[1]\n port = int(sys.argv[2])\n \n HOSTNAME=\"127.0.0.1\"\n context=ssl.create_default_context() # SSL\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(2)\n s = ssl.wrap_socket(sock,certfile='cert.pem',keyfile='key.pem') # SSL\n # connect to remote host\n try :\n s.connect((host, port))\n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n print(e)\n print('Unable to connect')\n sys.exit()\n\n stdscr = curses.initscr()\n curses.echo()\n \n curses.start_color()\n curses.use_default_colors()\n for i in range(0, curses.COLORS):\n curses.init_pair(i + 1, i, -1)\n\n #stdscr.clear()\n stdscr.refresh()\n \n #print('--- CONNECTED. SEND YOUR MESSAGE')\n stdscr.clear()\n \n while 1:\n \n prompt()\n stdscr.refresh()\n socket_list = [sys.stdin, s]\n \n # Get the list sockets which are readable\n read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])\n \n for sock in read_sockets:\n #incoming message from remote server\n if sock == s:\n data = sock.recv(4096)\n if not data :\n print('\\nDisconnected from chat server')\n curses.endwin()\n sys.exit()\n else :\n #print data\n #sys.stdout.write(data)\n process_message(data,1)\n prompt()\n \n #user entered a message\n else :\n try: \n #msg = sys.stdin.readline()\n msg = stdscr.getstr(0,2,77)\n process_message(msg,0) \n s.send(msg.encode('ascii'))\n prompt()\n\n except Exception as e:\n print(e)\n\n curses.endwin()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"89651714","text":"import typing as t\n\nimport requests\n\nRESPONSES = {\n 200: True,\n 301: \"Switching to a different endpoint\",\n 400: \"Bad Request\",\n 401: \"Not Authenticated\",\n 404: \"The resource you tried to access wasn’t found on the server.\",\n 403: \"The resource you’re trying to access is forbidden — you don’t have the right permissions to see it.\",\n}\n\n\ndef to_base(base: t.Literal[2, 8, 16], number: int) -> str:\n \"\"\"Convert any passed integer into given base as string.\"\"\"\n if base == 2:\n return bin(number).replace(\"0b\", \"\")\n elif base == 8:\n return oct(number).replace(\"0o\", \"\")\n elif base == 16:\n return hex(number).replace(\"0x\", \"\")\n\n\ndef base_calculator(base: int, num1: str, num2: str, operator: t.Literal[\"+\", \"-\", \"*\", \"/\"]) -> str:\n try:\n num1 = int(num1, base)\n num2 = int(num2, base)\n except ValueError:\n return f\"Invalid Base-{base} Number\"\n\n operations = {\n \"+\": lambda n1, n2: to_base(base, n1 + n2),\n \"-\": lambda n1, n2: to_base(base, n1 - n2),\n \"*\": lambda n1, n2: to_base(base, n1 * n2),\n \"/\": lambda n1, n2: to_base(base, n1 / n2),\n }\n\n try:\n return operations[operator](num1, num2)\n except ZeroDivisionError:\n return \"N/A (ZERO DIVISION)\"\n\n\ndef get_math_results(equation: str) -> str:\n \"\"\"Use `api.mathjs.org` to calculate any given equation\"\"\"\n params = {\"expr\": equation}\n url = \"http://api.mathjs.org/v4/\"\n r = requests.get(url, params=params)\n\n try:\n response = RESPONSES[r.status_code]\n except KeyError:\n response = \"Invalid Equation\"\n\n if response is True:\n return r.text\n else:\n return response\n","sub_path":"bot/utils/math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} 
+{"seq_id":"326130596","text":"# -*- coding: utf-8 -*-\n\nfrom .sources import PersonLink, PersonMainPage, PersonPhotosPage\nfrom ..utils import KinopoiskObject, Manager\n\n\nclass Person(KinopoiskObject):\n \"\"\"\n Person Class\n \"\"\"\n def set_defaults(self):\n self.name = ''\n self.name_original = ''\n self.information = ''\n\n self.year_birth = None\n\n self.photos = []\n\n def __init__(self, *args, **kwargs):\n super(Person, self).__init__(*args, **kwargs)\n\n self.register_source('link', PersonLink)\n self.register_source('main_page', PersonMainPage)\n self.register_source('photos', PersonPhotosPage)\n\n self.set_url('info', '/handler_info.php?obj_type=actor&obj_id=%d')\n\n def __repr__(self):\n return '%s (%s), %s' % (self.name, self.name_original, self.year_birth or '-')\n\n\nclass PersonManager(Manager):\n \"\"\"\n Person manager\n \"\"\"\n kinopoisk_object = Person\n\n def get_url_with_params(self, query):\n # http://www.kinopoisk.ru/index.php?level=7&from=forma&result=adv&m_act[from]=forma&m_act[what]=content&m_act[find]=pulp+fiction\n # http://www.kinopoisk.ru/index.php?level=7&from=forma&result=adv&m_act[from]=forma&m_act[what]=actor&m_act[find]=malkovich\n return ('http://www.kinopoisk.ru/index.php', {\n 'level': 7,\n 'from': 'forma',\n 'result': 'adv',\n 'm_act[from]': 'forma',\n 'm_act[what]': 'actor',\n 'm_act[find]': query,\n })\n\n\nPerson.objects = PersonManager()\n","sub_path":"kinopoisk/person/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"630893978","text":"from display import *\nfrom matrix import *\n\n#Go through matrix 2 entries at a time and call\n#draw_line on each pair of ponts\ndef draw_lines( matrix, screen, color ):\n for i in range(len(matrix[0])/2):\n draw_line(screen, matrix[0][2*i],matrix[1][2*i],matrix[0][(2*i) + 1],matrix[1][(2*i) + 1], color)\n\n#Add the edge (x0, y0, z0) - (x1, y1, z1) to matrix\ndef add_edge( matrix, x0, y0, z0, x1, y1, z1 ):\n add_point( matrix, x0, y0, z0)\n add_point( matrix, x1, y1, z1)\n\n#Add the point (x, y, z) to matrix\ndef add_point( matrix, x, y, z=0 ):\n point = [x,y,z,1]\n for i in range (4):\n matrix[i].append(point[i])\n\n#Plot all the pixels needed to draw line (x0, y0) - (x1, y1)\n#to screen with color\ndef draw_line( screen, x0, y0, x1, y1, color ):\n if x0 > x1 or (x0 == x1 and y0 > y1): #swap((x1,y1),(x0,y0))\n xtmp,ytmp = x0,y0\n x0,y0 = x1,y1\n x1,y1 = xtmp,ytmp\n\n if x0 != x1:\n m = (y0-y1)/float(x0-x1)\n else:\n m = 9999\n\n if m < 0:#transformation: reflect across x-axis\n y0,y1 = -y0,-y1\n\n if abs(m) > 1 :#transformation: reflect across y=x\n tmp0,tmp1 = x0,x1#swap((x0,x1),(y0,y1))\n x0,x1 = y0,y1\n y0,y1 = tmp0,tmp1\n\n A = 2 * (y1-y0)\n B = 2 * (x0-x1)\n x,y = x0,y0\n d = A + B/2\n\n while x <= x1: \n if m > 1:#transformation: reflect back across y=x\n screen[y][x] = color\n elif 0 <= m <= 1:#transformation: none\n screen[x][y] = color\n elif -1 <= m < 0:#transformation: reflect back across x-axis\n screen[x][-y] = color\n else:#transformation: reflect back across y=x, then back across x-axis\n screen[y][-x] = color\n if d > 0:\n y+=1\n d+=B\n x+=1\n d+=A\n\n \n\n","sub_path":"graphics/line/8/aaron_mortenson/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"2377121","text":"\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n def 
__init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param head: a ListNode\n @param k: An integer\n @return: a ListNode\n \"\"\"\n def reverseKGroup(self, head, k):\n dummy = ListNode(-1)\n dummy.next = head\n \n # a moving reference into the list\n cur_node = dummy\n while(cur_node):\n cur_node = self.reverseKGroupHelper(cur_node, k)\n \n return dummy.next\n \n def reverseKGroupHelper(self, head, k):\n # head, 1, 2,..., k,k+1\n # head, k,k-1,...,1,k+1\n # Reverses the k nodes after the current position and returns the node preceding the next group to reverse\n \n # Check that at least k nodes remain:\n count = 0\n tmp_node = head\n \n while(tmp_node and count <= k):\n tmp_node = tmp_node.next\n if tmp_node:\n count += 1\n # count += 1 for every additional node\n \n if count < k:\n return None\n \n # Reverse the group\n cur_node = head.next\n next_node = cur_node.next\n \n # Remember node1, the first node of this group\n node1 = head.next\n count = 1 \n \n while(next_node and count < k):\n tmp = next_node.next\n next_node.next = cur_node\n cur_node = next_node\n next_node = tmp\n count += 1 \n \n # cur_node = node_k\n # next_node = node_k+1 \n head.next = cur_node\n node1.next = next_node\n \n return node1 \n ","sub_path":"专题学习/链表/reverse-nodes-in-k-group.py","file_name":"reverse-nodes-in-k-group.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"98336888","text":"\nimport geoplotlib\nfrom geoplotlib.utils import read_csv\n\nfrom decor import nice\n\ndef show_map():\n\n\tdata = read_csv('data.csv')\n\tgeoplotlib.dot(data)\n\tgeoplotlib.show()\n\n\n\n\n@nice\ndef main():\n\tshow_map()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"open_dot.py","file_name":"open_dot.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"416844901","text":"import time\r\nfrom threading import current_thread\r\n\r\nimport pulsar\r\nfrom pulsar.apps.test import unittest\r\n\r\n\r\nclass TestEventLoop(unittest.TestCase):\r\n \r\n def testIOloop(self):\r\n ioloop = pulsar.thread_ioloop()\r\n self.assertTrue(ioloop)\r\n self.assertNotEqual(ioloop.tid, current_thread().ident)\r\n \r\n def test_add_callback(self):\r\n ioloop = pulsar.thread_ioloop()\r\n d = pulsar.Deferred()\r\n ioloop.add_callback(lambda: d.callback(current_thread().ident))\r\n # we should be able to wait less than a second\r\n yield d\r\n self.assertEqual(d.result, ioloop.tid)\r\n \r\n def test_add_timeout(self):\r\n ioloop = pulsar.thread_ioloop()\r\n d = pulsar.Deferred()\r\n now = time.time()\r\n timeout1 = ioloop.add_timeout(now+20,\r\n lambda: d.callback(current_thread().ident))\r\n timeout2 = ioloop.add_timeout(now+10,\r\n lambda: d.callback(current_thread().ident))\r\n # lets wake the ioloop\r\n ioloop.wake()\r\n self.assertTrue(timeout1 in ioloop._timeouts)\r\n self.assertTrue(timeout2 in ioloop._timeouts)\r\n ioloop.remove_timeout(timeout1)\r\n ioloop.remove_timeout(timeout2)\r\n self.assertFalse(timeout1 in ioloop._timeouts)\r\n self.assertFalse(timeout2 in ioloop._timeouts)\r\n timeout1 = ioloop.add_timeout(now+0.1,\r\n lambda: d.callback(current_thread().ident))\r\n ioloop.wake()\r\n time.sleep(0.2)\r\n self.assertTrue(d.called)\r\n self.assertEqual(d.result, ioloop.tid)\r\n self.assertFalse(timeout1 in ioloop._timeouts)\r\n \r\n def test_periodic(self):\r\n ioloop = pulsar.thread_ioloop()\r\n d = pulsar.Deferred()\r\n class p:\r\n def __init__(self):\r\n self.c = 0\r\n def __call__(self, periodic):\r\n self.c += 1\r\n if self.c == 2:\r\n raise ValueError()\r\n elif self.c == 
3:\r\n periodic.stop()\r\n d.callback(self.c)\r\n periodic = ioloop.add_periodic(p(), 1)\r\n yield d\r\n self.assertEqual(d.result, 3)\r\n self.assertFalse(periodic._running)\r\n \r\n \r\n \r\n ","sub_path":"tests/eventloop.py","file_name":"eventloop.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"223179767","text":"import cv2\nimport numpy as np\nimport urllib\nimport urllib2\nimport tensorflow as tf\nimport matplotlib.image as mpimg\n\ndef resize_and_pad_image(img, output_image_dim):\n \"\"\"Resize the image to make it IMAGE_DIM x IMAGE_DIM pixels in size.\n\n If an image is not square, it will pad the top/bottom or left/right\n with black pixels to ensure the image is square.\n\n Args:\n img: the input 3-color image\n output_image_dim: resized and padded output length (and width)\n\n Returns:\n resized and padded image\n \"\"\"\n\n h, w = img.shape[:2]\n\n # interpolation method\n if h > output_image_dim or w > output_image_dim:\n # use preferred interpolation method for shrinking image\n interp = cv2.INTER_AREA\n else:\n # use preferred interpolation method for stretching image\n interp = cv2.INTER_CUBIC\n\n # aspect ratio of image\n aspect = float(w) / h\n\n # compute scaling and pad sizing\n if aspect > 1: # Image is \"wide\". Add black pixels on top and bottom.\n new_w = output_image_dim\n new_h = np.round(new_w / aspect)\n pad_vert = (output_image_dim - new_h) / 2\n pad_top, pad_bot = int(np.floor(pad_vert)), int(np.ceil(pad_vert))\n pad_left, pad_right = 0, 0\n elif aspect < 1: # Image is \"tall\". Add black pixels on left and right.\n new_h = output_image_dim\n new_w = np.round(new_h * aspect)\n pad_horz = (output_image_dim - new_w) / 2\n pad_left, pad_right = int(np.floor(pad_horz)), int(np.ceil(pad_horz))\n pad_top, pad_bot = 0, 0\n else: # square image\n new_h = output_image_dim\n new_w = output_image_dim\n pad_left, pad_right, pad_top, pad_bot = 0, 0, 0, 0\n\n # scale to IMAGE_DIM x IMAGE_DIM and pad with zeros (black pixels)\n scaled_img = cv2.resize(img, (int(new_w), int(new_h)), interpolation=interp)\n scaled_img = cv2.copyMakeBorder(scaled_img,\n pad_top, pad_bot, pad_left, pad_right,\n borderType=cv2.BORDER_CONSTANT, value=0)\n return scaled_img\n\n\ndef preprocess_and_encode_images(image_paths, output_image_dim):\n \"\"\"Read an image, preprocess it, and encode as a jpeg.\n\n The image can be read from either a local path or url.\n The image must be RGB format.\n Preprocessing involves resizing and padding until the image is exactly\n output_image_dim x output_image_dim in size.\n After preprocessing, the image is encoded as a jpeg string to reduce the\n number of bytes. 
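(For instance, preprocess_and_encode_images(['photo.jpg'], 224) would return a one-element list of encoded bytes; the filename here is purely illustrative.) 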
This jpeg string will be transmitted to the server.\n\n Args:\n image_paths: list of image paths and/or urls\n output_image_dim: resized and padded output length (and width)\n\n Returns:\n the same images as a list of jpeg-encoded strings\n \"\"\"\n jpeg_batch = []\n\n for image_path in image_paths:\n if 'http' in image_path:\n resp = urllib.urlopen(image_path)\n feature = np.asarray(bytearray(resp.read()), dtype='uint8')\n feature = cv2.imdecode(feature, cv2.IMREAD_COLOR)\n # Resize/pad and re-encode remote images so they match the documented jpeg output.\n feature = resize_and_pad_image(feature, output_image_dim)\n _, encoded = cv2.imencode('.jpg', feature)\n jpeg_batch.append(encoded.tobytes())\n else:\n with open(image_path, 'rb') as f:\n jpeg_batch.append(f.read())\n\n return jpeg_batch\n","sub_path":"client/image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"271964340","text":"import time\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\n\nclass BinarySearchTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n def insert(self, value):\n direction = 'right' if value > self.value else 'left'\n if getattr(self, direction) is not None:\n self = getattr(self, direction)\n else:\n setattr(self, direction, BinarySearchTree(value))\n return value\n return self.insert(value)\n\n def contains(self, value):\n direction = 'right' if value > self.value else 'left'\n if self.value is not None and self.value != value and getattr(self, direction) is not None:\n self = getattr(self, direction)\n elif self.value is not None and self.value == value:\n return True\n else:\n return False\n return self.contains(value)\n\n def get_max(self):\n r = 'right'\n if self.value is not None and getattr(self, r) is not None:\n self = getattr(self, r)\n elif self.value is not None and getattr(self, r) is None:\n return self.value\n return self.get_max()\n\n\nbinary_list = BinarySearchTree(names_1[0])\n# needed the [0] to declare the initial value\nfor i in names_1:\n binary_list.insert(i)\n# Declares and inputs values into the Binary tree ^\nduplicates = []\n# for name_1 in names_1:\n# for name_2 in names_2:\n# if name_1 == name_2:\n# duplicates.append(name_1)\n# for i in range(0, 10000):\n# if names_1[i] in names_2:\n# duplicates.append(names_1[i])\n # Better but not the best takes about 1.16 seconds\n # Binary search tree would make second half of function faster\nfor i in names_2:\n if binary_list.contains(i):\n duplicates.append(i)\n# This finishes in about 0.1 seconds for me\n# Probably the best I'm gonna get\n\n\nend_time = time.time()\nprint(f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint(f\"runtime: {end_time - start_time} seconds\")\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"211709965","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0001_initial'),\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Mensagem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('texto', 
models.CharField(max_length=250, verbose_name=b'Mensagem', blank=True)),\n ('destino', models.CharField(default=b'todos', max_length=6, null=True, blank=True, choices=[(b'todos', b'TODOS'), (b'pessoa', b'PESSOA'), (b'grupo', b'GRUPO')])),\n ('data_criacao', models.DateField(default=datetime.date(2015, 7, 19), verbose_name=b'Data de Cria\\xc3\\xa7\\xc3\\xa3o')),\n ('grupo', models.ForeignKey(related_name='grupo_mensagens', blank=True, to='auth.Group', null=True)),\n ('pessoa_destino', models.ForeignKey(related_name='pessoa_mensagens', blank=True, to='core.Pessoa', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"academia_app/mensagem/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"345914703","text":"import time\nfrom typing import List, Tuple, Dict, Set # noqa # pylint: disable=unused-import\n\nimport text_classifier\n\n\n# ==========================\ndef pred_language_lex(lang_token_dict: Dict[str, Dict[str, int]],\n input_text: str) -> List[Tuple[str, float]]:\n \"\"\"\n Use the language lexicons to predict which language the text is written in.\n\n :param lang_token_dict: The token vs. count lexicon for each language Dict[lang, Dict[word, count]]\n :param input_text: The input text to LID.\n :return: The scored language labels.\n \"\"\"\n scored_labels = [] # List[Tuple[str, float]]\n\n input_tokens = input_text.lower().split()\n\n for lang_code, word_dict in lang_token_dict.items():\n score = 0.0\n\n for token in input_tokens:\n if word_dict.get(token, 0) > 0:\n score += 1\n\n scored_labels.append((lang_code, score))\n\n scored_labels.sort(key=lambda scored_label: scored_label[1], reverse=True)\n\n return scored_labels\n\n\n# ==========================\ndef add_predicted_lang_labels(feat_clsfr: text_classifier.FeatClsfr,\n lang_token_dict: Dict[str, Dict[str, int]],\n sent_list: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str, List[str]]],\n List[Tuple[str, str, List[str]]],\n List[Tuple[str, str, List[str]]]]:\n \"\"\"\n Add the predicted language labels to the sentences.\n\n :param feat_clsfr: The classifier object to use.\n :param lang_token_dict: The token vs. 
count lexicon for each language Dict[lang, Dict[word, count]]\n :param sent_list: The list of sentences labelled with only the truth.\n :return: The list of sentences labelled with the truth and the predicted label.\n \"\"\"\n sent_list_len = len(sent_list)\n sentence_num = 0\n\n sent_list_pred_nb = [] # type: List[Tuple[str, str, List[str]]]\n sent_list_pred_lex = [] # type: List[Tuple[str, str, List[str]]]\n sent_list_pred_cmb = [] # type: List[Tuple[str, str, List[str]]]\n\n correct_nb = 0\n correct_lex = 0\n correct_cmb = 0\n\n for sentence, truth in sent_list:\n # Get the naive Bayes LID prediction.\n prediction_nb = text_classifier.retrieve_text_class(feat_clsfr, sentence)\n\n # Get the lexicon based LID prediction.\n prediction_lex = pred_language_lex(lang_token_dict, sentence)\n\n if (prediction_nb[0][0] == 'xho') or (prediction_nb[0][0] == 'zul') or \\\n (prediction_nb[0][0] == 'ssw') or (prediction_nb[0][0] == 'nbl'):\n # ======================================================================== #\n # === If naive Bayes predicts the language to be from the Nguni family === #\n lang_list = ['zul', 'xho', 'nbl', 'ssw']\n\n scored_lang_list = [] # type: List[Tuple[str, float]]\n\n # Filter the lexicon LID result to just contain languages of this family.\n for lang, score in prediction_lex:\n if lang in lang_list:\n scored_lang_list.append((lang, score))\n\n # Sort lexicon LID results.\n scored_lang_list.sort(key=lambda scored_label: scored_label[1], reverse=True)\n\n # If the Lexicon LID result has a high confidence then use it, else use the naive Bayesian result.\n if scored_lang_list[0][1] > scored_lang_list[1][1]:\n prediction_cmb = scored_lang_list[0][0]\n else:\n prediction_cmb = prediction_nb[0][0]\n elif (prediction_nb[0][0] == 'nso') or (prediction_nb[0][0] == 'sot') or (prediction_nb[0][0] == 'tsn'):\n # ==================================================================================== #\n # === Else if naive Bayes predicts the language to be from the Sotho-Tswana family === #\n lang_list = ['nso', 'sot', 'tsn']\n\n scored_lang_list = [] # type: List[Tuple[str, float]]\n\n # Filter the lexicon LID result to just contain languages of this family.\n for lang, score in prediction_lex:\n if lang in lang_list:\n scored_lang_list.append((lang, score))\n\n # Sort lexicon LID results.\n scored_lang_list.sort(key=lambda scored_label: scored_label[1], reverse=True)\n\n # If the Lexicon LID result has a high confidence then use it, else use the naive Bayesian result.\n if scored_lang_list[0][1] > scored_lang_list[1][1]:\n prediction_cmb = scored_lang_list[0][0]\n else:\n prediction_cmb = prediction_nb[0][0]\n else:\n # ========================================================================== #\n # === Else just use the naive Bayesian result for the remaining families === #\n prediction_cmb = prediction_nb[0][0]\n\n # ========================================\n # === Record the predictions and stats ===\n sent_list_pred_nb.append((sentence, truth, [prediction_nb[0][0]]))\n sent_list_pred_lex.append((sentence, truth, [prediction_lex[0][0]]))\n sent_list_pred_cmb.append((sentence, truth, [prediction_cmb]))\n\n if prediction_nb[0][0] == truth:\n correct_nb += 1\n\n if prediction_lex[0][0] == truth:\n correct_lex += 1\n\n if prediction_cmb == truth:\n correct_cmb += 1\n\n sentence_num += 1\n\n if (truth != prediction_nb[0][0]) or (truth != prediction_cmb): # and (truth != prediction[1][0]):\n print(truth, prediction_nb[:3], sentence, flush=True)\n print(prediction_lex[:3])\n 
print(prediction_cmb)\n print()\n\n if (sentence_num % 100) == 0:\n print(str(round(sentence_num / float(sent_list_len) * 100.0, 2)) + \" \", flush=True)\n print(\"acc =\", correct_nb / float(sentence_num))\n print(\"acc_lex =\", correct_lex / float(sentence_num))\n print(\"acc_cmb =\", correct_cmb / float(sentence_num))\n print()\n\n return sent_list_pred_nb, sent_list_pred_lex, sent_list_pred_cmb\n\n\n# ==========================\ndef find_unique(lang_token_dict: Dict[str, Dict[str, int]],\n target_lang: str,\n neighbours: Set[str]) -> Set[str]:\n \"\"\"\n Find the unique set of words in lang that are not in the neighbours.\n :param lang_token_dict: Language lexicon (word vs. count for each language.)\n :param target_lang: The language to analyse.\n :param neighbours: The neighbouring languages to compare against.\n :return: The set of unique words in lang.\n \"\"\"\n\n target_lex = lang_token_dict.get(target_lang, None)\n unique_set = set() # type: Set[str]\n\n if target_lex is not None:\n for target_token in target_lex:\n neighbour_count = 0\n\n for neighbour_lang in neighbours:\n neighbour_lex = lang_token_dict.get(neighbour_lang, None)\n\n if (neighbour_lex is not None) and (target_token in neighbour_lex):\n neighbour_count += 1\n\n if neighbour_count == 0:\n unique_set.add(target_token)\n\n return unique_set\n\n\n# ==========================\ndef find_common(lang_token_dict: Dict[str, Dict[str, int]],\n languages: Set[str]) -> Set[str]:\n \"\"\"\n Find the common set of words in languages.\n :param lang_token_dict: Language lexicon (word vs. count for each language.)\n :param languages: The languages to analyse.\n :return: The set of common words between languages.\n \"\"\"\n\n common_tokens = None\n\n for lang in languages:\n token_dict = lang_token_dict.get(lang)\n\n if token_dict is not None:\n if common_tokens is None:\n common_tokens = set(token_dict.keys()) # type: Set[str]\n else:\n common_tokens.intersection_update(set(token_dict.keys()))\n\n return common_tokens\n\n\n# ======================================================================\n# === Testing script starts here =======================================\n# ======================================================================\nprint(\"Loading the text corpora...\")\nstart_time = time.time()\nlanguage_set = {'afr': '../data/afr/improved_afr.txt',\n 'eng': '../data/eng/improved_eng.txt',\n 'nbl': '../data/nbl/improved_nbl.txt',\n 'xho': '../data/xho/improved_xho.txt',\n 'zul': '../data/zul/improved_zul.txt',\n 'nso': '../data/nso/improved_nso.txt',\n 'sot': '../data/sot/improved_sot.txt',\n 'tsn': '../data/tsn/improved_tsn.txt',\n 'ssw': '../data/ssw/improved_ssw.txt',\n 'ven': '../data/ven/improved_ven.txt',\n 'tso': '../data/tso/improved_tso.txt'}\n\ntraining_samples = 3000\ntesting_samples = 1000\n\nmin_requested_sent_length = 15 # value used to truncate the text samples.\n\nsent_list_train, sent_list_test, lang_token_dict = text_classifier.load_sentences_all(language_set,\n min_requested_sent_length,\n training_samples,\n testing_samples)\n\n# text_classifier.save_samples_csv(sent_list_train, 'train_full_3k')\n# text_classifier.save_samples_csv(sent_list_test, 'test_full_1k')\n# text_classifier.save_samples_csv(sent_list_test, 'test_15_1k')\n\nend_time = time.time()\nprint('Data loading time = ' + str(end_time - start_time) + 's.')\nprint()\n\n# ==========================\nprint(\"Analysing the lexicons ... 
\")\nstart_time = time.time()\nfull_zul = find_unique(lang_token_dict, 'zul', set())\nfull_xho = find_unique(lang_token_dict, 'xho', set())\nfull_ssw = find_unique(lang_token_dict, 'ssw', set())\nfull_nbl = find_unique(lang_token_dict, 'nbl', set())\nunique_zul = find_unique(lang_token_dict, 'zul', {'xho', 'ssw', 'nbl'})\nunique_xho = find_unique(lang_token_dict, 'xho', {'zul', 'ssw', 'nbl'})\nunique_ssw = find_unique(lang_token_dict, 'ssw', {'xho', 'zul', 'nbl'})\nunique_nbl = find_unique(lang_token_dict, 'nbl', {'xho', 'zul', 'ssw'})\ncommon_zul_xho = find_common(lang_token_dict, {'xho', 'zul'})\ncommon_nguni = find_common(lang_token_dict, {'nbl', 'xho', 'zul', 'ssw'})\n\nfull_nso = find_unique(lang_token_dict, 'nso', set())\nfull_sot = find_unique(lang_token_dict, 'sot', set())\nfull_tsn = find_unique(lang_token_dict, 'tsn', set())\nunique_nso = find_unique(lang_token_dict, 'nso', {'sot', 'tsn'})\nunique_sot = find_unique(lang_token_dict, 'sot', {'nso', 'tsn'})\nunique_tsn = find_unique(lang_token_dict, 'tsn', {'nso', 'sot'})\ncommon_sotho_tswana = find_common(lang_token_dict, {'nso', 'sot', 'tsn'})\n\nprint(\"full_zul({0}):\".format(len(full_zul)), full_zul)\nprint(\"full_xho({0}):\".format(len(full_xho)), full_xho)\nprint(\"full_ssw({0}):\".format(len(full_ssw)), full_ssw)\nprint(\"full_nbl({0}):\".format(len(full_nbl)), full_nbl)\nprint(\"unique_zul({0}):\".format(len(unique_zul)), unique_zul)\nprint(\"unique_xho({0}):\".format(len(unique_xho)), unique_xho)\nprint(\"unique_ssw({0}):\".format(len(unique_ssw)), unique_ssw)\nprint(\"unique_nbl({0}):\".format(len(unique_nbl)), unique_nbl)\nprint(\"common_zul_xho({0}):\".format(len(common_zul_xho)), common_zul_xho)\nprint(\"common_nguni({0}):\".format(len(common_nguni)), common_nguni)\nprint()\nprint(\"full_nso({0}):\".format(len(full_nso)), full_nso)\nprint(\"full_sot({0}):\".format(len(full_sot)), full_sot)\nprint(\"full_tsn({0}):\".format(len(full_tsn)), full_tsn)\nprint(\"unique_nso({0}):\".format(len(unique_nso)), unique_nso)\nprint(\"unique_sot({0}):\".format(len(unique_sot)), unique_sot)\nprint(\"unique_tsn({0}):\".format(len(unique_tsn)), unique_tsn)\nprint(\"common_sotho_tswana({0}):\".format(len(common_sotho_tswana)), common_sotho_tswana)\nprint()\nend_time = time.time()\nprint('done. time = ' + str(end_time - start_time) + 's.')\n\n# ==========================\n# ===\n# Note: Clsfr name convention for baseline long sentence trained classifier is:\n# lid_za_clean_240_3k typically implies training samples of avrg 240 chars in length and 3k=3000 samples per language!\n# ===\n# text_clsfr_name = 'lid_za_clean_240_3k'\ntext_clsfr_name = 'lid_za_clean_240_4k'\n\nprint(\"Loading the NB LID classifier\", text_clsfr_name, \"... \")\nstart_time = time.time()\n\nfeat_clsfr = text_classifier.load_text_clsfr(text_clsfr_name)\nprint(\"load_result =\", (feat_clsfr is not None))\nend_time = time.time()\nprint('done. time = ' + str(end_time - start_time) + 's.')\nprint()\n\n# ==========================\nprint(\"Running LID on test data ... \")\nstart_time = time.time()\nsent_list_pred, sent_list_pred_lex, sent_list_pred_cmb = \\\n add_predicted_lang_labels(feat_clsfr, lang_token_dict, sent_list_test)\n\nend_time = time.time()\nprint('done. 
Testing time = ' + str(end_time - start_time) + 's.',\n round(len(sent_list_test) / (end_time - start_time), 2), 'operations/s')\nprint()\n\nlang_to_family_dict = {'afr': 'germanic',\n 'eng': 'germanic',\n 'zul': 'nguni',\n 'xho': 'nguni',\n 'ssw': 'nguni',\n 'nbl': 'nguni',\n 'nso': 'sotho-tswana',\n 'sot': 'sotho-tswana',\n 'tsn': 'sotho-tswana',\n 'tso': 'tswa–ronga',\n 'ven': 'venda'}\n\nproposed_lang_label_list = ['afr', 'eng', 'zul', 'xho', 'ssw', 'nbl', 'nso', 'sot', 'tsn', 'tso', 'ven']\n\nprint(\"Analysing LID test results ...\")\nstart_time = time.time()\n\nlang_result_list = [] # type: List[Tuple[str, str, List[str]]]\nlang_result_list_lex = [] # type: List[Tuple[str, str, List[str]]]\nlang_result_list_cmb = [] # type: List[Tuple[str, str, List[str]]]\n\nfam_result_list = [] # type: List[Tuple[str, str, List[str]]]\nfam_result_list_lex = [] # type: List[Tuple[str, str, List[str]]]\nfam_result_list_cmb = [] # type: List[Tuple[str, str, List[str]]]\n\nfor sentence, truth, pred_list in sent_list_pred:\n lang_result_list.append((sentence, truth, pred_list))\n fam_result_list.append((sentence, lang_to_family_dict[truth], [lang_to_family_dict[pred_list[0]]]))\n\nfor sentence, truth, pred_list in sent_list_pred_lex:\n lang_result_list_lex.append((sentence, truth, pred_list))\n fam_result_list_lex.append((sentence, lang_to_family_dict[truth], [lang_to_family_dict[pred_list[0]]]))\n\nfor sentence, truth, pred_list in sent_list_pred_cmb:\n lang_result_list_cmb.append((sentence, truth, pred_list))\n fam_result_list_cmb.append((sentence, lang_to_family_dict[truth], [lang_to_family_dict[pred_list[0]]]))\n\nlang_acc, lang_f1, lang_confusion_dict = text_classifier.analyse_clsfr_results(lang_result_list)\nprint(\"lang_acc, lang_f1\", lang_acc, lang_f1)\ntext_classifier.print_confusion_matrix(lang_confusion_dict, proposed_lang_label_list, 'recall')\ntext_classifier.print_confusion_matrix(lang_confusion_dict, proposed_lang_label_list, 'precision')\ntext_classifier.print_confusion_matrix(lang_confusion_dict, proposed_lang_label_list, 'fscore')\n\nprint()\nprint()\n\nfam_acc, fam_f1, fam_confusion_dict = text_classifier.analyse_clsfr_results(fam_result_list)\nprint(\"fam_acc, fam_f1\", fam_acc, fam_f1)\ntext_classifier.print_confusion_matrix(fam_confusion_dict, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 'recall')\n\ntext_classifier.print_confusion_matrix(fam_confusion_dict, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 'precision')\n\ntext_classifier.print_confusion_matrix(fam_confusion_dict, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 'fscore')\nprint()\nprint()\nprint()\nprint()\n\nlang_acc_lex, lang_f1_lex, lang_confusion_dict_lex = text_classifier.analyse_clsfr_results(lang_result_list_lex)\nprint(\"lang_acc_lex, lang_f1_lex\", lang_acc_lex, lang_f1_lex)\ntext_classifier.print_confusion_matrix(lang_confusion_dict_lex, proposed_lang_label_list, 'recall')\ntext_classifier.print_confusion_matrix(lang_confusion_dict_lex, proposed_lang_label_list, 'precision')\ntext_classifier.print_confusion_matrix(lang_confusion_dict_lex, proposed_lang_label_list, 'fscore')\n\nprint()\nprint()\n\nfam_acc_lex, fam_f1_lex, fam_confusion_dict_lex = text_classifier.analyse_clsfr_results(fam_result_list_lex)\nprint(\"fam_acc_lex, fam_f1_lex\", fam_acc_lex, fam_f1_lex)\ntext_classifier.print_confusion_matrix(fam_confusion_dict_lex, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 
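# Hedged refactor sketch, not in the original script: every classifier below
# prints the same confusion matrix three times with a different mode string,
# so a small helper could keep that loop in one place.
def print_all_matrices(confusion_dict, labels):
    for mode in ('recall', 'precision', 'fscore'):
        text_classifier.print_confusion_matrix(confusion_dict, labels, mode)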
'recall')\ntext_classifier.print_confusion_matrix(fam_confusion_dict_lex, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 'precision')\ntext_classifier.print_confusion_matrix(fam_confusion_dict_lex, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 'fscore')\n\nprint()\nprint()\nprint()\nprint()\n\nlang_acc_cmb, lang_f1_cmb, lang_confusion_dict_cmb = text_classifier.analyse_clsfr_results(lang_result_list_cmb)\nprint(\"lang_acc_cmb, lang_f1_cmb\", lang_acc_cmb, lang_f1_cmb)\ntext_classifier.print_confusion_matrix(lang_confusion_dict_cmb, proposed_lang_label_list, 'recall')\ntext_classifier.print_confusion_matrix(lang_confusion_dict_cmb, proposed_lang_label_list, 'precision')\ntext_classifier.print_confusion_matrix(lang_confusion_dict_cmb, proposed_lang_label_list, 'fscore')\n\nprint()\nprint()\n\nfam_acc_cmb, fam_f1_cmb, fam_confusion_dict_cmb = text_classifier.analyse_clsfr_results(fam_result_list_cmb)\nprint(\"fam_acc_cmb, fam_f1_cmb\", fam_acc_cmb, fam_f1_cmb)\ntext_classifier.print_confusion_matrix(fam_confusion_dict_cmb, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 'recall')\ntext_classifier.print_confusion_matrix(fam_confusion_dict_cmb, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 'precision')\ntext_classifier.print_confusion_matrix(fam_confusion_dict_cmb, ['germanic',\n 'nguni',\n 'sotho-tswana',\n 'tswa–ronga',\n 'venda'], 'fscore')\n\nprint()\nprint()\nprint()\nprint()\n","sub_path":"code/lang_ident_test.py","file_name":"lang_ident_test.py","file_ext":"py","file_size_in_byte":20312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"316657047","text":"from __future__ import unicode_literals\nimport threading\nimport threader\nfrom kivy.uix.label import Label\nfrom pytube import Playlist, YouTube\nimport os\n\nimport youtube_dl\nfrom utils import downloadFolder, easierNames, loadi18n\n\n\nclass NetThread(threading.Thread):\n def __init__(self, parent):\n super().__init__()\n self.stopped = False\n self.file_size = 0\n self.parent = parent\n self.t = loadi18n()\n\n def getAllLinks(self, playList):\n allLinks = []\n youtubeLink = \"https://www.youtube.com\"\n\n for linkprefix in playList.parse_links():\n allLinks.append(youtubeLink + linkprefix)\n\n print(allLinks)\n return allLinks\n\n def progressFire(self, stream, chunk, file_handle, remaining):\n # self.currentStatusLabel.text = bytes_remaining\n permil = (1000 * (self.file_size - remaining)) / self.file_size\n self.parent.pb.value = permil\n\n def myHook(self, d):\n self.parent.currentStatusLabel.text = d[\"filename\"]\n permil = 1000 * (d[\"downloaded_bytes\"] / d[\"total_bytes\"])\n self.parent.pb.value = permil\n\n def downloadVideo(self, link, params):\n\n # print(params)\n self.parent.currentStatusLabel.text = \"Loading...\"\n try:\n video = YouTube(link, on_progress_callback=self.progressFire)\n # Get the first video type - usually the best quality.\n # video_type = video.streams.filter(params).all()\n video_type = video.streams.first()\n if video_type != [] and video_type is not None:\n try:\n self.parent.currentStatusLabel.text = video_type.title\n self.file_size = video_type.filesize\n # Starts the download process\n video_type.download(downloadFolder())\n self.parent.currentStatusLabel.text = \"Ready to download.\"\n self.parent.pb.value = 0\n except Exception as e:\n print(e)\n print(\"Trying alternate method!\")\n self.parent.currentStatusLabel.text = \"Trying alternate 
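# Hedged sketch, not original code: myHook() above indexes d["total_bytes"]
# directly, but youtube_dl progress dicts can omit that key for streams whose
# size is unknown; "total_bytes_estimate" is the usual fallback key.
def safe_progress_hook(d, progress_bar):
    if d.get("status") != "downloading":
        return
    total = d.get("total_bytes") or d.get("total_bytes_estimate")
    if total:
        progress_bar.value = 1000 * d.get("downloaded_bytes", 0) / total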
method!\"\n self.parent.pb.value = 0\n try:\n os.chdir(downloadFolder())\n ydl_opts = {\"progress_hooks\": [self.myHook]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n link = [link]\n print(link)\n ydl.download(link)\n except Exception as e:\n print(e)\n else:\n self.parent.currentStatusLabel.text = \"Unable to find!\"\n except:\n self.parent.currentStatusLabel.text = \"Unable to find!\"\n self.parent.pb.value = 0\n # print(video_type)\n easierNames()\n\n def youtubeDownload(self, link):\n try:\n params = {\"progressive\": True}\n\n if self.parent.playlistMode is True:\n self.parent.currentStatusLabel.text = \"Downloading playlist..\"\n myLinks = self.getAllLinks(Playlist(i))\n for k in myLinks:\n self.downloadVideo(k, params)\n self.parent.downloadButton.disabled = False\n # self.parent.stopButton.disabled = True\n\n else:\n self.downloadVideo(link, params)\n self.parent.downloadButton.disabled = False\n # self.parent.stopButton.disabled = True\n\n self.parent.currentStatusLabel.text = \"Ready for another video!\"\n self.parent.pb.value = 0\n except:\n print(\"Skipping to next video!\")\n self.parent.currentStatusLabel.text = \"Error downloading!\"\n self.parent.pb.value = 0\n\n def fallbackMethod(self, link):\n print(\"Fallback method!!!\")\n try:\n os.chdir(downloadFolder())\n ydl_opts = {\"progress_hooks\": [self.myHook]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n link = [link]\n print(link)\n ydl.download(link)\n self.parent.currentStatusLabel.text = \"Ready for another video!\"\n self.parent.pb.value = 0\n except Exception as e:\n print(e)\n self.parent.currentStatusLabel.text = self.t[\"couldnt_find\"]\n self.parent.pb.value = 0\n\n def run(self):\n params = {}\n\n for i in self.parent.myUrl:\n if \"youtube\" in i:\n self.youtubeDownload(i)\n else:\n self.fallbackMethod(i)\n\n self.parent.downloadButton.disabled = False\n # self.parent.stopButton.disabled = True\n self.stopped = True\n\n def is_alive(self):\n return not self.stopped\n\n def end(self):\n if self.is_alive():\n threader.killThread(self.ident)\n self.stopped = True\n","sub_path":"net_thread.py","file_name":"net_thread.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"140624251","text":"import pytest\nimport torch # noqa F403\n\nfrom funsor.cnf import Contraction\nfrom funsor.domains import bint # noqa F403\nfrom funsor.einsum import einsum, naive_plated_einsum\nfrom funsor.interpreter import interpretation, reinterpret\nfrom funsor.terms import Number, eager, normalize, reflect\nfrom funsor.testing import assert_close, check_funsor, make_einsum_example # , xfail_param\nfrom funsor.torch import Tensor\nfrom funsor.util import quote\n\nEINSUM_EXAMPLES = [\n (\"a,b->\", ''),\n (\"ab,a->\", ''),\n (\"a,a->\", ''),\n (\"a,a->a\", ''),\n (\"ab,bc,cd->da\", ''),\n (\"ab,cd,bc->da\", ''),\n (\"a,a,a,ab->ab\", ''),\n ('i->', 'i'),\n (',i->', 'i'),\n ('ai->', 'i'),\n (',ai,abij->', 'ij'),\n ('a,ai,bij->', 'ij'),\n ('ai,abi,bci,cdi->', 'i'),\n ('aij,abij,bcij->', 'ij'),\n ('a,abi,bcij,cdij->', 'ij'),\n]\n\n\n@pytest.mark.parametrize('equation,plates', EINSUM_EXAMPLES)\n@pytest.mark.parametrize('backend', ['torch', 'pyro.ops.einsum.torch_log'])\n@pytest.mark.parametrize('einsum_impl', [einsum, naive_plated_einsum])\ndef test_normalize_einsum(equation, plates, backend, einsum_impl):\n inputs, outputs, sizes, operands, funsor_operands = make_einsum_example(equation)\n\n with interpretation(reflect):\n expr = 
einsum_impl(equation, *funsor_operands, backend=backend, plates=plates)\n\n with interpretation(normalize):\n transformed_expr = reinterpret(expr)\n\n assert isinstance(transformed_expr, Contraction)\n check_funsor(transformed_expr, expr.inputs, expr.output)\n\n assert all(isinstance(v, (Number, Tensor, Contraction)) for v in transformed_expr.terms)\n\n with interpretation(normalize):\n transformed_expr2 = reinterpret(transformed_expr)\n\n assert transformed_expr2 is transformed_expr # check normalization\n\n with interpretation(eager):\n actual = reinterpret(transformed_expr)\n expected = reinterpret(expr)\n\n assert_close(actual, expected, rtol=1e-4)\n\n actual = eval(quote(expected)) # requires torch, bint\n assert_close(actual, expected)\n","sub_path":"test/test_cnf.py","file_name":"test_cnf.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"295231065","text":"#!/usr/local/anaconda3/bin\n# -*- coding:utf-8 -*-\n\n'''\nFile Name : hello.py\nAuthor : liuzhengyan\nMail : zhengyan_liu@hotmail.com\nAddress : https://www.liuzhy.xin\nCreated Time: 2017-06-03 15:51:04\n'''\n\n' a test module '\n\n__author__ = 'liuzhengyan'\nimport sys\ndef test():\n args = sys.argv\n if len(args) == 1:\n print(\"Hello, World\")\n elif len(args) == 2:\n print(\"Hello,{0}\".format(args[1]))\n else:\n print(\"Too many arguments!\")\n\nif __name__ == '__main__':\n test()\n\n","sub_path":"lxfPython/module/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"242674275","text":"# -*- coding: utf-8 -*-\n\"\"\"\n volcanicpixels.ssl.csr\n ~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\nfrom base64 import b64encode\nfrom Crypto.Signature.PKCS1_v1_5 import PKCS115_SigScheme\nfrom Crypto.Hash.SHA import SHA1Hash\nfrom pyasn1.codec.der import encoder, decoder\nfrom pyasn1.type import univ, char\nfrom .asn1 import (\n CertificationRequest as _CertificationRequest, CertificationRequestInfo,\n Name, Attributes2, Attributes, Attribute, AttributeType, AttributeValue,\n SubjectPublicKeyInfo, AlgorithmIdentifier)\nfrom .helpers import get_keypair\n\n\"\"\"\nDefine the ObjectIdentifier constants\n\"\"\"\n\nSHA1_CHECKSUM_WITH_RSA = \"1.2.840.113549.1.1.5\"\n\n\ndef generate_csr(pkey, domain, **fields):\n \"\"\"Create a certificate signing request\n\n Arguments: pkey - The private key to associate with this request\n digest - Digestion method to use for signing, default is md5\n **fields - The fields to add the the request. 
Possible\n arguments are:\n country\n state\n locality\n org\n org_unit\n email_address\n \"\"\"\n\n\ndef test_csr():\n request = CertificationRequest()\n request.set_subject_field('country', 'GB')\n request.set_subject_field('state', 'England')\n request.set_subject_field('locality', 'Ringwood')\n request.set_subject_field('organization', 'Platinum Mirror LTD')\n request.set_subject_field('organizational_unit', 'Digital Security')\n request.set_subject_field('common_name', 'www.volcanicpixels.com')\n request.set_subject_field('email', 'business@platinummirror.com')\n keypair = get_keypair(False)\n request.set_keypair(keypair)\n return request.encode()\n\n\nclass SubjectField():\n type = 'PrintableString'\n\n def __init__(self, value):\n self.value = value\n\n def get_attribute_value(self):\n return self.value\n\n def get_attribute(self):\n attribute = Attribute()\n attribute_type = AttributeType(self.identifier)\n attribute.setComponentByName('type', attribute_type)\n attribute_value = AttributeValue()\n value = self.get_attribute_value()\n attribute_value.setComponentByName(self.type, value)\n attribute.setComponentByName('value', attribute_value)\n return attribute\n\n def get_asn1(self):\n attributes = Attributes()\n attribute = self.get_attribute()\n attributes.setComponentByPosition(0, attribute)\n return attributes\n\n\nclass CommonNameSubjectField(SubjectField):\n identifier = '2.5.4.3'\n\n\nclass CountrySubjectField(SubjectField):\n identifier = '2.5.4.6'\n\n\nclass NameSubjectField(SubjectField):\n identifier = '2.5.4.41'\n\n\nclass OrganizationSubjectField(SubjectField):\n identifier = '2.5.4.10'\n\n\nclass OrganizationalUnitSubjectField(SubjectField):\n identifier = '2.5.4.11'\n\n\nclass TelephoneSubjectField(SubjectField):\n identifier = '2.5.4.20'\n\n\nclass StreetAddressSubjectField(SubjectField):\n identifier = '2.5.4.9'\n\n\nclass LocalitySubjectField(SubjectField):\n identifier = '2.5.4.7'\n\n\nclass StateSubjectField(SubjectField):\n identifier = '2.5.4.8'\n\n\nclass EmailSubjectField(SubjectField):\n identifier = '1.2.840.113549.1.9.1'\n type = 'IA5String'\n\n\nclass UnstructuredName(SubjectField):\n identifier = '1.2.840.113549.1.9.2'\n type = 'Set'\n\n def get_attribute_value(self):\n value = univ.Set()\n name = char.PrintableString(self.value)\n value.setComponentByPosition(0, name)\n return value\n\n\nclass CertificationRequest():\n version = 0\n\n def __init__(self, keypair=None, subject_fields=None, attributes=None):\n self.subject_fields = []\n self.attributes = {}\n self.keypair = None\n self.unstructuredName = None\n\n if subject_fields:\n self.set_subject_fields(subject_fields)\n\n if keypair:\n self.set_keypair(keypair)\n\n def set_keypair(self, keypair):\n \"\"\"Keypair must already by an _rsaobj (i.e. 
been through importKey)\"\"\"\n self.keypair = keypair\n self.signer = PKCS115_SigScheme(self.keypair)\n\n def set_subject_fields(self, fields):\n for (field, value) in fields:\n self.set_subject_field(field, value)\n\n def set_subject_field(self, field, value):\n def set_field(cls):\n self.subject_fields.append(cls(value))\n\n if field == 'common_name':\n return set_field(CommonNameSubjectField)\n\n if field == 'country':\n return set_field(CountrySubjectField)\n\n if field == 'name':\n return set_field(NameSubjectField)\n\n if field == 'email_address':\n return set_field(EmailSubjectField)\n\n if field == 'organization':\n self.unstructuredName = value\n return set_field(OrganizationSubjectField)\n\n if field == 'organizational_unit':\n return set_field(OrganizationalUnitSubjectField)\n\n if field == 'state':\n return set_field(StateSubjectField)\n\n if field == 'telephone':\n return set_field(TelephoneSubjectField)\n\n if field == 'street_address':\n return set_field(StreetAddressSubjectField)\n\n if field == 'locality':\n return set_field(LocalitySubjectField)\n\n raise NotImplementedError(\"The %s field is not implemented\" % field)\n\n def get_subject_asn1(self):\n subject = Name()\n i = 0\n for field in self.subject_fields:\n subject.setComponentByPosition(i, field.get_asn1())\n i += 1\n return subject\n\n def get_certification_request_info_asn1(self):\n request_info = CertificationRequestInfo()\n request_info.setComponentByName('version', 0)\n\n subject = self.get_subject_asn1()\n request_info.setComponentByName('subject', subject)\n\n subject_pk_info = self.get_subject_publickey_info_asn1()\n request_info.setComponentByName('subjectPKInfo', subject_pk_info)\n\n attributes = Attributes2()\n if self.unstructuredName is not None:\n name = UnstructuredName(self.unstructuredName).get_attribute()\n attributes.setComponentByPosition(0, name)\n request_info.setComponentByName('attributes', attributes)\n\n return request_info\n\n def get_subject_publickey_info_asn1(self):\n publickey_info = SubjectPublicKeyInfo()\n\n algorithm_identifier = AlgorithmIdentifier()\n algorithm_identifier.setComponentByName(\n 'algorithm', '1.2.840.113549.1.1.1')\n algorithm_identifier.setComponentByName('parameters', univ.Null())\n publickey_info.setComponentByName('algorithm', algorithm_identifier)\n\n if not self.keypair:\n raise KeyMissingError(\"No Key Provided\")\n\n publickey = self.keypair.publickey()\n\n binary = publickey.exportKey('DER')\n tmp = decoder.decode(binary)\n\n publickey_info.setComponentByName('subjectPublicKey', tmp[0][1])\n return publickey_info\n\n def get_signature_algorithm_asn1(self):\n algorithm = AlgorithmIdentifier()\n algorithm.setComponentByName('algorithm', SHA1_CHECKSUM_WITH_RSA)\n algorithm.setComponentByName('parameters', univ.Null())\n return algorithm\n\n def get_signature(self, request_info, bits=2048):\n # See ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.pdf (9.2)\n def hex2bin(hexdata):\n return bin(int(hexdata, 16))[2:].zfill(len(hexdata)*4)\n\n def tobits(s):\n result = \"'\"\n rv = []\n for c in s:\n bits = bin(ord(c))[2:]\n bits = '00000000'[len(bits):] + bits\n rv.extend([int(b) for b in bits])\n for bit in rv:\n result += str(bit)\n\n result += \"'B\"\n return result\n\n digest = encoder.encode(request_info)\n\n mHash = SHA1Hash(digest)\n\n signature = self.signer.sign(mHash)\n\n return tobits(signature)\n\n def get_asn1(self):\n \"\"\"Get's the ASN1 object for the certifcation request\"\"\"\n request = _CertificationRequest()\n\n request_info = 
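# Hedged sketch of what the empty generate_csr() stub above could do with the
# CertificationRequest class being defined here. generate_csr_sketch and its
# wiring are assumptions, not the module's actual implementation; field names
# must match the ones set_subject_field() understands ('country', 'state',
# 'locality', 'organization', 'organizational_unit', 'email_address', ...).
def generate_csr_sketch(pkey, domain, **fields):
    request = CertificationRequest()
    request.set_subject_field('common_name', domain)
    for field, value in fields.items():
        request.set_subject_field(field, value)
    request.set_keypair(pkey)  # pkey: an RSA key object that has been through importKey
    return request.export()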
self.get_certification_request_info_asn1()\n        request.setComponentByName('certificationRequestInfo', request_info)\n\n        signature_algorithm = self.get_signature_algorithm_asn1()\n        request.setComponentByName('signatureAlgorithm', signature_algorithm)\n\n        signature = self.get_signature(request_info)\n        request.setComponentByName('signature', signature)\n\n        return request\n\n    def encode(self):\n        asn1 = self.get_asn1()\n        encoded = encoder.encode(asn1)\n        encoded = b64encode(encoded)\n        lines = []\n        for i in xrange(0, len(encoded), 64):\n            lines.append(encoded[i:i+64])\n        encoded = '\\n'.join(lines)\n        return encoded\n\n    def export(self):\n        header = \"-----BEGIN CERTIFICATE REQUEST-----\\n\"\n        footer = \"\\n-----END CERTIFICATE REQUEST-----\"\n        encoded = self.encode()\n        return header + encoded + footer\n\n\nclass KeyMissingError(Exception):\n    pass\n","sub_path":"volcanicpixels/ssl/csr.py","file_name":"csr.py","file_ext":"py","file_size_in_byte":9186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"591042240","text":"import json\nfrom typing import Any, Dict, Union\n\nimport boto3\n\nfrom config import get_property\nfrom model import TaskAssignment, Status\nfrom notificator import notify\n\ndynamo = boto3.resource(\"dynamodb\")\ntask_table = dynamo.Table(get_property(\"task_table_name\"))\n\nsqs = boto3.resource(\"sqs\")\ntask_status_changed_queue = sqs.Queue(get_property(\"task_status_changed_queue_url\"))\n\n\ndef finish_task(event: Dict[str, Any], context: Any) -> Dict[str, Union[str, int]]:\n    try:\n        return process(event)\n    except Exception as e:\n        print(f\"Exception: {e}\")\n        return {\n            \"statusCode\": 500,\n            \"body\": str(e),\n        }\n\n\ndef process(event: Dict[str, Any]) -> Dict[str, Any]:\n    task_id = event[\"pathParameters\"][\"task_id\"]\n\n    db_item = task_table.get_item(\n        Key={\n            \"task_id\": task_id,\n        },\n    )\n    if db_item is None or \"Item\" not in db_item:\n        raise Exception(f\"Task with id {task_id} doesn't exist. Cannot finish this task.\")\n\n    task = TaskAssignment.from_json(db_item[\"Item\"])\n\n    if task.status != Status.IN_PROGRESS and task.status != Status.REJECTED:\n        raise Exception(f\"Tasks in status {task.status} cannot be finished.\")\n\n    task.status = Status.FINISHED\n    json_task = task.to_json()\n\n    task_table.put_item(\n        Item=json_task,\n    )\n    notify(task)\n\n    return {\n        \"statusCode\": 200,\n        \"body\": json.dumps(json_task),\n    }\n","sub_path":"finish_task_handler.py","file_name":"finish_task_handler.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"363720135","text":"#!/usr/bin/env python3\n\n\nfrom flask import Flask, render_template, send_file, flash, request, redirect, url_for\nfrom werkzeug.utils import secure_filename\n\nimport os\nfrom pdfrw import PdfReader, PdfWriter\n\nimport webbrowser\n\n\nUPLOAD_FOLDER='./uploads/'\n\n# If the uploads folder happens not to exist, create it\nif not os.path.exists('./uploads/'):\n    os.makedirs('./uploads/')\n\nALLOWED_EXTENSIONS = {'pdf'}\n\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\ndef list_uploaded_files():\n    uploaded_files = []\n    for file_name in os.listdir('./uploads/'):\n        uploaded_files.append(file_name)\n    return uploaded_files\n\n# Home page\n@app.route('/', methods=['GET'])\n\ndef home():\n    return render_template(\"home.html\")\n\n\n# Merge page\n@app.route('/fusion', methods=['GET'])\n\ndef merge():\n    writer = PdfWriter()\n    files = [x for x in os.listdir('uploads') if x.endswith('.pdf')]\n    for fname in sorted(files):\n        writer.addpages(PdfReader(os.path.join('uploads', fname)).pages)\n    writer.write(\"output.pdf\")\n    # Delete the locally stored files\n    files = os.listdir('./uploads/')\n    for f in files:\n        os.remove('./uploads/' + f)\n    return render_template(\"fusion.html\")\n\n# Download page\n@app.route('/download', methods=['GET'])\n\ndef download():\n    return send_file('output.pdf')\n\n# Upload page\n@app.route('/upload', methods=['GET', 'POST'])\n\ndef upload():\n    if request.method == 'POST':\n        # check if the post request has the file part\n        if 'file' not in request.files:\n            flash('No file part')\n            return redirect(request.url)\n        file = request.files['file']\n        # if user does not select file, browser also\n        # submit an empty part without filename\n        if file.filename == '':\n            flash('No selected file')\n            return redirect(request.url)\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            return redirect(url_for('upload',\n                                    filename=filename))\n    return render_template(\"upload.html\", uploaded_files=list_uploaded_files())\n\nif __name__ == '__main__':\n    # Open a browser window pointing at the right port\n    webbrowser.open('http://127.0.0.1:5000/')\n    # Start the server locally\n    app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
{"seq_id":"216571299","text":"#!/usr/bin/env python\n# _*_coding:utf-8_*_\n# Author: created by yang.hong\n\n\nfrom openpyxl import load_workbook\nimport pprint\n\n\nclass Vidict(dict):\n    def __missing__(self, key):\n        value = self[key] = type(self)()\n        return value\n\n\ndef covert_to_dict(filename):\n    \"\"\"\n    Convert an Excel sheet into a nested dict: the value in column 2 of each row\n    becomes the first-level key, and the header names in row 1 become the\n    second-level keys of the generated dict.\n    :param filename: Path of the Excel workbook.\n    :return: The nested dict.\n    \"\"\"\n    d = Vidict()\n    wb = load_workbook(filename=filename)\n    sheet = wb.active\n    for i in range(2, sheet.max_row+1):\n        for j in range(2, 9):\n            d[sheet.cell(row=i, column=2).value][sheet.cell(row=1, column=j).value] = \\\n                sheet.cell(row=i, column=j).value\n    return d\n\n\ndef covert_to_excel(filename):\n    \"\"\"\n    Convert dict-format data back into an Excel sheet (currently a stub).\n    :param filename:\n    :return:\n    \"\"\"\n    return True\n\n\ndata = covert_to_dict('./conf/test.xlsx')\nfor name in data:\n    print(name)","sub_path":"devops/jenkinsapi_v2/excel_handler.py","file_name":"excel_handler.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
{"seq_id":"77648074","text":"from app.models import User, Event\nfrom flask_login import current_user\nfrom app import db\nfrom app.main import bp\nfrom datetime import datetime\n\n@bp.context_processor\ndef inject_user():\n    online_user = User.get_online_users()\n    online_list = []\n    for online in online_user:\n        online_list.append(online.decode('utf-8'))\n    return dict(online_user=online_list)\n\n@bp.context_processor\ndef 
inject_agenda():\n    all_events = Event.all_events()\n    event_list = []\n    for item in all_events:\n        event_list.append(item)\n    return dict(agenda_list=event_list)\n\n@bp.before_app_request\ndef before_request():\n    if current_user.is_authenticated:\n        current_user.last_seen = datetime.utcnow()\n        User.mark_online(current_user.username)\n        db.session.commit()","sub_path":"app/main/routes/contextprocessor.py","file_name":"contextprocessor.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
{"seq_id":"243558204","text":"\n\nfrom util import dhash\n\ndef hash_list(values):\n    '''\n    takes a list of arbitrary data and returns a list of that data hashed in the same order\n    '''\n    for i in range(len(values)):\n        values[i] = dhash(values[i])\n    return values\n\n\ndef make_merkle(hash_values):\n    '''\n    returns the merkle root from a list of hash values\n    '''\n    hashes = list(hash_values)\n    while len(hashes) > 1:\n        '''\n        if the length of the list is not even we will need to concatenate the last\n        value with itself\n        '''\n        if len(hashes) % 2 == 1:\n            hashes.append(hashes[-1])\n        l = []\n        for i in range(0, len(hashes), 2):\n            l.append(dhash(hashes[i] + hashes[i+1]))\n        hashes = l\n\n    return hashes[0]\n\n","sub_path":"proj4/merkle_tree.py","file_name":"merkle_tree.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
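# Hedged usage sketch for the merkle helpers above; dhash comes from the
# project's own util module, so the concrete digests depend on it. The calls
# below are illustrative, not part of the original file.
# leaves = hash_list([b'tx1', b'tx2', b'tx3'])  # odd count: the last leaf pairs with itself
# root = make_merkle(leaves)                    # pairwise-hash upward to a single root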
{"seq_id":"95274365","text":"from bicycles import Bike, BikeShop, Customer, Wheel, Frame, BikeManufacturer\n\n\nif __name__ == '__main__':\n    wheel1 = Wheel('wheel1', 5, 20)\n    wheel2 = Wheel('wheel2', 4, 100)\n    wheel3 = Wheel('wheel3', 3, 250)\n    frame1 = Frame('frame1', 10, 75)\n    frame2 = Frame('frame2', 8, 250)\n    frame3 = Frame('frame3', 4, 400)\n    manufacturer1 = BikeManufacturer('Manu1', 1.1)\n    manufacturer2 = BikeManufacturer('Manu2', 1.2)\n    bike1 = Bike('bike1', wheel1, frame1, manufacturer1)\n    bike2 = Bike('bike2', wheel1, frame2, manufacturer1)\n    bike3 = Bike('bike3', wheel2, frame2, manufacturer2)\n    bike4 = Bike('bike4', wheel3, frame2, manufacturer2)\n    bike5 = Bike('bike5', wheel2, frame3, manufacturer2)\n    bike6 = Bike('bike6', wheel3, frame3, manufacturer1)\n    bike_models = {bike1: 4, bike2: 2, bike3: 5, bike4: 0, bike5: 1, bike6: 4}\n    bike_shop = BikeShop(bike_models, 0, 1.2)\n    customer1 = Customer('Bob', 200)\n    customer2 = Customer('Carol', 500)\n    customer3 = Customer('John', 1000)\n    customers = [customer1, customer2, customer3]\n    print(bike_shop.bikes)\n    print(customers)\n    print('{} can afford {}'.format(customer1.name, bike_shop.afford(customer1)))\n    print(customer1.buy(bike_shop, bike1))\n    print('{} can afford {}'.format(customer2.name, bike_shop.afford(customer2)))\n    print(customer2.buy(bike_shop, bike2))\n    print('{} can afford {}'.format(customer3.name, bike_shop.afford(customer3)))\n    print(customer3.buy(bike_shop, bike5))\n    print('Inventory:{}'.format(bike_models))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
{"seq_id":"429565364","text":"import cv2\n\nface_cascade = cv2.CascadeClassifier('C:/Users/GL553VD/Desktop/Comvis/Module 1 - Face Recognition/haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('C:/Users/GL553VD/Desktop/Comvis/Module 1 - Face Recognition/haarcascade_eye.xml')\n# use the full path to the xml files if you run this under anaconda\n\ndef detect(gray, frame):\n    faces = face_cascade.detectMultiScale(gray, 1.1, 5)  # params: grayscale image, scale factor, minimum neighbours\n\n    for (x, y, w, h) in faces:  # x, y = upper-left corner, w = width, h = height\n        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1)  # (0, 255, 0) = box colour, 1 = border width\n\n        region_gray = gray[y:y+h, x:x+w]  # region of interest (grayscale)\n        region_color = frame[y:y+h, x:x+w]  # region of interest (colour frame)\n        eyes = eye_cascade.detectMultiScale(region_gray, 1.1, 5)\n\n        for (ex, ey, ew, eh) in eyes:  # draw boxes around the eyes\n            cv2.rectangle(region_color, (ex, ey), (ex+ew, ey+eh), (0, 0, 255), 1)\n\n    return frame  # return the frame with the rectangles drawn\n\n\nvideo = cv2.VideoCapture(0)  # 0 = built-in webcam, 1 = external webcam\n\nwhile True:\n    _, frame = video.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    canvas = detect(gray, frame)\n    cv2.imshow('Face Recognition', canvas)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\nvideo.release()\ncv2.destroyAllWindows()","sub_path":"Face Recognition/face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
{"seq_id":"404707019","text":"\ndef printpoint(b):\n    print(b)\n    obj = dict(zip(b[::2], b[1::2]))\n    try:\n        if obj['100'] == 'AcDbMText':\n            print('{}'.format(obj['0']))\n    except:\n        pass\n\nbuffer = ['0', 'fake']\nfilepath = '../drawings/GV_12.DXF'\nwith open(filepath,'r') as fp:\n    line = fp.readline()\n    cnt = 1\n    while line:\n        line = fp.readline()\n        print(line)\n        if line.strip() == '0':  # we've started a new section (readline() keeps '\\n', so strip first)\n            print(\"Line {}: {}\".format(cnt, line.strip()))\n            try:\n                printpoint(buffer)  # handle the captured section\n            except:\n                print(\"ERROR\")\n\n            #buffer = []  # and start a new one\n            #buffer.append(line)\n        cnt += 1\n\n#printpoint(buffer)  # buffer left over from last pass through loop\n\n#https://leancrew.com/all-this/2016/12/dxf-data-extraction/","sub_path":"old/dxf_line_reader.py","file_name":"dxf_line_reader.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
{"seq_id":"88925941","text":"import string\n\nimport numpy as np\n\n\nclass PositionConverter(object):\n    \"\"\"\n    Converter to convert the representation of a position from:\n    - FEN to tensor: fen_to_tensor method\n    - tensor to FEN: tensor_to_fen method\n    - FEN to dict: fen_to_dict method\n    \"\"\"\n    def __init__(self):\n        pieces_str = \"PNBRQK\"\n        pieces_str += pieces_str.lower()\n        self.pieces = set(pieces_str)\n        self.n_pieces = len(self.pieces)\n        self.pieces_inds = dict(zip(pieces_str, range(self.n_pieces)))\n        self.inds_pieces = dict(zip(range(self.n_pieces), pieces_str))\n\n        self.BOARD_DIM = 8\n        self.valid_fnum = list(range(1, 1 + self.BOARD_DIM))\n        self.files = string.ascii_lowercase[:self.BOARD_DIM]\n\n\n    def fen_to_tensor(self, fen, time=None, flip=True):\n        \"\"\"Convert from FEN to tensor representation\"\"\"\n        channels = 3 + self.n_pieces  # both castling planes, en passant, and pieces\n        if time is not None:\n            channels += 1\n        tensor = np.zeros(\n            (channels, self.BOARD_DIM, self.BOARD_DIM),\n            dtype=np.float32\n        )\n\n        fen_parts = fen.split()\n\n        board_ranks = fen_parts[0].split('/')\n        for i,rank in enumerate(board_ranks):\n            j = 0\n            for pc in rank:\n                if pc in self.pieces:\n                    tensor[self.pieces_inds[pc],i,j] = 1\n                    j += 1\n                elif int(pc) in self.valid_fnum:\n                    j += int(pc)\n\n        # set castling\n        castling = fen_parts[-4]\n        if castling 
!= '-':\n if 'K' in castling: tensor[-3,-1,-1] = 1\n if 'Q' in castling: tensor[-3,-1,0] = 1\n if 'k' in castling: tensor[-2,0,-1] = 1\n if 'q' in castling: tensor[-2,0,0] = 1\n\n passing = fen_parts[-3]\n if passing != '-':\n file = self.files.index(passing[0])\n if passing[1] == '6': tensor[-1,2,file] = 1\n if passing[1] == '3': tensor[-1,5,file] = 1\n\n if fen_parts[1] == 'b' and flip:\n tensor = self.flip_tensor(tensor)\n\n if time is not None:\n tensor[-4,:,:] = time\n\n return tensor\n\n\n def flip_tensor(self, tensor):\n \"\"\"Flip a tensor to White perspective\"\"\"\n tensor = np.flip(tensor, 1)\n new_tensor = tensor.copy()\n new_tensor[:6,:,:] = tensor[6:12,:,:]\n new_tensor[6:12,:,:] = tensor[:6,:,:]\n new_tensor[-3,:,:] = tensor[-2,:,:]\n new_tensor[-2,:,:] = tensor[-3,:,:]\n\n return np.ascontiguousarray(new_tensor)\n\n\n def tensor_to_fen(self, tensor, other=' w - - 0 1'):\n \"\"\"Convert from tensor to FEN representation\"\"\"\n fen_ranks = list()\n for rank in range(self.BOARD_DIM):\n rank_pieces = np.argwhere(tensor[:,rank,:])\n if rank_pieces.shape[0] == 0:\n fen_ranks.append(str(self.BOARD_DIM))\n continue\n fen_rank = ''\n curr_file = np.min(rank_pieces[:,1])\n\n if curr_file > 0:\n fen_rank += str(curr_file)\n\n rank_pieces_row = np.argwhere(rank_pieces[:,1] == curr_file)[0,0]\n fen_rank += self.inds_pieces[rank_pieces[rank_pieces_row,0]]\n\n curr_file += 1\n while curr_file < self.BOARD_DIM:\n rank_pieces = np.argwhere(tensor[:,rank,curr_file:])\n if rank_pieces.shape[0] == 0:\n fen_rank += str(self.BOARD_DIM - curr_file)\n break\n curr_file_jump = np.min(rank_pieces[:,1])\n if curr_file_jump > 0:\n fen_rank += str(curr_file_jump)\n rank_pieces_row = np.argwhere(\n rank_pieces[:,1] == curr_file_jump\n )[0,0]\n fen_rank += self.inds_pieces[rank_pieces[rank_pieces_row,0]]\n curr_file += curr_file_jump\n curr_file += 1\n\n fen_ranks.append(fen_rank)\n\n fen = '/'.join(fen_ranks)\n fen += other\n\n return fen\n\n\n def fen_to_dict(self, fen):\n fen_split = fen.split()\n\n fen_info = dict()\n fen_info['move_side'] = 1 if 'w' in fen_split[-5] else 0\n\n fen_info['K_castle'] = 1 if 'K' in fen_split[-4] else 0\n fen_info['Q_castle'] = 1 if 'Q' in fen_split[-4] else 0\n fen_info['k_castle'] = 1 if 'k' in fen_split[-4] else 0\n fen_info['q_castle'] = 1 if 'q' in fen_split[-4] else 0\n\n poss_ep_file = fen_split[-3][0]\n if poss_ep_file in self.files:\n fen_info['ep_file'] = 1 + self.files.index(poss_ep_file)\n else:\n fen_info['ep_file'] = 0\n\n fen_info['move_rule'] = int(fen_split[-2])\n fen_info['move_num'] = int(fen_split[-1])\n\n return {**fen_info, **self.fen_to_gamestage(fen)}\n\n\n def fen_to_gamestage(self, fen):\n tensor = self.fen_to_tensor(fen, flip=False)\n\n gamestage = dict()\n major_minors_num = np.sum(tensor[1:5,:,:]) + np.sum(tensor[7:11,:,:])\n if major_minors_num <= 6:\n gamestage['endgame'] = 1\n gamestage['middlegame'] = 0\n gamestage['opening'] = 0\n elif (\n (\n (fen.split()[-5] == 'w')\n & (np.sum(tensor[1:6,-1,:]) < 4)\n )\n | (\n (fen.split()[-5] == 'b')\n & (np.sum(tensor[7:12,0,:]) < 4)\n )\n | (\n int(fen.split()[-1]) > 12\n )\n ):\n gamestage['middlegame'] = 1\n gamestage['opening'] = 0\n gamestage['endgame'] = 0\n else:\n gamestage['opening'] = 1\n gamestage['middlegame'] = 0\n gamestage['endgame'] = 0\n\n return gamestage\n\n\n def fen_to_tensor_gamestage(self, fen, time, flip=True):\n tensor = self.fen_to_tensor(fen, time, flip)\n\n gamestage = dict()\n major_minors_num = np.sum(tensor[1:5,:,:]) + np.sum(tensor[7:11,:,:])\n if major_minors_num 
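# Illustrative round trip through the converter above (a sketch, not part of
# the original module). A castling-free FEN is used so only the piece planes
# are populated.
converter = PositionConverter()
fen = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w - - 0 1"
t = converter.fen_to_tensor(fen)          # shape (15, 8, 8): 12 piece planes + castling/en-passant planes
assert converter.tensor_to_fen(t) == fen  # tensor_to_fen appends ' w - - 0 1' by default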
<= 6:\n gamestage['endgame'] = 1\n gamestage['middlegame'] = 0\n gamestage['opening'] = 0\n elif (np.sum(tensor[1:6,-1,:]) < 4) or (int(fen.split()[-1]) > 12):\n gamestage['middlegame'] = 1\n gamestage['opening'] = 0\n gamestage['endgame'] = 0\n else:\n gamestage['opening'] = 1\n gamestage['middlegame'] = 0\n gamestage['endgame'] = 0\n\n return tensor, gamestage\n","sub_path":"mlchess/PositionConverter.py","file_name":"PositionConverter.py","file_ext":"py","file_size_in_byte":6489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"422774679","text":"# -*- coding:utf-8 -*-\nimport csv\nimport datetime\nimport logging\nimport re\nimport os\nfrom operator import itemgetter\n\n\ndef re_date(str_date):\n rep_date_1 = re.compile(r'(\\d{2})-(\\D{3})-(\\d{2})')\n rep_date_2 = re.compile(r'(\\d{2})(\\D{3})(\\d{2})')\n rep_date_3 = re.compile(r'(\\d{2})(\\d{2})(\\d{2})')\n\n if rep_date_1.match(str_date) is not None:\n date = datetime.datetime.strptime(str_date, '%d-%b-%y')\n elif rep_date_2.match(str_date) is not None:\n date = datetime.datetime.strptime(str_date, '%d%b%y')\n elif rep_date_3.match(str_date) is not None:\n date = datetime.datetime.strptime(str_date, '%d%m%y')\n else:\n logger.warning('Unmatched Date: %s' % str_date)\n date = datetime.datetime.now()\n return date\n\n\ndef init_af_version(dic_af_version, file_path='common/versions.ids'):\n try:\n with open(file_path, 'r') as fin:\n reader = csv.reader(fin)\n for row in reader:\n dic_af_version[row[0]] = row[2]\n except FileNotFoundError:\n logger.error('File Not Found:%s' % file_path)\n exit()\n return dic_af_version\n\n\ndef init_af_subfleet(file_path='common/subfleets.ids'):\n dic_af_subfleet = {}\n try:\n with open(file_path, 'r') as fin:\n reader = csv.reader(fin)\n for row in reader:\n if row[-1] == '32767' and row[0] == 'CA':\n airline_code = row[0]\n subfleet = row[1]\n fleet = row[2]\n dic_af_subfleet[(airline_code, subfleet)] = fleet\n except FileNotFoundError:\n logger.error('File Not Found:%s' % file_path)\n exit()\n return dic_af_subfleet\n\n\ndef init_af_acfcfg(file_path='common/acfcfgs.ids'):\n dic_af_acfcfgs = {}\n try:\n with open(file_path, 'r') as fin:\n reader = csv.reader(fin)\n for row in reader:\n if row[-1] == '32767' and row[0] == 'CA':\n airline = row[0]\n subfleet = row[1]\n version = row[2]\n date_eff = re_date(row[3])\n date_dis = re_date(row[4])\n acfcfgtag = row[5]\n date_now = datetime.datetime.now()\n if date_now <= date_dis and date_eff <= date_dis:\n dic_af_acfcfgs[(airline, subfleet, version)] = acfcfgtag\n except FileNotFoundError:\n logger.error('File Not Found:%s' % file_path)\n exit()\n return dic_af_acfcfgs\n\n\ndef init_af_seatingdesc(file_path='common/seatingdesc.ids'):\n dic_af_seatingdesc = {}\n try:\n with open(file_path, 'r') as fin:\n reader = csv.reader(fin, delimiter='#')\n for row in reader:\n if row[-1] == '32767' and row[0] == 'Physical' and row[1][:2] == 'CA':\n acfcfgtag = row[1]\n cabin_level = row[2]\n seats_count = row[3]\n if acfcfgtag not in dic_af_seatingdesc:\n dic_af_seatingdesc[acfcfgtag] = [0, 0, 0, 0]\n seating_detail = dic_af_seatingdesc.get(acfcfgtag, None)\n if cabin_level == 'F':\n seating_detail[0] = int(seats_count)\n elif cabin_level == 'C':\n seating_detail[1] = int(seats_count)\n elif cabin_level == 'W':\n seating_detail[2] = int(seats_count)\n elif cabin_level == 'Y':\n seating_detail[3] = int(seats_count)\n else:\n logging.warning('Unrecognized acfcfg: %s' % acfcfgtag)\n except FileNotFoundError:\n 
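# Quick illustration (sketch, not in the original module) of the three date
# layouts re_date() above accepts; all of these parse to 2019-01-01:
for _raw in ('01-Jan-19', '01Jan19', '010119'):
    print(_raw, '->', re_date(_raw).date())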
logger.error('File Not Found:%s' % file_path)\n exit()\n return dic_af_seatingdesc\n\n\ndef generate_results_full(dic_af_subfleet):\n output_list = []\n for subfleet, subfleet_info in dic_af_subfleet.items():\n for version in subfleet_info[1]:\n output_list.append([subfleet[0],\n subfleet[1],\n subfleet_info[0],\n version[0],\n version[1],\n version[2],\n version[3],\n version[4]])\n output_list.sort(key=itemgetter(0, 1, 3))\n return output_list\n\ndef generate_dic_cap(dic_af_subfleet, dic_af_acfcfgs, dic_af_seatingdesc):\n dic_total_cap = {}\n dic_cabin_cap = {}\n for airline_subfleet_version, acfcfgtag in dic_af_acfcfgs.items():\n airline, subfleet, version = airline_subfleet_version\n seating_detail = dic_af_seatingdesc.get(acfcfgtag, [0,0,0,0])\n seats_F, seats_C, seats_W, seats_Y = seating_detail\n seats_Total = sum(seating_detail)\n cap = [str(seats_F), str(seats_C), str(seats_W), str(seats_Y), str(seats_Total)]\n max_cap = dic_total_cap.get(subfleet, 0)\n if seats_Total > max_cap:\n dic_total_cap[subfleet] = seats_Total\n if subfleet not in dic_cabin_cap:\n dic_cabin_cap[subfleet] = {version:cap}\n else:\n dic_cabin_cap[subfleet].update({version:cap})\n for k, v in dic_total_cap.items():\n dic_total_cap[k] = str(v)\n return dic_total_cap, dic_cabin_cap\n\n\ndef init_all_data(dic_common=None):\n global dic_subfleet_cap, dic_subfleet_cabin_cap\n dic_subfleet_cap.clear()\n dic_subfleet_cabin_cap.clear()\n if dic_common is not None:\n file_folder = dic_common.get('folder', 'common')\n version_filename = dic_common.get('version', 'versions.ids')\n subfleet_filename = dic_common.get('subfleet', 'subfleets.ids')\n acfcfg_filename = dic_common.get('acfcfg', 'acfcfgs.ids')\n seatingdesc_filename = dic_common.get('seatingdesc', 'seatingdesc.ids')\n subfleet_filepath = os.path.join(file_folder, subfleet_filename)\n acfcfg_filepath = os.path.join(file_folder, acfcfg_filename)\n seatingdesc_filepath = os.path.join(file_folder, seatingdesc_filename)\n\n dic_af_subfleet = init_af_subfleet(subfleet_filepath)\n dic_af_acfcfgs = init_af_acfcfg(acfcfg_filepath)\n dic_af_seatingdesc = init_af_seatingdesc(seatingdesc_filepath)\n else:\n dic_af_subfleet = init_af_subfleet()\n dic_af_acfcfgs = init_af_acfcfg()\n dic_af_seatingdesc = init_af_seatingdesc()\n dic_subfleet_cap, dic_subfleet_cabin_cap = generate_dic_cap(dic_af_subfleet, dic_af_acfcfgs, dic_af_seatingdesc)\n\n\ndef get_subfleet_cap():\n return dic_subfleet_cap\n\ndef get_subfleet_cabin_cap():\n return dic_subfleet_cabin_cap\n\n\ndef set_logger(logger_name, logfile_prefix, logfile_folder='log', logfile_path=None):\n \"\"\"\n 日志配置\n :param logger_name: 日志标识名\n :param logfile_path: 日志文件路径\n :return:\n \"\"\"\n current_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n if logfile_path is None:\n if os.path.exists(logfile_folder) is False:\n os.makedirs(logfile_folder, exist_ok=True)\n logfile_path = '%s/%s_%s.log' % (logfile_folder, logfile_prefix, current_time)\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler(logfile_path)\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ff = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n cf = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh.setFormatter(ff)\n ch.setFormatter(cf)\n logger.addHandler(fh)\n logger.addHandler(ch)\n logger.info('Log file: %s' % logfile_path)\n return logger\n\nlogger = logging.getLogger('pdd_logger')\ndic_subfleet_cap = 
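# Hedged usage sketch for this module; the file names shown are the defaults
# the loaders fall back to, and real deployments may differ.
# init_all_data({'folder': 'common', 'subfleet': 'subfleets.ids'})
# get_subfleet_cap()        # {subfleet: max total seat count, as a string}
# get_subfleet_cabin_cap()  # {subfleet: {version: [F, C, W, Y, total] seat strings}}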
{}\ndic_subfleet_cabin_cap = {}\n","sub_path":"AirChina/Calibration/PDD/afacfg.py","file_name":"afacfg.py","file_ext":"py","file_size_in_byte":7804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"463277296","text":"import pymysql\nimport json\nfrom face_train import Modell\nfrom UI import Ui_MainWindow\n#from UI_new import Ui_MainWindow\nfrom Log import Ui_Dialog\nimport sys\nfrom MyTipWindow import Message\nimport face_recognition\nimport threading\nfrom PIL import Image, ImageDraw, ImageFont\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QSystemTrayIcon, QMessageBox, QDialog\nfrom PyQt5.QtCore import QTimer, QCoreApplication\nfrom PyQt5.QtGui import QPixmap\nimport cv2\nimport qimage2ndarray\nimport time\nimport dlib\nimport os\nimport pandas as pd\nimport numpy as np\nfrom scipy.spatial import distance as dist\nfrom imutils import face_utils\nfrom keras.models import load_model\nimport datetime\nimport ChineseText\n\n# opencv 检测人脸\nface_detector = cv2.CascadeClassifier('D:/OPENCV/sources/data/haarcascades/haarcascade_frontalface_default.xml')\ngender_classifier = load_model(\"model/simple_CNN-gender.hdf5\")\nemotion_classifier = load_model(\"model/simple_CNN-emotion.hdf5\")\ngender_labels = {0: 'girl', 1: 'boy'}\nemotion_labels = {\n 0: 'angry',\n 1: 'hate',\n 2: 'terror',\n 3: 'happy',\n 4: 'sad',\n 5: 'surprise',\n 6: 'calm'\n}\n\n\nids = []\nface_names = []\nface_codings = []\nface_sampes = []\nperson_list = os.listdir(\"faces/\")\nfor i in range(len(person_list)):\n face_coding_mean = []\n person_name = os.listdir(\"faces/\" + \"person_\" + str(i + 1))\n #print(person_name)\n # print(person_name[len(person_name)-1])\n\n for j in range(len(person_name)):\n\n img_path = \"faces/\" + \"person_\" + str(i + 1) + \"/\" + person_name[j]\n #print(img_path)\n face_img = face_recognition.load_image_file(img_path)\n # opencv人脸识别\n\n PIL_img = Image.open(img_path).convert('L')\n img_numpy = np.array(PIL_img, 'uint8')\n faces = face_detector.detectMultiScale(img_numpy)\n # print(len(faces))\n for x, y, w, h in faces:\n face_sampes.append(img_numpy[y:y + h, x:x + w])\n ids.append(i + 1)\n face_coding_mean.append(face_recognition.face_encodings(face_img)[0])\n face_codings.append(np.array(face_coding_mean).mean(axis=0))\n # face_names.append(person_name[0][:person_name[0].index(\".\")])\n face_names.append(person_name[len(person_name) - 1][:person_name[len(person_name) - 1].index(\".\")])\nfont = cv2.FONT_HERSHEY_DUPLEX\ncurrent_path = os.getcwd() # 获取当前路径\npredictor_path = current_path + \"\\\\model\\\\shape_predictor_68_face_landmarks.dat\" # shape_predictor_68_face_landmarks.dat是进行人脸标定的模型,它是基于HOG特征的,这里是他所在的路径\nface_directory_path = current_path + \"\\\\faces\\\\\" # 存放人脸图片的路径\ndetector = dlib.get_frontal_face_detector() # 获取人脸分类器\npredictor = dlib.shape_predictor(predictor_path) # 获取人脸检测器\nfacerec = dlib.face_recognition_model_v1(\"model/dlib_face_recognition_resnet_model_v1.dat\")\n\n\npath_features_known_csv = \"features_all.csv\"\ncsv_rd = pd.read_csv(path_features_known_csv, header=None)\n# 用来存放所有录入人脸特征的数组\n# The array to save the features of faces in the database\nfeatures_known_arr = []\n# 2. 
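# Hedged sketch, not in the original file: matching a query encoding against
# the per-person mean encodings (face_codings / face_names) built above.
# face_distance returns one Euclidean distance per known row; 0.6 is the
# library's customary cutoff.
def match_face_sketch(query_encoding):
    distances = face_recognition.face_distance(np.array(face_codings), query_encoding)
    best = int(np.argmin(distances))
    return face_names[best] if distances[best] < 0.6 else "unknown"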
读取已知人脸数据\n# Print known faces\nfor i in range(csv_rd.shape[0]):\n features_someone_arr = []\n for j in range(0, len(csv_rd.iloc[i])):\n features_someone_arr.append(csv_rd.iloc[i][j])\n features_known_arr.append(features_someone_arr)\n# #print(\"Faces in Database:\", len(features_known_arr))\n# str_data = 'Faces in Database: '+str(len(features_known_arr))\n# print(str_data)\n# self.MsgTE.setPlainText(str_data)\n\n# 每次启动都要执行,浪费效率\n# dlib 获取特征\n#os.system(\"python36 getfacefeatures_to_csv.py\")#程序启动后先提取所有人脸特征,防止后台手动添加人脸\n\n# opencv训练\n# print(face_sampes)\n#print(ids)\nopencv_recognizer = cv2.face.LBPHFaceRecognizer_create()\n# opencv_recognizer.train(face_sampes, np.array(ids))\n# opencv_recognizer.write('train/train.yml')\nname = \"\"\nEYE_AR_THRESH = 0.18 # EAR阈值\nEYE_AR_CONSEC_FRAMES = 4 # 当EAR小于阈值时,接连多少帧一定发生眨眼动作\n\n# 对应特征点的序号\nRIGHT_EYE_START = 37 - 1\nRIGHT_EYE_END = 42 - 1\nLEFT_EYE_START = 43 - 1\nLEFT_EYE_END = 48 - 1\n\n\nclass childWindow(QDialog):\n def __init__(self):\n QDialog.__init__(self)\n self.child=Ui_Dialog()\n\n def show(self):\n #self.child.loaddate()\n self.child.setupUi(self)\n super().show()\n\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n camera = cv2.VideoCapture(0)\n\n id = 0\n flag_1 = True\n flag_2 = True\n flag_3 = True\n\n #cnn\n model = Modell()\n with open('contrast_table', 'r') as f:\n contrast_table = json.loads(f.read())\n model.load_model(file_path='./model/face-facenet.model')\n ##face_facenet 对应的contrast\n ##{\"0\": \"wangyufei\", \"1\": \"Xiangmenghui\", \"2\": \"yangjing\", \"3\": \"liuzirui\", \"4\": \"guoxiaoqi\", \"5\": \"liuyuexiang\", \"6\": \"zhaoying\"}\n def __del__(self):\n try:\n self.camera.release() # 释放资源\n except:\n return\n\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n self.PrepSliders()\n self.PrepWidgets()\n self.PrepParameters()\n self.CallBackFunctions()\n self.Timer = QTimer()\n self.Timer.timeout.connect(self.TimerOutFun)\n\n def PrepSliders(self):\n self.RedColorSld.valueChanged.connect(self.RedColorSpB.setValue)\n self.RedColorSpB.valueChanged.connect(self.RedColorSld.setValue)\n self.GreenColorSld.valueChanged.connect(self.GreenColorSpB.setValue)\n self.GreenColorSpB.valueChanged.connect(self.GreenColorSld.setValue)\n self.BlueColorSld.valueChanged.connect(self.BlueColorSpB.setValue)\n self.BlueColorSpB.valueChanged.connect(self.BlueColorSld.setValue)\n self.ExpTimeSld.valueChanged.connect(self.ExpTimeSpB.setValue)\n self.ExpTimeSpB.valueChanged.connect(self.ExpTimeSld.setValue)\n self.GainSld.valueChanged.connect(self.GainSpB.setValue)\n self.GainSpB.valueChanged.connect(self.GainSld.setValue)\n self.BrightSld.valueChanged.connect(self.BrightSpB.setValue)\n self.BrightSpB.valueChanged.connect(self.BrightSld.setValue)\n self.ContrastSld.valueChanged.connect(self.ContrastSpB.setValue)\n self.ContrastSpB.valueChanged.connect(self.ContrastSld.setValue)\n\n def PrepWidgets(self):\n self.PrepCamera()\n self.StopBt.setEnabled(False)\n self.RecordBt.setEnabled(False)\n self.GrayImgCkB.setEnabled(False)\n self.RedColorSld.setEnabled(False)\n self.RedColorSpB.setEnabled(False)\n self.GreenColorSld.setEnabled(False)\n self.GreenColorSpB.setEnabled(False)\n self.BlueColorSld.setEnabled(False)\n self.BlueColorSpB.setEnabled(False)\n self.ExpTimeSld.setEnabled(False)\n self.ExpTimeSpB.setEnabled(False)\n self.GainSld.setEnabled(False)\n self.GainSpB.setEnabled(False)\n self.BrightSld.setEnabled(False)\n self.BrightSpB.setEnabled(False)\n self.ContrastSld.setEnabled(False)\n 
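# Hedged sketch of the eye aspect ratio the thresholds above are tuned for:
# EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|), which falls toward 0 as the eye
# closes. The class method used later presumably computes the same quantity.
def eye_aspect_ratio_sketch(eye):
    A = dist.euclidean(eye[1], eye[5])  # first vertical eyelid distance
    B = dist.euclidean(eye[2], eye[4])  # second vertical eyelid distance
    C = dist.euclidean(eye[0], eye[3])  # horizontal eye width
    return (A + B) / (2.0 * C)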
self.ContrastSpB.setEnabled(False)\n        # self.pushButton.setEnabled(True)\n\n    def PrepCamera(self):\n        try:\n            # self.camera=cv2.VideoCapture(0)\n            self.MsgTE.clear()\n            self.MsgTE.setPlainText('Onboard camera connected.')\n        except Exception as e:\n            self.MsgTE.clear()\n            self.MsgTE.append(str(e))\n\n    def PrepParameters(self):\n        self.RecordFlag = 0\n        self.RecordPath = 'E:/pyqtt/'\n        self.FilePathLE.setText(self.RecordPath)\n        self.Image_num = 0\n        self.R = 1\n        self.G = 1\n        self.B = 1\n\n        self.ExpTimeSld.setValue(self.camera.get(15))\n        self.SetExposure()\n        self.GainSld.setValue(self.camera.get(14))\n        self.SetGain()\n        self.BrightSld.setValue(self.camera.get(10))\n        self.SetBrightness()\n        self.ContrastSld.setValue(self.camera.get(11))\n        self.SetContrast()\n        self.MsgTE.clear()\n\n    def CallBackFunctions(self):\n        self.FilePathBt.clicked.connect(self.SetFilePath)\n        self.ShowBt.clicked.connect(self.StartCamera)\n        self.StopBt.clicked.connect(self.StopCamera)\n        self.RecordBt.clicked.connect(self.RecordCamera)\n        self.ExitBt.clicked.connect(self.ExitApp)\n        self.GrayImgCkB.stateChanged.connect(self.SetGray)\n        self.ExpTimeSld.valueChanged.connect(self.SetExposure)\n        self.GainSld.valueChanged.connect(self.SetGain)\n        self.BrightSld.valueChanged.connect(self.SetBrightness)\n        self.ContrastSld.valueChanged.connect(self.SetContrast)\n        self.RedColorSld.valueChanged.connect(self.SetR)\n        self.GreenColorSld.valueChanged.connect(self.SetG)\n        self.BlueColorSld.valueChanged.connect(self.SetB)\n        #self.showLog.clicked.connect(self.logView)\n\n\n    def SetR(self):\n        R = self.RedColorSld.value()\n        self.R = R / 255\n\n    def SetG(self):\n        G = self.GreenColorSld.value()\n        self.G = G / 255\n\n    def SetB(self):\n        B = self.BlueColorSld.value()\n        self.B = B / 255\n\n    def SetContrast(self):\n        contrast_toset = self.ContrastSld.value()\n        try:\n            self.camera.set(11, contrast_toset)  # property 11 = contrast\n            self.MsgTE.setPlainText('The contrast is set to ' + str(self.camera.get(11)))\n        except Exception as e:\n            self.MsgTE.setPlainText(str(e))\n\n    def SetBrightness(self):\n        brightness_toset = self.BrightSld.value()\n        try:\n            self.camera.set(10, brightness_toset)  # property 10 = brightness\n            self.MsgTE.setPlainText('The brightness is set to ' + str(self.camera.get(10)))\n        except Exception as e:\n            self.MsgTE.setPlainText(str(e))\n\n    def SetGain(self):\n        gain_toset = self.GainSld.value()\n        print(gain_toset)\n        try:\n            # property 14 is gain; switch to 12 for saturation instead\n            self.camera.set(14, gain_toset)\n            #print(self.camera.get(5))\n            self.MsgTE.setPlainText('The gain is set to ' + str(self.camera.get(14)))\n        except Exception as e:\n            self.MsgTE.setPlainText(str(e))\n\n    def SetExposure(self):\n        exposure_time_toset = self.ExpTimeSld.value()\n        print(exposure_time_toset)\n        try:\n            # property 15 is exposure; switch to 13 for hue instead\n            self.camera.set(15, exposure_time_toset)\n            #print(self.camera.get(5))\n            self.MsgTE.setPlainText('The exposure time is set to ' + str(self.camera.get(15)))\n        except Exception as e:\n            self.MsgTE.setPlainText(str(e))\n\n    def SetGray(self):\n        if self.GrayImgCkB.isChecked():\n            self.RedColorSld.setEnabled(False)\n            self.RedColorSpB.setEnabled(False)\n            self.GreenColorSld.setEnabled(False)\n            self.GreenColorSpB.setEnabled(False)\n            self.BlueColorSld.setEnabled(False)\n            self.BlueColorSpB.setEnabled(False)\n        else:\n            self.RedColorSld.setEnabled(True)\n            self.RedColorSpB.setEnabled(True)\n            self.GreenColorSld.setEnabled(True)\n            self.GreenColorSpB.setEnabled(True)\n            self.BlueColorSld.setEnabled(True)\n            self.BlueColorSpB.setEnabled(True)\n\n    def StartCamera(self):\n        # ret,fram = self.camera.read()\n        # cv2.imshow('video', fram)\n        tag = 
self.ShowBt.text()\n if tag == '开始':\n self.ShowBt.setEnabled(False)\n #self.ShowBt.setText(\"查看日志\")\n self.StopBt.setEnabled(True)\n self.RecordBt.setEnabled(True)\n self.GrayImgCkB.setEnabled(True)\n if self.GrayImgCkB.isChecked() == 0:\n self.RedColorSld.setEnabled(True)\n self.RedColorSpB.setEnabled(True)\n self.GreenColorSld.setEnabled(True)\n self.GreenColorSpB.setEnabled(True)\n self.BlueColorSld.setEnabled(True)\n self.BlueColorSpB.setEnabled(True)\n self.ExpTimeSld.setEnabled(True)\n self.ExpTimeSpB.setEnabled(True)\n self.GainSld.setEnabled(True)\n self.GainSpB.setEnabled(True)\n self.BrightSld.setEnabled(True)\n self.BrightSpB.setEnabled(True)\n self.ContrastSld.setEnabled(True)\n self.ContrastSpB.setEnabled(True)\n self.RecordBt.setText('录像')\n #原来是10,更改为50\n self.Timer.start(10)\n self.timelb = time.clock()\n elif tag == '保存到已有':\n face_path = \"E:/pyqtt/faces/\"\n file_path = QFileDialog.getSaveFileName(self, \"保存文件\", face_path,\n \"jpg files (*.jpg);;all files(*.*)\")\n #print(file_path[0])\n cv2.imwrite(file_path[0], self.Image)\n self.MsgTE.clear()\n self.MsgTE.setPlainText('Image saved.')\n # elif tag == '查看日志':\n # #self.camera.release()\n # pixmap = QPixmap(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\biye\\\\beijing.jpg\") # 按指定路径找到图片,注意路径必须用双引号包围,不能用单引号\n # self.DispLb.setPixmap(pixmap) # 在label上显示图片\n # self.DispLb.setScaledContents(True) # 让图片自适应label大小\n # self.ShowBt.setText(\"开始\")\n #self.__init__()\n # log = childWindow()\n # log.show()\n\n\n def SetFilePath(self):\n dirname = QFileDialog.getExistingDirectory(self, \"浏览\", '.')\n if dirname:\n self.FilePathLE.setText(dirname)\n self.RecordPath = dirname + '/'\n\n def TimerOutFun(self):\n global name\n success, img = self.camera.read()\n if success:\n if self.checkBox.isChecked():\n name = \"unknown\"\n self.face_recognise(img)\n self.flag_2 = True\n self.flag_3 = True\n # t1 = threading.Thread(target=self.face_recognise, args=[img])\n # t1.start()\n #\n #\n # time.sleep(1)\n try:\n if self.flag_1:\n self.MsgTE.setPlainText('Based on face_recognition'+'\\n'+'The man in camera maybe ' + str(name))\n else:\n self.MsgTE.setPlainText('The door is open,please come in!'+\n '\\n'+'Based on face_recognition' +\n '\\n' + 'The man in camera maybe '\n + str(name))\n except Exception as e:\n self.MsgTE.setPlainText(str(e))\n # t1.stop()\n\n if self.checkBox_2.isChecked():\n self.landmark(img)\n # t2 = threading.Thread(target=self.landmark, args=[img])\n # t2.start()\n # time.sleep(1)\n\n if self.checkBox_3.isChecked():\n name = \"unknown\"\n self.dlib_recognise(img)\n self.flag_2 = True\n self.flag_1 = True\n # t3 = threading.Thread(target=self.dlib_recognise, args=[img])\n # t3.start()\n # time.sleep(1)\n try:\n if self.flag_3:\n self.MsgTE.setPlainText(\n 'Based on dlib' + '\\n' + 'The man in camera maybe ' + str(name))\n else:\n self.MsgTE.setPlainText('The door is open,please come in!' 
+\n '\\n' + 'Based on dlib' +\n '\\n' + 'The man in camera maybe '\n + str(name))\n except Exception as e:\n self.MsgTE.setPlainText(str(e))\n\n if self.checkBox_4.isChecked():\n self.blink_recognise(img)\n # msgBox = Message()\n # msgBox.setText(\"Hello!\")\n # # msgBox.setIcon(QMessageBox::Information)\n # # msgBox.setStandardButtons(QMessageBox::Ok)\n # msgBox.autoClose = True\n # msgBox.timeout = 3\n # msgBox.show()\n # t4 = threading.Thread(target=self.blink_recognise, args=[img])\n # t4.start()\n # time.sleep(1)\n if self.checkBox_7.isChecked():\n name = \"unknown\"\n self.cnn_recognise(img)\n\n\n if self.checkBox_6.isChecked():\n name = \"unknown\"\n self.opencv_recognise(img)\n self.flag_1 = True\n self.flag_3 = True\n # t4 = threading.Thread(target=self.opencv_recognise, args=[img])\n # t4.start()\n # time.sleep(1)\n try:\n if self.flag_2:\n self.MsgTE.setPlainText(\n 'Based on opencv' + '\\n' + 'The man in camera maybe ' + str(name))\n else:\n self.MsgTE.setPlainText('The door is open,please come in!' +\n '\\n' + 'Based on opencv' +\n '\\n' + 'The man in camera maybe '\n + str(name))\n except Exception as e:\n self.MsgTE.setPlainText(str(e))\n\n self.Image = self.ColorAdjust(img)\n self.DispImg()\n self.Image_num += 1\n if self.RecordFlag:\n self.video_writer.write(img)\n\n ###计算帧率\n if self.Image_num % 10 == 9:\n frame_rate = 10 / (time.clock() - self.timelb)\n self.FmRateLCD.display(frame_rate)\n self.timelb = time.clock()\n # size=img.shape\n self.ImgWidthLCD.display(self.camera.get(3))\n self.ImgHeightLCD.display(self.camera.get(4))\n\n else:\n self.MsgTE.clear()\n self.MsgTE.setPlainText('Image obtaining failed.')\n\n def cnn_recognise(self,img):\n frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faceRects = face_detector.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))\n if len(faceRects) > 0:\n for faceRect in faceRects:\n x, y, w, h = faceRect\n # 截取脸部图像提交给模型识别这是谁\n image = img[y - 10: y + h + 10, x - 10: x + w + 10]\n probability, name_number = self.model.face_predict(image)\n #print(name_number)\n cname = self.contrast_table[str(name_number)]\n #print(cname)\n # print('name_number:', name_number)\n cv2.rectangle(img, (x - 10, y - 10), (x + w + 10, y + h + 10), (0, 255, 0), thickness=2)\n\n # 文字提示是谁\n cv2.putText(img, cname, (x + 30, y + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)\n # if probability > 0.7:\n # cv2.putText(frame, name, (x + 30, y + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)\n # else:\n # cv2.putText(frame, 'unknow', (x + 30, y + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)\n\n # coding=utf-8\n # 中文乱码处理,未成功\n def cv2ImgAddText(self,img, text, left, top, textColor=(0, 255, 0), textSize=20):\n if (isinstance(img, np.ndarray)): # 判断是否OpenCV图片类型\n img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n draw = ImageDraw.Draw(img)\n fontText = ImageFont.truetype(\n \"font/simsun.ttc\", textSize, encoding=\"utf-8\")\n draw.text((left, top), text, textColor, font=fontText)\n return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n\n # 子窗口调用,未使用此方法\n def logView(self):\n # log = childWindow()\n # log.show()\n pass\n\n\n # 通过对全局图像进行LBP特征提取得到LBP图,LBP特征图是不能直接来作人脸识别的,\n # 需要对LBP特征图进行分块并计算每个分块的直方图,通过直方图的统计信息进行识别,\n # 最后将各块的直方图首尾相连就得到一张图片最终的LBP特征描述向量。\n # 计算两张图片的LBP特征向量的相似度即可实现人脸识别。\n def opencv_recognise(self, img):\n global name\n imgCompose = cv2.imread(\"compose/maozi-1.jpg\")\n # 读取训练文件\n opencv_recognizer.read('train/train.yml')\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n new_faces = 
face_detector.detectMultiScale(gray)\n\n for x, y, w, h in new_faces:\n #性别识别\n face = img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)]\n face = cv2.resize(face, (48, 48))\n face = np.expand_dims(face, 0)\n face = face / 255.0\n gender_label_arg = np.argmax(gender_classifier.predict(face))\n gender = gender_labels[gender_label_arg]\n\n #情绪识别,不太准确\n gray_face = gray[(y):(y + h), (x):(x + w)]\n gray_face = cv2.resize(gray_face, (48, 48))\n gray_face = gray_face / 255.0\n gray_face = np.expand_dims(gray_face, 0)\n gray_face = np.expand_dims(gray_face, -1)\n #gray_face = np.expand_dims(gray_face, -1)\n emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))\n emotion = emotion_labels[emotion_label_arg]\n # print(emotion)\n #img = ChineseText.cv2ImgAddText(img, gender, x + h, y, (255, 255, 255), 30)\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n id, confidence = opencv_recognizer.predict(gray[y:y + h, x:x + w])\n cv2.putText(img, face_names[id - 1], (x + 6, y + h - 6), font, 1.0, (0, 255, 255), 1)\n cv2.putText(img, gender, (x + h, y ), font, 1.0, (0, 255, 255), 1)\n cv2.putText(img, emotion, (x, y), font, 1.0, (0, 255, 255), 1)\n\n name = face_names[id - 1] + \"\\nThe confidence:\" +str(int(confidence))\n\n ##合成帽子,也可以合成其他的\n sp = imgCompose.shape\n imgComposeSizeH = int(sp[0] / sp[1] * w)\n if imgComposeSizeH > (y - 20):\n imgComposeSizeH = (y - 20)\n imgComposeSize = cv2.resize(imgCompose, (w, imgComposeSizeH), interpolation=cv2.INTER_NEAREST)\n top = (y - imgComposeSizeH - 20)\n if top <= 0:\n top = 0\n rows, cols, channels = imgComposeSize.shape\n roi = img[top:top + rows, x:x + cols]\n\n # Now create a mask of logo and create its inverse mask also\n img2gray = cv2.cvtColor(imgComposeSize, cv2.COLOR_RGB2GRAY)\n ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n # Now black-out the area of logo in ROI\n img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)\n\n # Take only region of logo from logo image.\n img2_fg = cv2.bitwise_and(imgComposeSize, imgComposeSize, mask=mask)\n\n # Put logo in ROI and modify the main image\n dst = cv2.add(img1_bg, img2_fg)\n img[top:top + rows, x:x + cols] = dst\n\n #print(gender)\n #name = face_names[id - 1]\n self.id = id\n if self.flag_2 and name != 'unknown'and self.id != 0:\n nowtime = datetime.datetime.now()\n self.InsertLog(nowtime)\n self.flag_2 = False\n # 摄像头帧率太低,不完善\n def blink_recognise(self, img):\n #QSystemTrayIcon.showMessage()\n\n\n #msgBox.exec()\n frame_counter = 0 # 连续帧计数\n blink_counter = 0 # 眨眼计数\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 转成灰度图像\n rects = detector(gray, 0) # 人脸检测\n for rect in rects: # 遍历每一个人脸\n # print('-' * 20)\n shape = predictor(gray, rect) # 检测特征点\n points = face_utils.shape_to_np(shape) # convert the facial landmark (x, y)-coordinates to a NumPy array\n leftEye = points[LEFT_EYE_START:LEFT_EYE_END + 1] # 取出左眼对应的特征点\n rightEye = points[RIGHT_EYE_START:RIGHT_EYE_END + 1] # 取出右眼对应的特征点\n leftEAR = self.eye_aspect_ratio(leftEye) # 计算左眼EAR\n rightEAR = self.eye_aspect_ratio(rightEye) # 计算右眼EAR\n # print('leftEAR = {0}'.format(leftEAR))\n # print('rightEAR = {0}'.format(rightEAR))\n\n ear = (leftEAR + rightEAR) / 2.0 # 求左右眼EAR的均值\n\n leftEyeHull = cv2.convexHull(leftEye) # 寻找左眼轮廓\n rightEyeHull = cv2.convexHull(rightEye) # 寻找右眼轮廓\n cv2.drawContours(img, [leftEyeHull], -1, (0, 255, 0), 1) # 绘制左眼轮廓\n cv2.drawContours(img, [rightEyeHull], -1, (0, 255, 0), 1) # 绘制右眼轮廓\n\n # 如果EAR小于阈值,开始计算连续帧,只有连续帧计数超过EYE_AR_CONSEC_FRAMES时,才会计做一次眨眼\n if ear < 
EYE_AR_THRESH:\n frame_counter += 1\n blink_counter += 1\n #print(frame_counter)\n else:\n if frame_counter >= EYE_AR_CONSEC_FRAMES:\n blink_counter += 1\n frame_counter = 0\n\n # 在图像上显示出眨眼次数blink_counter和EAR\n cv2.putText(img, \"Blinks:{0}\".format(blink_counter), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0),\n 2)\n cv2.putText(img, \"please blink\", (250, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n cv2.putText(img, \"EAR:{:.2f}\".format(ear), (500, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\n\n def eye_aspect_ratio(self, eye):\n # compute the euclidean distances between the two sets of\n # vertical eye landmarks (x, y)-coordinates\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n\n # compute the euclidean distance between the horizontal\n # eye landmark (x, y)-coordinates\n C = dist.euclidean(eye[0], eye[3])\n\n # compute the eye aspect ratio\n ear = (A + B) / (2.0 * C)\n\n # return the eye aspect ratio\n return ear\n\n # 识别模型:基于 Dlib 的 ResNet 预训练模型(dlib_face_recognition_resnet_model_v1.dat)\n # 识别算法:ResNet 神经网络(This model is a ResNet network with 29 conv layers.\n # It's essentially a version of the ResNet-34 network from the paper Deep Residual Learning for Image Recognition by\n # He, Zhang, Ren, and Sun with a few layers removed and the number of filters per layer reduced by\n # half)\n def dlib_recognise(self, img):\n global name\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n faces = detector(img_gray, 0)\n\n # 存储当前摄像头中捕获到的所有人脸的坐标/名字\n # The list to save the positions and names of current faces captured\n #pos_namelist = []\n name_namelist = []\n if len(faces) != 0:\n # 4. 获取当前捕获到的图像的所有人脸的特征,存储到 features_cap_arr\n # 4. Get the features captured and save into features_cap_arr\n features_cap_arr = []\n for i in range(len(faces)):\n shape = predictor(img, faces[i])\n features_cap_arr.append(facerec.compute_face_descriptor(img, shape))\n\n # 5. 遍历捕获到的图像中所有的人脸\n # 5. 
Traversal all the faces in the database\n for k in range(len(faces)):\n # print(\"##### camera person\", k+1, \"#####\")\n # 让人名跟随在矩形框的下方\n # 确定人名的位置坐标\n # 先默认所有人不认识,是 unknown\n # Set the default names of faces with \"unknown\"\n name_namelist.append(\"unknown\")\n\n # 每个捕获人脸的名字坐标 the positions of faces captured\n # pos_namelist.append(\n # tuple([faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))\n\n # 对于某张人脸,遍历所有存储的人脸特征\n # For every faces detected, compare the faces in the database\n e_distance_list = []\n for i in range(len(features_known_arr)):\n # 如果 person_X 数据不为空\n if str(features_known_arr[i][0]) != '0.0':\n # print(\"with person\", str(i + 1), \"the e distance: \", end='')\n e_distance_tmp = self.return_euclidean_distance(features_cap_arr[k], features_known_arr[i])\n # print(e_distance_tmp)\n e_distance_list.append(e_distance_tmp)\n else:\n # 空数据 person_X\n e_distance_list.append(999999999)\n # Find the one with minimum e distance\n similar_person_num = e_distance_list.index(min(e_distance_list))\n # print(\"Minimum e distance with person\", int(similar_person_num)+1)\n\n if min(e_distance_list) < 0.4:\n # person_list = os.listdir(\"faces/\"+\"person_\"+str(int(similar_person_num)+1))\n # #以jpg文件为模板,取最后四个字符之前的名字\n # #name_namelist[k] = person_list[0][0:-4]\n # #获取符号“.”之前的名字\n # name_str = person_list[0]\n # name_str = name_str[:name_str.index(\".\")]\n # name_namelist[k] = name_str\n # print(k)\n # print(int(similar_person_num))\n name_namelist[k] = face_names[int(similar_person_num)]\n self.id = int(similar_person_num)+1\n name = name_namelist[k]\n # print(\"May be \"+name_str)\n # else:\n # print(\"Unknown person\")\n\n # 矩形框\n # draw rectangle\n for kk, d in enumerate(faces):\n # 绘制矩形框\n cv2.rectangle(img, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]), (255, 0, 0), 2)\n cv2.putText(img, name, (d.left()+6, d.bottom()-6), font, 1.0, (0, 255, 255), 1)\n #print('\\n')\n if self.flag_3 and name != 'unknown'and self.id != 0:\n nowtime = datetime.datetime.now()\n self.InsertLog(nowtime)\n self.flag_3 = False\n # 6. 在人脸框下面写人脸名字\n # 6. 
write names under rectangle\n # font = cv2.FONT_ITALIC\n # for i in range(len(faces)):\n # cv2.putText(img, name_namelist[i], pos_namelist[i], font, 1.0, (0, 255, 255), 1)\n # print(\"Faces in camera now:\", name_namelist, \"\\n\")\n\n def return_euclidean_distance(self, feature_1, feature_2):\n feature_1 = np.array(feature_1)\n feature_2 = np.array(feature_2)\n dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))\n return dist\n\n def landmark(self, img):\n b, g, r = cv2.split(img) # 分离三个颜色通道\n img2 = cv2.merge([r, g, b]) # 融合三个颜色通道生成新图片\n dets = detector(img, 1) # 使用detector进行人脸检测 dets为返回的结果\n # print(\"Number of faces detected: {}\".format(len(dets))) # 打印识别到的人脸个数\n # enumerate是一个Python的内置方法,用于遍历索引\n # index是序号;face是dets中取出的dlib.rectangle类的对象,包含了人脸的区域等信息\n # left()、top()、right()、bottom()都是dlib.rectangle类的方法,对应矩形四条边的位置\n for index, face in enumerate(dets):\n # print('face {}; left {}; top {}; right {}; bottom {}'.format(index, face.left(), face.top(), face.right(),\n # face.bottom()))\n # 画出人脸框\n # left = face.left()\n # top = face.top()\n # right = face.right()\n # bottom = face.bottom()\n # cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 1)\n # cv2.namedWindow(f, cv2.WINDOW_AUTOSIZE)\n # cv2.imshow(f, img)\n # dlib\n shape = predictor(img, face) # 寻找人脸的68个标定点\n #print(shape)\n # print(shape.num_parts)\n # 遍历所有点,打印出其坐标,并用蓝色的圈表示出来\n for index, pt in enumerate(shape.parts()):\n # print('Part {}: {}'.format(index, pt))\n pt_pos = (pt.x, pt.y)\n cv2.circle(img, pt_pos, 2, (255, 0, 0), 1)\n\n # face_recognition标记特征点\n # Load the jpg file into a numpy array\n # image = face_recognition.load_image_file(img)\n\n # Find all facial features in all the faces in the image\n face_landmarks_list = face_recognition.face_landmarks(img)\n if len(face_landmarks_list) != 0:\n for index, name in enumerate(face_landmarks_list[0]):\n # print('Part {}: {}'.format(index, pt))\n pt = face_landmarks_list[0].get(name)\n #print(index)\n for i in range(len(pt)):\n #pt_pos = (pt[i].x, pt[i].y)\n cv2.circle(img, pt[i], 2, (0, 0, 255), 1)\n\n # print(\"I found {} face(s) in this photograph.\".format(len(face_landmarks_list)))\n\n # Create a PIL imagedraw object so we can draw on the picture\n pil_image = Image.fromarray(img)\n d = ImageDraw.Draw(pil_image)\n\n for face_landmarks in face_landmarks_list:\n\n # # Print the location of each facial feature in this image\n # for facial_feature in face_landmarks.keys():\n # print(\"The {} in this face has the following points: {}\".format(facial_feature,\n # face_landmarks[facial_feature]))\n\n # Let's trace out each facial feature in the image with a line!\n for facial_feature in face_landmarks.keys():\n d.line(face_landmarks[facial_feature], width=5)\n # pil_image.show()\n\n def face_recognise(self, img):\n global name\n small_frame = cv2.resize(img, (0, 0), fx=0.25, fy=0.25)\n #img = self.cv2ImgAddText(img, \"大家好,我是星爷\", 140, 60, (255, 255, 0), 20)\n\n # small_frame = img\n # face_recognise_path = current_path + \"\\faces\"\n #\n # 原始一张图片的识别\n # obama_img = face_recognition.load_image_file(\"XiangMenghui.jpg\")\n # face_names.append(\"XiangMenghui\")\n # obama_face_encoding = face_recognition.face_encodings(obama_img)[0]\n # process_this_frame = True\n # if process_this_frame:\n #转换成rgb 格式\n new_frame = small_frame[:, :, ::-1]\n\n\n #默认hog方式\n ##face_locations = face_recognition.face_locations(new_frame)\n face_locations = face_recognition.face_locations(new_frame, number_of_times_to_upsample=2, model=\"hog\")\n face_encodings = 
face_recognition.face_encodings(new_frame, face_locations)\n #print(face_encodings)\n for face_encoding in face_encodings:\n match = face_recognition.compare_faces(face_codings, face_encoding, 0.4)\n # print(match)\n for i in range(len(match)):\n if match[i]:\n name = face_names[i]\n self.id = i+1\n break\n if i == len(match) - 1:\n name = \"unknown\"\n # break\n # process_this_frame = not process_this_frame\n\n for (top, right, bottom, left) in (face_locations):\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)\n # cv2.rectangle(img, (left, bottom - 35), (right, bottom), (0, 0, 255), 2)\n cv2.putText(img, name, (left + 6, bottom - 6), font, 1.0, (0, 255, 255), 1)\n if self.flag_1 and name != 'unknown' and self.id != 0:\n nowtime = datetime.datetime.now()\n self.InsertLog(nowtime)\n self.flag_1 = False\n # else:\n # print(\"success\")\n #QMessageBox.about(self, \"提示对话框\", \"你的Windows系统是DOS1.0\")\n\n def InsertLog(self,nowtime):\n # 打开数据库连接\n conn = pymysql.connect(host=\"\", user=\"root\",\n password=\"xmh981127\", db=\"face\", port=3306)\n # 使用cursor()方法获取操作游标\n cur = conn.cursor()\n # 1.查询操作\n # 编写sql 查询语句 user 对应我的表名\n sql = \"insert into faceserver_log(id_id, time) values(%s, %s)\"\n try:\n cur.execute(sql, [self.id, nowtime]) # 执行sql语句\n conn.commit()\n except Exception as e:\n raise e\n finally:\n conn.close() # 关闭连接\n\n\n def ColorAdjust(self, img):\n try:\n B = img[:, :, 0]\n G = img[:, :, 1]\n R = img[:, :, 2]\n B = B * self.B\n G = G * self.G\n R = R * self.R\n # B.astype(cv2.PARAM_UNSIGNED_INT)\n # G.astype(cv2.PARAM_UNSIGNED_INT)\n # R.astype(cv2.PARAM_UNSIGNED_INT)\n\n img1 = img\n img1[:, :, 0] = B\n img1[:, :, 1] = G\n img1[:, :, 2] = R\n return img1\n except Exception as e:\n self.MsgTE.setPlainText(str(e))\n\n def DispImg(self):\n if self.GrayImgCkB.isChecked():\n img = cv2.cvtColor(self.Image, cv2.COLOR_BGR2GRAY)\n else:\n img = cv2.cvtColor(self.Image, cv2.COLOR_BGR2RGB)\n qimg = qimage2ndarray.array2qimage(img)\n self.DispLb.setPixmap(QPixmap(qimg))\n self.DispLb.show()\n\n def StopCamera(self):\n if self.StopBt.text() == '暂停':\n self.StopBt.setText('继续')\n self.RecordBt.setText('保存')\n self.ShowBt.setEnabled(True)\n self.ShowBt.setText('保存到已有')\n self.Timer.stop()\n elif self.StopBt.text() == '继续':\n self.StopBt.setText('暂停')\n self.RecordBt.setText('录像')\n self.ShowBt.setEnabled(False)\n self.ShowBt.setText('开始')\n self.Timer.start(10)\n\n def RecordCamera(self):\n tag = self.RecordBt.text()\n if tag == '保存':\n try:\n # image_name=self.RecordPath+'image'+time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))+'.jpg'\n # print(image_name)\n face_path = \"E:/pyqtt/faces/\"\n if os.listdir(face_path):\n # 获取已录入的最后一个人脸序号 / Get the num of latest person\n person_list = os.listdir(face_path)\n person_num_list = []\n for person in person_list:\n person_num_list.append(int(person.split('_')[-1]))\n person_cnt = max(person_num_list)\n\n # 如果第一次存储或者没有之前录入的人脸, 按照 person_1 开始录入\n # Start from person_1\n else:\n person_cnt = 0\n person_cnt += 1\n face_path = face_path + \"person_\" + str(person_cnt)\n self.MsgTE.setPlainText(face_path)\n os.makedirs(face_path)\n file_path = QFileDialog.getSaveFileName(self, \"保存文件\", face_path,\n \"jpg files (*.jpg);;all files(*.*)\")\n #print(file_path[0])\n\n cv2.imwrite(file_path[0], self.Image)\n namestr = file_path[0]\n namestr = namestr.split('/')[-1].split('.')[0]\n #写入数据库\n # 打开数据库连接\n conn = pymysql.connect(host=\"\", user=\"root\",\n password=\"xmh981127\", db=\"face\", 
port=3306)\n\n                # Use the cursor() method to get a cursor\n                cur = conn.cursor()\n                # 1. Insert operation\n                # Build the SQL statement; faceserver_student is my table name\n                sql = \"insert into faceserver_student(id, name) values(%s, %s)\"\n                try:\n                    cur.execute(sql, [person_cnt, namestr])  # execute the SQL statement\n                    conn.commit()\n                except Exception as e:\n                    raise e\n                finally:\n                    conn.close()  # close the connection\n\n                #print(namestr)\n                self.MsgTE.clear()\n                self.MsgTE.setPlainText('Image saved.')\n                # After enrolling a face, extract its features and update them\n                # os.system(\"python36 getfacefeatures_to_csv.py\")\n            except Exception as e:\n                self.MsgTE.clear()\n                self.MsgTE.setPlainText(str(e))\n        elif tag == '录像':\n            self.RecordBt.setText('停止')\n\n            video_name = self.RecordPath + 'video' + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) + '.avi'\n            fps = self.FmRateLCD.value()\n            size = (self.Image.shape[1], self.Image.shape[0])\n            fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n            # self.video_writer = cv2.VideoWriter(video_name, fourcc,self.camera.get(5), size)\n            self.video_writer = cv2.VideoWriter(video_name, fourcc, fps, size)\n            self.RecordFlag = 1\n            self.MsgTE.setPlainText('Video recording...')\n            self.StopBt.setEnabled(False)\n            self.ExitBt.setEnabled(False)\n        elif tag == '停止':\n            self.RecordBt.setText('录像')\n            self.video_writer.release()\n            self.RecordFlag = 0\n            self.MsgTE.setPlainText('Video saved.')\n            self.StopBt.setEnabled(True)\n            self.ExitBt.setEnabled(True)\n\n    def ExitApp(self):\n        self.Timer.stop()\n        self.camera.release()\n        self.MsgTE.setPlainText('Exiting the application...')\n        QCoreApplication.quit()\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ui = MainWindow()\n    child = childWindow()\n\n    btn = ui.showLog\n    btn.clicked.connect(child.show)\n\n    ui.show()\n\n    sys.exit(app.exec_())\n\n\n## May 6: contrast table for the facenet model\n##{\"0\": \"wangyufei\", \"1\": \"Xiangmenghui\", \"2\": \"yangjing\", \"3\": \"liuzirui\", \"4\": \"guoxiaoqi\", \"5\": \"liuyuexiang\", \"6\": \"zhaoying\"}\n\n\n## May 7 (mistaken run): contrast table for the ResNet50 model\n##{\"0\": \"guoxiaoqi\", \"1\": \"yangjing\", \"2\": \"wangyufei\", \"3\": \"liuzirui\", \"4\": \"zhaoying\", \"5\": \"Xiangmenghui\", \"6\": \"liuyuexiang\"}\n## May 8 (mistaken run): contrast table for the simple CNN model\n##{\"0\": \"liuyuexiang\", \"1\": \"wangyufei\", \"2\": \"zhaoying\", \"3\": \"Xiangmenghui\", \"4\": \"yangjing\", \"5\": \"guoxiaoqi\", \"6\": \"liuzirui\"}\n\n## May 9 (mistaken run): contrast table for the ResNet50 model\n##{\"0\": \"liuzirui\", \"1\": \"liuyuexiang\", \"2\": \"guoxiaoqi\", \"3\": \"yangjing\", \"4\": \"Xiangmenghui\", \"5\": \"zhaoying\", \"6\": \"wangyufei\"}","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":44011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"107987326","text":"\nfrom django.urls import re_path, include\nfrom . 
import views\n\nurlpatterns = [\n    re_path(r'^$', views.ShowVideos, name='show_videos_url'),\n    re_path(r'^get/(?P\d+)/$', views.ShowVideo, name='show_vid_url'), # regular expression\n    re_path(r'^AddLike/(?P\d+)/$', views.AddLike),\n    re_path(r'^AddComment/(?P\d+)/$', views.AddCom),\n    re_path(r'^AddLike/ajax/$', views.ajax),\n]\n","sub_path":"hello/urls_hello.py","file_name":"urls_hello.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"173591245","text":"from django.contrib.auth import get_user_model\r\nfrom rest_framework import status, permissions\r\nfrom rest_framework.exceptions import NotFound\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.views import APIView\r\n\r\nfrom user.models import Survivor, Advocate\r\nfrom user.serializers import CustomUserSerializer, AdvocateSerializer, SurvivorSerializer\r\n\r\nUser = get_user_model()\r\n\r\n\r\ndef create_user(request):\r\n    serializer = CustomUserSerializer(data=request.data)\r\n\r\n    if serializer.is_valid(raise_exception=True):\r\n        return serializer.save()\r\n\r\n\r\ndef update_user(user, request):\r\n    serializer = CustomUserSerializer(user, data=request.data, partial=True)\r\n\r\n    if serializer.is_valid(raise_exception=True):\r\n        return serializer.save()\r\n\r\n\r\nclass SurvivorRegisterView(APIView):\r\n    def post(self, request):\r\n        user = create_user(request)\r\n        survivor = Survivor.objects.create(user=user)\r\n\r\n        return Response(SurvivorSerializer(survivor).data, status=status.HTTP_201_CREATED)\r\n\r\n\r\nclass AdvocateRegisterView(APIView):\r\n    def post(self, request):\r\n        user = create_user(request=request)\r\n        advocate = Advocate.objects.create(user=user, type=request.data.get('type', 'police'))\r\n\r\n        return Response(AdvocateSerializer(advocate).data, status=status.HTTP_201_CREATED)\r\n\r\n\r\nclass UserIsOwnerOrReadOnly(permissions.BasePermission):\r\n    def has_permission(self, request, view):\r\n        if request.method in permissions.SAFE_METHODS:\r\n            return True\r\n\r\n        return view.kwargs['user_token'] == request.user.user_token\r\n\r\n\r\nclass UserInformationView(APIView):\r\n    permission_classes = (\r\n        IsAuthenticated,\r\n        UserIsOwnerOrReadOnly,\r\n    )\r\n\r\n    def get(self, request, user_token):\r\n        user = get_user(user_token)\r\n\r\n        if hasattr(user, 'survivor'):\r\n            survivor = Survivor.objects.get(user=user)\r\n            result = SurvivorSerializer(survivor)\r\n        else:\r\n            advocate = Advocate.objects.get(user=user)\r\n            result = AdvocateSerializer(advocate)\r\n\r\n        return Response(result.data, status=status.HTTP_200_OK)\r\n\r\n    def put(self, request, user_token):\r\n        update_user(request.user, request=request)\r\n\r\n        user = get_user(user_token)\r\n\r\n        if hasattr(user, 'survivor'):\r\n            serializer = SurvivorSerializer(user.survivor, request.data, partial=True)\r\n\r\n            if serializer.is_valid(raise_exception=True):\r\n                serializer.save()\r\n        else:\r\n            serializer = AdvocateSerializer(user.advocate, data=request.data, partial=True)\r\n\r\n            if serializer.is_valid(raise_exception=True):\r\n                serializer.save()\r\n\r\n        return Response(serializer.data, status=status.HTTP_200_OK)\r\n\r\n    def delete(self, request, user_token):\r\n        user = get_user(user_token)\r\n\r\n        user.is_active = False\r\n\r\n        user.save()\r\n\r\n        return Response(status=status.HTTP_204_NO_CONTENT)\r\n\r\n\r\ndef get_user(user_token):\r\n    try:\r\n        user = User.objects.get(user_token=user_token)\r\n\r\n        
if user.is_active:\r\n return user\r\n else:\r\n raise NotFound\r\n\r\n except User.DoesNotExist:\r\n raise NotFound\r\n","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"30721260","text":"#!/usr/bin/python\n\n'''\ndef outlierCleaner(predictions, ages_train, net_worths):\n \"\"\"\n Clean away the 10% of points that have the largest\n residual errors (difference between the prediction\n and the actual net worth).\n\n Return a list of tuples named cleaned_data where \n each tuple is of the form (age, net_worth, error).\n \"\"\"\n \n cleaned_data = []\n\n ### your code goes here\n error = list( (net_worths - predictions)**2 )\n\n cleaned_data = zip(ages, net_worths, error)\n cleaned_data = sorted(cleaned_data, key = lambda tup: tup[2])\n cleaned_data = cleaned_data[:80]\n \n return cleaned_data\n''' \n\ndef outlierCleaner(predictions, ages, net_worths):\n \"\"\"\n clean away the 10% of points that have the largest\n residual errors (different between the prediction\n and the actual net worth)\n return a list of tuples named cleaned_data where \n each tuple is of the form (age, net_worth, error)\n \"\"\"\n \n cleaned_data = []\n temp_data = []\n\n ### your code goes here\n numPredictions = len(predictions)\n\n for i in range(numPredictions):\n resError = abs(predictions[i] - net_worths[i])\n tempTuple = (ages[i], net_worths[i], resError)\n temp_data.append(tempTuple)\n\n temp_data.sort(key=lambda tup: tup[2])\n cleaned_data = temp_data[0:int(len(temp_data)*0.9)]\n\n return cleaned_data\n","sub_path":"outliers/outlier_cleaner.py","file_name":"outlier_cleaner.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"518291274","text":"class TrieNode(object):\n def __init__(self):\n self.map = {}\n self.val = \"\"\n\n\nclass Solution(object):\n def findWords(self, board, words):\n \"\"\"\n :type board: List[List[str]]\n :type words: List[str]\n :rtype: List[str]\n \"\"\"\n # use trie\n if not board: return []\n p = root = TrieNode()\n for word in words:\n for x in word:\n if x not in p.map:\n p.map[x] = TrieNode()\n p = p.map[x]\n p.val = word # assign word\n p = root # reset\n visited = collections.defaultdict(bool)\n res = set() # p2: use set to dedup\n for i in xrange(len(board)):\n for j in xrange(len(board[0])):\n self.DFS(board, i, j, res, root, visited)\n return list(res)\n\n def DFS(self, board, i, j, res, root, visited):\n if i < 0 or i > len(board) - 1 or j < 0 or j > len(board[0]) - 1:\n return # do nothing\n if visited[(i, j)]:\n return\n if board[i][j] not in root.map:\n return\n visited[(i, j)] = True\n root = root.map[board[i][j]] # p1: forget this step before add result\n if root.val:\n res.add(root.val) # a word is found\n for di, dj in [[0, 1], [0, -1], [1, 0], [-1, 0]]:\n self.DFS(board, di + i, dj + j, res, root, visited)\n visited[(i, j)] = False # after the DFS\n return\n","sub_path":"212_word_search_II/dfs_backtracking.py","file_name":"dfs_backtracking.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"605455812","text":"def wordBoggle(board, words):\n ans = []\n for i in range(0,len(words)):\n tmp = words[i][0]\n tmplist = list(words[i])\n tmplist.pop(0)\n for j in range(0,len(board)):\n for k in range(0,len(board[0])):\n flag = False\n if 
board[j][k]==tmp:\n kboard= list(board)\n flag = find(kboard,tmplist,[j,k])\n if flag == True:\n ans.append(words[i])\n ans.sort()\n i = 0\n while(i+1=0 and theboard[row-1][column]==leftword[0]:\n if find(newboard,leftword[1:],[row-1,column])==True:\n flag2 = True\n if row+1=0 and theboard[row][column-1]==leftword[0]:\n if find(newboard,leftword[1:],[row,column-1])==True:\n flag2 = True\n if column+1=0 and column-1>=0 and theboard[row-1][column-1]==leftword[0]:\n if find(newboard,leftword[1:],[row-1,column-1])==True:\n flag2 = True\n if row-1>=0 and column+1=0 and theboard[row+1][column-1]==leftword[0]:\n if find(newboard,leftword[1:],[row+1,column-1])==True:\n flag2 = True\n if row+1= 2:\n print(new_head, self.snake[-2])\n if new_head == self.snake[-2]:\n allowed = False\n if allowed:\n # remove tail unless egg was eaten\n if not keep_first:\n self.snake.pop(0)\n # check for loops\n if new_head in self.snake:\n return False\n # add new head\n self.snake.append(new_head)\n else:\n print(f\"move to {new_head} is not allowed\")\n return True\n\n\n def can_eat(self, dx, dy):\n \"\"\"\n Returns:\n bool: whether moving in the dx, dy direction would eat the egg\n \"\"\"\n x, y = self.snake[-1]\n return (x+dx, y+dy) == self.egg\n \n\n def redraw(self):\n \"\"\"\n redraw all screen, is alled each frame\n \"\"\"\n self.screen.fill(BLACK)\n for pos in self.snake:\n self.draw_cell(*pos)\n self.draw_cell(*self.egg, YELLOW)\n pygame.display.update()\n\n\n def run(self):\n \"\"\"\n mainloop; returns None\n \"\"\"\n \n pygame.init()\n clock = pygame.time.Clock()\n\n self.redraw()\n\n running = True\n while running:\n for event in pygame.event.get():\n # 50 Hz = période de 20 ms\n duration = clock.tick(50)\n print(f'{duration:d}|', end='', flush=True)\n dx, dy = 0, 0\n if event.type == pygame.QUIT:\n running = False \n elif event.type == pygame.KEYDOWN:\n if event.key == K_q:\n running = False\n elif event.key == K_UP:\n dx, dy = 0, -1\n elif event.key == K_RIGHT:\n dx, dy = 1, 0\n elif event.key == K_DOWN:\n dx, dy = 0, 1\n elif event.key == K_LEFT:\n dx, dy = -1, 0\n eaten = self.can_eat(dx, dy)\n if not self.move_snake(dx, dy, eaten):\n running = False\n if eaten:\n self.egg = self.random_egg()\n self.redraw()\n","sub_path":"snake/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"489040747","text":"import tensorflow as tf\n\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.constraints import maxnorm\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Embedding\nfrom keras.layers import GRU, LSTM\nfrom keras.layers import Conv1D, MaxPool1D, Conv2D, MaxPooling2D\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\n\n\n# The model needs to know what input shape it should expect.\n# For this reason, the first layer in a Sequential model (and only the first,\n# because following layers can do automatic shape inference) needs to receive\n# information about its input shape.\n# From first ref:\n# build model\n# The Sequential model is a linear stack of layers.\n# model = Sequential()\n# #\n# model.add(Dense(16, input_shape=(40,))) # first layer\n# model.add(Activation('relu'))\n# model.add(Dropout(0.5))\n#\n# model.add(Dense(16))\n# model.add(Activation('relu'))\n# model.add(Dropout(0.5))\n#\n# model.add(Dense(num_labels))\n# model.add(Activation('softmax'))\n#\n# model.compile(loss='categorical_crossentropy', metrics=['accuracy'], 
optimizer='adam')\n#\n# # Epochs: One epoch consists of one full training cycle on the training set.\n# # Once every sample in the set is seen, you start again - marking the beginning\n# # of the 2nd epoch.\n# model.fit(X, y, batch_size=8, epochs=12, validation_data=(X, y))\n#\n\n# X = X.reshape(X.shape[0], X.shape[1], 1)\n# y = y.reshape(y.shape[0], y.shape[1], 1)\n\n\n# From second ref (CNN):\ndef cnn1D(X_tr, y_tr):\n\n model = Sequential()\n model.add(Conv1D(216, kernel_size=4, activation='relu', input_shape=(X_tr.shape[1], X_tr.shape[2]),\n W_constraint=maxnorm(4), name='C1'))\n\n model.add(Conv1D(64, kernel_size=3, activation='relu', name='C2'))\n model.add(MaxPool1D(pool_size=3))\n model.add(Dropout(0.1))\n\n model.add(Conv1D(64, 2, activation='relu', name='C3'))\n model.add(MaxPool1D(pool_size=2))\n model.add(Dropout(0.1))\n\n model.add(Flatten(name='F1'))\n model.add(Dense(256, activation='relu', name='FD1'))\n model.add(Dropout(0.2))\n\n model.add(Dense(y_tr.shape[1], activation='softmax', name='FD2'))\n # https://keras.io/losses/\n # https://keras.io/optimizers/\n # model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n # No progress bar displayed: verbose=0\n # model.fit(X_tr, y_tr, batch_size=200, epochs=50, validation_split=0.1, verbose=0)\n\n print(model.summary())\n return model\n\n\ndef cnn2D(X_tr, y_tr):\n model = Sequential()\n\n model.add(Conv2D(6, kernel_size=(3, 3), activation='relu', padding='same',\n input_shape=(X_tr.shape[1], X_tr.shape[2], 3)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.1))\n\n model.add(Conv2D(12, (2, 2), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.2))\n\n model.add(Conv2D(24, (2, 2), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.2))\n\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n\n model.add(Dense(y_tr.shape[1], activation='softmax'))\n\n # model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n # model.fit(X_tr, y_tr, batch_size=batch_size, epochs=epochs, verbose=0, validation_split=0.1)\n # model.train_on_batch(X_tr, y_tr)\n # model.fit(X_tr, y_tr, batch_size=200, epochs=50, validation_split=0.1, verbose=0)\n\n print(model.summary())\n\n return model\n\n\ndef rnn(X_tr, y_tr):\n model = Sequential()\n\n model.add(LSTM(1024, input_shape=(X_tr.shape[1], X_tr.shape[2])))\n model.add(Dropout(0.2))\n\n model.add(Dense(y_tr.shape[1], activation='softmax', name='FD2'))\n\n # model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')\n # model.fit(X_tr, y_tr, batch_size=200, epochs=25, validation_split=0.1,verbose=0) # validation_split=0.1,\n print(model.summary())\n\n return model\n\n\ndef crnn(X_tr, y_tr):\n\n model= Sequential()\n\n model.add(Conv1D(256, 2, activation='relu', input_shape=(X_tr.shape[1], X_tr.shape[2]),\n W_constraint=maxnorm(2), name='C1'))\n model.add(Dropout(0.2))\n\n model.add(Conv1D(256, kernel_size=2, padding='same', activation='relu', name='CD2'))\n model.add(MaxPool1D(pool_size=2))\n model.add(Dropout(0.4))\n\n # model.add(Conv1D(512, kernel_size=2, padding='same', activation='relu', name='CD3'))\n # model.add(MaxPool1D(pool_size=2))\n # model.add(Dropout(0.8))\n\n model.add(GRU(128, return_sequences=True, name='GRU1'))\n model.add(Dropout(0.2))\n\n model.add(Flatten())\n model.add(Dense(512, activation='relu', name='FD1'))\n model.add(Dropout(0.5))\n\n model.add(Dense(y_tr.shape[1], 
activation='softmax'))\n # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n # pdb.set_trace()\n # model.fit(X_tr, y_tr, batch_size=128, epochs=50, validation_split=0.1,verbose=0)\n print(model.summary())\n\n return model","sub_path":"DeepModels.py","file_name":"DeepModels.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"133057717","text":"students = [\n \"Afouda Pamela\",\n \"Castets Pierre\",\n \"Debals Alexandre\",\n \"Pelle Pierre-Jean\",\n \"Saupagna Sébastien\"\n]\n\n# Objectif: obtenir un mail à partir de la chaîne Nom Prénom\n# Exemple: \"Afouda Pamela\" => pamela.afouda@python.com\n\n'''\nstudent = \"Afouda Pamela\".lower() # mise en base de casse\nspaceIndex = student.find(\" \")\nlastname = student[:spaceIndex] # nom de famille\nfirstname = student[spaceIndex + 1:]\nemail = firstname + \".\" + lastname + \"@python.com\"\nprint(email)\n'''\n\nfor s in students:\n student = s.lower() # mise en base de casse\n spaceIndex = student.find(\" \")\n lastname = student[:spaceIndex] # nom de famille\n firstname = student[spaceIndex + 1:]\n email = firstname + \".\" + lastname + \"@python.com\"\n emailNoAccent = email.replace(\"é\", \"e\")\n print(emailNoAccent)\n # exemple de chaînage de méthodes de str\n #print(s.lower().replace(\"é\",\"e\").replace(\"-\", \"_\").upper())","sub_path":"python/list4.py","file_name":"list4.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"459922129","text":"\"\"\"Multi process utility\"\"\"\nimport os\n\n\nclass MpCPU:\n \"\"\"Get number of CPU used by multi process\n\n :type _cpu: int | str | None\n :type _max_cpu: int\n :type _min_cpu: int\n :type _character_pattern: tuple of str\n \"\"\"\n\n def __init__(self, cpu=None):\n \"\"\"\n :type cpu: int | str | None\n \"\"\"\n self._cpu = cpu\n self._max_cpu = os.cpu_count()\n self._min_cpu = 1\n self._character_pattern = ('max', 'min', 'mid',\n '1/4', '1/2', '3/4', 'auto')\n\n def get(self):\n \"\"\"\n :rtype int\n \"\"\"\n if self._cpu is None:\n return self._max_cpu - 1\n\n if isinstance(self._cpu, int):\n return self._int_process()\n\n if isinstance(self._cpu, str):\n return self._str_process()\n\n raise TypeError(f\"{type(self._cpu)} : Not int or str\")\n\n def _int_process(self):\n if self._cpu > self._max_cpu:\n return self._max_cpu\n\n if self._min_cpu > self._cpu:\n return self._min_cpu\n\n return self._cpu\n\n def _str_process(self):\n if self._cpu.lower() not in self._character_pattern:\n raise KeyError(f\"{self._cpu} not in {self._character_pattern}\")\n\n if self._cpu.lower() == 'max':\n return self._max_cpu\n\n if self._cpu.lower() == 'min':\n return self._min_cpu\n\n if self._cpu.lower() == 'mid':\n num = self._max_cpu // 2\n return self._get_cpu_number(num)\n\n if self._cpu.lower() == '1/4':\n num = self._max_cpu // 4\n return self._get_cpu_number(num)\n\n if self._cpu.lower() == '1/2':\n num = self._max_cpu // 2\n return self._get_cpu_number(num)\n\n if self._cpu.lower() == '3/4':\n num = self._max_cpu // 4\n num *= 3\n return self._get_cpu_number(num)\n\n return self._max_cpu - 1\n\n def _get_cpu_number(self, num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n if self._min_cpu > num:\n return self._min_cpu\n else:\n return num\n\n\nclass MpCounter:\n \"\"\"Get number of process by multi process\n\n :type num: int\n \"\"\"\n _mp_count = 0\n\n def 
__init__(self):\n MpCounter.count_up()\n self.num = MpCounter._mp_count\n\n @classmethod\n def count_up(cls):\n cls._mp_count += 1\n\n def count_reset(self):\n MpCounter._mp_count = 0\n self.num = 0\n\n\nclass MpLines:\n \"\"\"Multi process information lines\"\"\"\n\n def __init__(self, process_num=None, cpu_num=None, name=None):\n \"\"\"\n :type process_num: int | None\n :type cpu_num: int | None\n :type name: str | None\n \"\"\"\n self._process = process_num\n self._cpu = cpu_num\n self._name = name\n\n def top(self):\n print(f\"\\n{'*' * 60}\")\n\n self._print_name(message='Start')\n\n if self._process is not None:\n print(f\" Process:{self._process:>5}\")\n\n if self._cpu is not None:\n print(f\" CPU :{self._cpu:>5}\")\n\n print(f\"{'*' * 60}\")\n\n def bottom(self):\n print(f\"{'*' * 60}\")\n\n self._print_name(message='End')\n\n print(f\"{'*' * 60}\\n\")\n\n def _print_name(self, message):\n \"\"\"\n :type message: str\n \"\"\"\n if self._name is None:\n print(f\"\")\n else:\n print(f\"\")\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"project_name/mypkg/mputil.py","file_name":"mputil.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"138186998","text":"import logging\n\nimport records\n\nfrom mllaunchpad.resource import DataSource\nfrom mllaunchpad.resource import get_user_pw\n\nlogger = logging.getLogger(__name__)\n\n\nclass RecordsDbDataSource(DataSource):\n \"\"\"DataSource for a bunch of relational database types:\n RedShift, Postgres, MySQL, SQLite, Oracle, Microsoft SQL\n \"\"\"\n serves = ['dbms.oracle', 'dbms.redshift', 'dbms.postgres', 'dbms.mysql', 'dbms.sqlite', 'dbms.ms_sql']\n\n def __init__(self, identifier, datasource_config, dbms_config):\n super().__init__(identifier, datasource_config)\n\n self.dbms_config = dbms_config\n\n logger.info(\n \"Establishing Records database connection for datasource {}...\".format(self.id)\n )\n # if \"connect\" not in dbms_config:\n # raise ValueError(f'No connection string (property \"connect\") in datasource {self.id} config')\n dbtype = dbms_config['type']\n user, pw = get_user_pw(dbms_config)\n host = dbms_config['host']\n port = \":\" + str(dbms_config['port']) if 'port' in dbms_config else ''\n service_name = \"/?service_name=\" + dbms_config['service_name'] if 'service_name' in dbms_config else ''\n connection_string = f\"{dbtype}://{user}:{pw}@{host}{port}{service_name}\"\n self.db = records.Database(connection_string)\n\n def get_dataframe(self, arg_dict=None, buffer=False):\n \"\"\"Get datasource as a pandas dataframe.\n\n Params:\n args_dict: optional query parameters\n buffer: optional, currently not implemented\n\n Returns:\n DataFrame object, possibly cached according to expires-config\n \"\"\"\n if buffer:\n raise NotImplementedError('Buffered reading not supported yet')\n # the resulting `rows` of a query provides a nice way to do this, though\n\n cached = self._try_get_cached_df()\n if cached is not None:\n return cached\n\n query = self.config[\"query\"]\n params = arg_dict or {}\n kw_options = self.options or {}\n\n logger.debug(\n \"Fetching query {} with params {} and options {}...\".format(\n query, params, kw_options\n )\n )\n rows = self.db.query(query, fetchall=True, **params)\n df = rows.export('df')\n\n self._cache_df_if_required(df)\n\n return df\n\n def get_raw(self, arg_dict=None, buffer=False):\n \"\"\"Not implemented.\n\n Params:\n argsDict: optional, currently not implemented\n buffer: optional, 
currently not implemented\n\n Returns:\n Nothing, throws NotImplementedError\n \"\"\"\n raise NotImplementedError('Raw data not supported')\n\n # def __del__(self):\n # if hasattr(self, \"db\"):\n # self.db.close()\n","sub_path":"examples/records_datasources.py","file_name":"records_datasources.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"158641258","text":"import re\nimport json\n\nfrom database.lockstatus import LockStatus\n\n\nclass Truncate:\n def strip_text(self, text):\n return re.sub(' +', ' ', text.strip())\n\n def truncate_table(self, username, dbname, query, logger, fname):\n check_lock = LockStatus().checklock(username)\n # create db copy\n src_fname = dbname + \"_Tables.txt\"\n dest_dname = dbname + \"_Tables_copy.txt\"\n if fname is None:\n filename = src_fname\n dtname = dbname + \"_Tables_Datatypes.txt\"\n status = False\n else:\n filename = dest_dname\n dtname = dbname + \"_Tables_Datatypes_copy.txt\"\n status = True\n file1 = open(filename, \"r\")\n f1 = file1.read()\n file1.close()\n update_set_dict = {}\n dict_obj = json.loads(f1)\n is_truncate_query = False\n # query = \"TRUNCATE table player;\"\n if re.split(\" \", query)[0].lower() == \"truncate\":\n is_truncate_query = True\n file1 = open(filename, \"r\")\n f1 = file1.read()\n file1.close()\n update_set_dict = {}\n dict_obj = json.loads(f1)\n if (is_truncate_query):\n table_name = self.strip_text(re.findall(r'table(.*?);', query.lower())[0].strip())\n #print(table_name)\n tables_info = dict_obj['Tables']\n #print(tables_info)\n for values in tables_info:\n if values.get(\"Table_name\") == table_name:\n #print(\"found\")\n values_info = values['Table_columns']\n #print(values_info)\n del values_info[:1]\n #print(values_info)\n for column_values in values_info:\n for columns in column_values:\n column_values[columns] = 'defnull'\n #print(column_values)\n #print(tables_info)\n #print(dict_obj)\n file1 = open(filename, \"w+\")\n f1 = file1.write(json.dumps(dict_obj))\n file1.close()\n\n return status\n","sub_path":"database/truncate.py","file_name":"truncate.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"421330727","text":"from extract import Extract\nimport numpy as np\nfrom skimage import io\n\nIMAGE_URL = \"https://helpx.adobe.com/content/dam/help/en/photoshop/using/convert-color-image-black-white/jcr_content/main-pars/before_and_after/image-before/Landscape-Color.jpg\"\n\n\ndef test_url_to_image():\n extract = Extract()\n image = extract.url_to_image(IMAGE_URL)\n assert isinstance(image, np.ndarray)\n\n\ndef test_extract_to_list_return_np_ndarray():\n image = np.zeros((300,300,3))\n extract = Extract()\n list_of_faces = extract.extract_face_to_list(image)\n\n if type(list_of_faces[0]) is str:\n assert isinstance(list_of_faces[0], str)\n else:\n assert isinstance(list_of_faces[0], np.ndarray)\n","sub_path":"back-end/application/API/app/test_extract.py","file_name":"test_extract.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"641684853","text":"from plone import api\nfrom plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import 
PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.testing import z2\nfrom zope.configuration import xmlconfig\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml',\n Products.statusmessages,\n context=configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml',\n Products.membrane,\n context=configurationContext)\n import euphorie.client.tests\n xmlconfig.file(\"configure.zcml\",\n euphorie.client.tests,\n context=configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml',\n osha.oira,\n context=configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\nOIRA_FIXTURE = OiRAFixture()\nOIRA_INTEGRATION_TESTING = \\\n IntegrationTesting(\n bases=(OIRA_FIXTURE,),\n name=\"osha.oira:Integration\"\n )\n\nOIRA_SUITE_ROBOT = FunctionalTesting(\n bases=(OIRA_FIXTURE,\n AUTOLOGIN_LIBRARY_FIXTURE,\n z2.ZSERVER_FIXTURE),\n name=\"OIRA_SUITE_ROBOT\")\n","sub_path":"src/osha/oira/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"439929803","text":"def calc(n1, n2): \r\n if(n1>n2): \r\n num = n1 \r\n den = n2\r\n else: \r\n num = n2 \r\n den = n1 \r\n rem = num % den \r\n while(rem != 0): \r\n num = den \r\n den = rem \r\n rem = num % den \r\n gcd = den \r\n lcm = int(int(n1 * n2)/int(gcd)) \r\n return lcm \r\n\r\ns=int(input(\"Enter size:\"))\r\na=[0]*s\r\nprint(\"Input numbers: \")\r\nfor i in range(s):\r\n a[i]=int(input())\r\nprint(\"List: \",a)\r\n\r\n \r\nn1 = a[0] \r\nn2 = a[1] \r\nlcm =calc(n1,n2) \r\n \r\nfor i in range(2, len(a)): \r\n lcm =calc(lcm, a[i]) \r\nprint(\"LCM is: \",lcm) \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"December-06/py_anuppriya_lcm.py","file_name":"py_anuppriya_lcm.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"207941325","text":"# -*- coding: utf-8 -*-\n# (c) AbAKUS IT Solutions\nimport time\nfrom odoo import models, fields, api, exceptions, _\n\n\nclass HrHolidaysSummaryDept(models.TransientModel):\n _inherit = 'hr.holidays.summary.dept'\n\n month = fields.Selection([\n ('01', 'January'),\n ('02', 'February'),\n ('03', 'March'),\n ('04', 'April'),\n ('05', 'May'),\n ('06', 'June'),\n ('07', 'July'),\n ('08', 'August'),\n ('09', 'September'),\n ('10', 'October'),\n ('11', 'November'),\n ('12', 'December')\n ], required=True, default=lambda *a: time.strftime('%m'))\n year = fields.Integer(required=True, default=lambda *a: int(time.strftime('%Y')))\n company_id = fields.Many2one(\n 'res.company',\n 'Company',\n default=lambda self: self.env['res.company']._company_default_get('hr.holidays.summary.dept'))\n\n hide_empty_categories = fields.Boolean(string=\"Hide Empty Departments\", default=True)\n hide_empty_status = fields.Boolean(string=\"Hide Empty Leave Types\", default=True)\n 
hide_no_leaves_emp = fields.Boolean(string=\"Hide Employees Without Leaves\", default=True)\n\n depts = fields.Many2many('hr.department', 'summary_dept_rel', 'sum_id', 'dept_id', string='Department(s)')\n\n @api.one\n @api.onchange('month', 'year')\n def onchange_date(self):\n if not self.month or not self.year or self.year < 0 or self.year > 9999:\n self.date_from = False\n else:\n self.date_from = str(self.year).zfill(4) + '-' + self.month + '-01'\n","sub_path":"hr_holidays_summary_report_improved/models/hr_holidays_summary_dept.py","file_name":"hr_holidays_summary_dept.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"33952752","text":"Q=int(input())#问题数\nresult=[\"No\"]*Q\nfor i in range(0,Q):\n temp=input().split(\" \")\n l=int(temp[0])\n k=int(temp[1])\n list1=input().split(\" \")\n listbig=[]\n team=int((l+k-1)/k)#分成的组数\n for x in range(0,team):\n listsmall=[]\n for y in range(0,k):\n if(len(list1)>=1):\n listsmall.append(list1[0])\n list1.pop(0)\n listbig.append(listsmall[::-1])\n listresult=[]\n for m in listbig:\n for n in m:\n listresult.append(n)\n for a in range(0,len(listresult)):\n if(a!=len(listresult)-1):\n print(listresult[a],end=\" \")\n else:\n print(listresult[a])","sub_path":"Code/CodeRecords/2342/60708/252480.py","file_name":"252480.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"175993307","text":"from tkinter import *\nfrom tkinter import filedialog, ttk\n\nimport sounddevice as sd\nimport soundfile as sf\nfrom pydub import AudioSegment, silence\nfrom pydub.playback import play\n\nglobal_amp = 15 # Global voice amplifier gain in dB\nfs = 44100 # Global sample rate for recording\nsd.default.samplerate = fs\nsd.default.channels = 2\n\nselected_part = 0\nparts = []\n\n\nclass Part:\n def __init__(self, audio, start, stop):\n self.takes = []\n self.takes.append(audio)\n self.start = start\n self.stop = stop\n self.duration = (stop - start) / 1000\n self.selected_take = self.takes[0]\n\n def add_take(self, audio):\n self.takes.append(audio)\n\n def select_take(self, takenbr):\n self.selected_take = self.takes[takenbr]\n\n\n# Load a mp3 beat\ndef load_beat():\n path = filedialog.askopenfilename()\n global beat\n beat = AudioSegment.from_mp3(path)\n beat.export(\"beat.wav\", format=\"wav\")\n global beat_sd\n beat_sd = sf.read(\"beat.wav\", dtype='float32')\n\n\n# Define the parts\ndef init_parts():\n data, fssd = sf.read(\"beat.wav\", dtype='float32')\n print(\"recording...\")\n recording = sd.playrec(data, fs, channels=2, blocking=True)\n sf.write('initialrecording.wav', recording, fs)\n initial_recording = AudioSegment.from_wav(\"initialrecording.wav\")\n print(\"loaded\")\n nonsilent = silence.detect_nonsilent(initial_recording, min_silence_len=1000, silence_thresh=-40)\n global parts\n parts = []\n titles = []\n for start, stop in nonsilent:\n audio = initial_recording[start:stop]\n parts.append(Part(audio, start, stop))\n titles.append(str(start) + \" - \" + str(stop))\n\n cb1[\"values\"] = titles\n\n\ndef record_take():\n recording = sd.rec(int(selected_part.duration * fs), samplerate=fs, channels=2)\n print(\"recording...\")\n play(beat[selected_part.start:selected_part.stop])\n sf.write('temp_recording.wav', recording, fs)\n take = AudioSegment.from_wav(\"temp_recording.wav\")\n selected_part.takes.append(take)\n\n titles = []\n for i in 
range(len(selected_part.takes)):\n        titles.append(\"Take \" + str(i))\n    cb2[\"values\"] = titles\n    cb2.current(selected_part.takes.index(selected_part.selected_take))\n\n\n# Mix and preview\ndef preview():\n    mixer = beat\n    for part in parts:\n        mixer = mixer.overlay(part.selected_take + global_amp, position=part.start - 300)\n    play(mixer)\n\n\n\ndef select_part(event):\n    global selected_part\n    selected_part = parts[cb1.current()]\n    titles = []\n    for i in range(len(selected_part.takes)):\n        titles.append(\"Take \" + str(i))\n    cb2[\"values\"] = titles\n    cb2.current(selected_part.takes.index(selected_part.selected_take))\n\n\ndef select_take(event):\n    selected_part.select_take(cb2.current())\n\n\ndef play_take():\n    play(selected_part.selected_take + global_amp)\n\n\n# Tkinter setup\nroot = Tk()\nroot.title(\"Wallinator by pwnd\")\n\n# Create frames\nplayback_frame = Frame(root, relief=RAISED, borderwidth=1)\nplayback_frame.pack()\nedit_frame = Frame(root, relief=RAISED, borderwidth=1)\nedit_frame.pack()\n\n# Playback controls\n# Label(playback_frame, text=\"Choose part to edit\")\nButton(playback_frame, text=\"Load Beat\", command=load_beat).pack(side=LEFT)\nButton(playback_frame, text=\"Play Preview\", command=preview).pack(side=LEFT)\nButton(playback_frame, text=\"Initialize Parts\", command=init_parts).pack(side=LEFT)\n\n# Editing selection\nglobal cb1\nglobal cb2\nLabel(edit_frame, text=\"Select Part\").pack()\ncb1 = ttk.Combobox(edit_frame, width=10)\ncb1.bind(\"<<ComboboxSelected>>\", select_part)\ncb1.pack()\n\nLabel(edit_frame, text=\"Select Take\").pack()\ncb2 = ttk.Combobox(edit_frame, width=10)\ncb2.bind(\"<<ComboboxSelected>>\", select_take)\ncb2.pack()\n\nButton(edit_frame, text=\"Play Take\", command=play_take).pack(side=LEFT)\nButton(edit_frame, text=\"New Take\", command=record_take).pack(side=LEFT)\n\nroot.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"270857164","text":"# Mit'mit'a. Parsing and translation with minimal dependency grammars.\n#\n########################################################################\n#\n# This file is part of the Mainumby project within the PLoGS metaproject\n# for parsing, generation, translation, and computer-assisted\n# human translation.\n#\n# Copyleft 2015, 2016, 2017 HLTDI, PLoGS \n# \n# This program is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation, either version 3 of\n# the License, or (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n# =========================================================================\n\n__all__ = ['views', 'record']\n# not needed for now: 'learn'\n\nfrom flask import Flask, url_for, render_template\n\n## train imports sentence\nfrom mdt.train import *\n\n## sentence imports ui, segment, record\n### segment imports cs, utils, entry.Entry, entry.Group, record.SegRecord\n### ui imports language (ui not needed for current version)\n#### language imports entry, some functions from utils, morphology.morpho, morphology.semiring\n#### language also calls function in morphology.strip\n##### entry imports morphology.fs\n\n#from .sentence import *\n#from .learn import *\n\n#SESSIONS_DIR = os.path.join(os.path.dirname(__file__), 'sessions')\n\n## morphology a package; imports morphology.morpho\n### which imports morphology.fst\n#### which imports morphology.semiring\n##### which imports morphology.fs, morphology.utils\n###### fs imports morphology.logic, morphology.internals\nfrom mdt.morphology import *\nfrom iwqet.record import *\n# from . import db\n\n## Instantiate the Flask class to get the application\napp = Flask(__name__)\napp.config.from_object(__name__)\n\nLANGUAGE_DIR = os.path.join(os.path.dirname(__file__), 'languages')\n\ndef get_language_dir(abbrev):\n return os.path.join(LANGUAGE_DIR, abbrev)\n\ndef load(source='eng', target='amh', groups=None):\n \"\"\"Load a source and a target language, given as abbreviations.\n Read in groups for source language, including target language translations at the end.\n If train is True, load the analysis rather than generation FSTs for the target language.\n If the languages are already loaded, don't load them.\"\"\"\n srclang = Language.languages.get(source)\n targlang = Language.languages.get(target)\n loaded = False\n srcuse = mdt.SOURCE\n targuse = mdt.TARGET\n if srclang and targlang and srclang.use == srcuse and targlang.use == targuse:\n loaded = True\n else:\n# try:\n srcdir = get_language_dir(source)\n targdir = get_language_dir(target)\n srcpath = os.path.join(srcdir, source + '.lg')\n srclang = Language.read(srcpath, use=srcuse, directory=srcdir)\n print(\"Source language {} loaded\".format(srclang))\n targpath = os.path.join(targdir, target + '.lg')\n targlang = Language.read(targpath, use=targuse, directory=targdir)\n print(\"Target language {} loaded\".format(targlang))\n if not srclang:\n print(\"Source language failed to load!\")\n return\n if not targlang:\n print(\"Target language failed to load!\")\n return\n# except IOError:\n# if not srclang:\n# print(\"At least one of these languages doesn't exist.\")\n# return\n # Load groups for source language now\n if not loaded:\n srclang.read_groups(files=groups, target=targlang)\n srclang.read_ms(target=targlang)\n return srclang, targlang\n\n#def load(source='spa', target='grn'):\n# \"\"\"Load source and target languages for translation.\"\"\"\n# print(\"Attempting to load {} and {}\".format(source, target))\n# return iwqet.Language.load_trans(source, target)\n\ndef seg_trans(sentence, source, target, session=None, verbosity=0):\n \"\"\"Translate sentence and return marked-up sentence with segments colored.\n So far only uses first solution.\"\"\"\n sentence.initialize(ambig=True, verbosity=verbosity)\n sentence.solve(translate=True, all_sols=False, all_trans=True, interactive=False, verbosity=verbosity)\n if sentence.solutions:\n solution = sentence.solutions[0]\n solution.get_segs()\n return solution.segments, solution.get_seg_html()\n else:\n return [], 
sentence.get_html()\n\ndef make_document(source, target, text, session=None):\n \"\"\"Create an Mainumby Document object with the text.\"\"\"\n d = iwqet.Document(source, target, text, proc=True, session=session)\n return d\n\ndef quit(session=None):\n \"\"\"Quit the session (and the program), cleaning up in various ways.\"\"\"\n for language in Language.languages.values():\n # Store new cached analyses or generated forms for\n # each active language, but only if there is a current session/user.\n language.quit(cache=session)\n if session:\n session.quit()\n\ndef start(source, target, user):\n \"\"\"Initialize a run. Create a session if there's a user.\"\"\"\n# print(\"Starting {}, {}, {}\".format(source, target, user))\n # Read in current users so that we can find the current user and\n # check for username overlap if a new account is created\n init_users()\n# User.read_all()\n if isinstance(user, str):\n # Get the user from their username\n user = User.users.get(user)\n if user:\n return iwqet.Session(source=source, target=target, user=user)\n\n## Users and Sessions\ndef init_users():\n # Read in current users before login.\n User.read_all(path=get_users_path())\n\ndef get_user(username):\n \"\"\"Find the user with username username.\"\"\"\n print(\"Looking for user with username {}\".format(username))\n return User.get_user(username)\n\ndef create_user(dct):\n \"\"\"Create a user from the dict of form values from login.html.\"\"\"\n return User.dict2user(dct)\n\ndef get_users_path():\n return os.path.join(SESSIONS_DIR, 'users')\n\ndef get_user_path(user):\n filename = \"{}.usr\".format(user.username)\n return os.path.join(SESSIONS_DIR, filename)\n\ndef save_record(user, record):\n \"\"\"Write the session feedback to the user's file.\"\"\"\n with open(get_user_path(user), 'a', encoding='utf8') as file:\n record.write(file=file)\n\n# Import views. 
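# --- Hedged aside: how the iwqet helpers above fit together for one translation
# request. load(), make_document() and seg_trans() are the module's own functions;
# the language codes and iterating a Document sentence-by-sentence are assumptions
# about the wider project, not guarantees from this file.
def translate_text(text, source='spa', target='grn'):
    srclang, targlang = load(source, target)      # reuses languages already loaded
    doc = make_document(srclang, targlang, text)  # proc=True tokenizes and segments
    rendered = []
    for sentence in doc:                          # assumed: Document yields sentences
        segments, html = seg_trans(sentence, srclang, targlang)
        rendered.append(html)
    return rendered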
This has to appear after the app is created.\nimport iwqet.views\n\n","sub_path":"iwqet/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"437654791","text":"print(\"This program calculates your body mass index.\")\nheight = float(input(\"Please enter your height in meters: \"))\nweight = float(input(\"Please enter your weight in kilos: \"))\nBMI = weight/(height**2)\nprint(\"Your BMI is: \", round(BMI, 2))\n\nif (BMI <= 18.5):\n classification = \"Underweight\"\nelif (BMI > 18.5 and BMI <= 24.9):\n classification = \"Normal weight\"\nelif (BMI > 24.9 and BMI <= 29.9):\n classification = \"Overweight\"\nelse:\n classification = \"Obesity\"\nprint(\"The classification of your BMI is: \", classification)","sub_path":"python-course/calculate_bmi.py","file_name":"calculate_bmi.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"208626561","text":"\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ntrain = pd.read_csv(r'train2.csv')\ntest = pd.read_csv(r'test2.csv')\nlang = {\n 'ja': 0, 'en': 1, 'fr': 2, 'de': 3, 'he': 4,\n 'hi': 5, 'ru': 6, 'ka': 7, 'zh': 8, 'th': 9,\n 'it': 10, 'es': 11, 'bn': 12, 'sv': 13, 'ko': 14,\n 'sr': 15, 'da': 16, 'ta': 17, 'cs': 18, 'cn': 19,\n 'ro': 20, 'ca': 21, 'no': 22, 'nl': 23, 'te': 24,\n 'tr': 25, 'bm': 26, 'ml': 27, 'pt': 28, 'af': 29, \n 'fi': 30, 'ur': 31, 'el': 32, 'id': 33, 'xx': 34, \n 'pl': 35, 'kn': 36, 'is': 37, 'hu': 38, 'fa': 39, \n 'mr': 40, 'ar': 41, 'nb': 42, 'vi': 43 \n \n }\n \n \ntrain.replace(lang, inplace=True)\ntest.replace(lang, inplace=True)\nprint(train.describe())\n\n\ntrain.plot.scatter(x='original_language', y='revenue', s=50)\nplt.savefig(\"filename.png\")\n\n\n","sub_path":"movie_db_picker/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"382896807","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Mike\n# @Contact : 597290963@qq.com\n# @Time : 2021/1/23 19:55\n# @File : LongestPalindrome.py\n\n\nclass LongestPalindrome(object):\n\n def __init__(self):\n pass\n\n def longestPalindrome(self, s: str) -> str:\n \"\"\"\n Expand around each center to find the longest palindromic substring\n :param s:\n :return:\n \"\"\"\n n = len(s)\n self.res = \"\"\n\n def helper(i, j):\n while i >= 0 and j < n and s[i] == s[j]:\n i -= 1\n j += 1\n if len(self.res) < j - i - 1:\n self.res = s[i + 1:j]\n\n for i in range(n):\n helper(i, i)\n helper(i, i + 1)\n return self.res\n\n def longestPalindrome1(self, s: str) -> str:\n n = len(s)\n dp = [[0] * n for _ in range(n)]\n res = \"\"\n for i in range(n):\n for j in range(i + 1):\n if s[i] == s[j] and (i - j + 1 <= 3 or dp[j + 1][i - 1]):\n dp[j][i] = 1\n res = max(res, s[j:i + 1], key=len)\n return res\n\n\nif __name__ == '__main__':\n print(LongestPalindrome().longestPalindrome(\"aacabdkacaa\"))\n","sub_path":"datastructure/Array_and_string/LongestPalindrome.py","file_name":"LongestPalindrome.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"635941442","text":"# vim: set ts=8 sw=4 sts=4 et ai:\n\"\"\"\nAPI: https://www.targetpay.com/info/bankwire-docu\nAPI: https://www.targetpay.com/info/cc-docu\nAPI: https://www.targetpay.com/info/cc-atos-docu\nAPI: 
https://www.targetpay.com/info/ideal-docu\nAPI: https://www.targetpay.com/info/mrcash-docu\n\"\"\"\nimport unicodedata\n\nfrom osso.payment import use_test_mode\n\nfrom .targetpay import TargetpayCreditcard, TargetpayIdeal, TargetpayMrCash\n\n\n# A clear description of the service. Maximum 32 characters. Only\n# letters, numbers and the following characters: , . - _ * [] () and\n# space.\n#\n# For both Creditcard and MrCash it says \"[alleen] letters of cijfers,\n# maximaal 32 tekens [...]\" but I don't believe it would be only\n# [A-Za-z0-9].\nVALID_DESCRIPTION_TOKENS = (\n '0123456789'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n 'abcdefghijklmnopqrstuvwxyz'\n ' ,.-_*[]()')\nVALID_DESCRIPTION_LENGTH = 32\n\n\ndef clean_description(description):\n description = unicodedata.normalize('NFKD', description).encode(\n 'ascii', 'ignore')\n description = ''.join(\n i for i in description if i in VALID_DESCRIPTION_TOKENS)\n if len(description) > VALID_DESCRIPTION_LENGTH:\n description = description[0:(VALID_DESCRIPTION_LENGTH - 3)] + '...'\n return description\n\n\ndef get_instance(provider_sub=None):\n if provider_sub == 'creditcard':\n instance = TargetpayCreditcard(testing=use_test_mode())\n elif provider_sub == 'ideal':\n instance = TargetpayIdeal(testing=use_test_mode())\n elif provider_sub == 'mrcash':\n instance = TargetpayMrCash(testing=use_test_mode())\n else:\n raise NotImplementedError(\n 'implemented creditcard/ideal/mrcash, not {}'.format(provider_sub))\n\n return instance\n","sub_path":"osso/payment/provider/targetpay/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"14882217","text":"import os\nimport os.path\nfrom datetime import datetime, timedelta\nimport time\n\nfrom fabric.api import env, local, settings\nfrom fabric.utils import abort, puts\nfrom fabric.operations import run, put, sudo\nfrom fabric.contrib.console import confirm\nfrom fabric.exceptions import NetworkError\n\nfrom amazon import Amazon\nfrom util import mkcmd, json_local, update_instance\n\ndef azure_name(dns_name):\n return dns_name.replace(\".\",\"-\")\n\ndef get_public_ip(instance):\n if \"VirtualIPAddresses\" in instance:\n if len(instance[\"VirtualIPAddresses\"]) > 0:\n return instance[\"VirtualIPAddresses\"][0][\"address\"]\n return None\n\nclass Provider:\n def __init__(self, cfg):\n self.cfg = cfg\n self.azure = Azure(cfg)\n self.amazon = Amazon(cfg) # for DNS\n\n def create_database(self, dns_name):\n raise \"Not Implemented\"\n\n def create_instance(self, dns_name):\n machine = self.cfg.get_machine(dns_name)\n instance = self.azure.get_instance(dns_name)\n if instance:\n return self.update_instance(dns_name, instance)\n\n self.azure.vm_create(\n dns_name=azure_name(dns_name),\n size=machine[\"size\"],\n name=azure_name(dns_name),\n )\n puts(\"waiting 60s for machine to boot\")\n time.sleep(60)\n return self.update_instance(dns_name)\n\n def destroy_database(self, dns_name):\n raise \"Not Implemented\"\n\n def destroy_instance(self, dns_name):\n pass\n\n def update_database(self, dns_name, instance=None):\n raise \"Not Implemented\"\n\n def update_instance(self, dns_name, instance=None):\n if not instance:\n instance = self.azure.get_instance(dns_name)\n if not instance:\n abort(\"unknown instance %s\" % dns_name)\n\n # get the ip\n ip = get_public_ip(instance)\n if not ip:\n deadline = datetime.now() + timedelta(minutes=15)\n while datetime.now() < deadline:\n instance = 
self.azure.get_instance(dns_name)\n ip = get_public_ip(instance)\n if ip:\n break\n time.sleep(1)\n if not ip:\n abort(\"failed to retrieve public ip\")\n\n # set the DNS\n self.amazon.create_dns(dns_name, ip)\n\n # open ports\n # apparently the only way to do this is one at a time... which takes\n # forever\n existing = self.azure.vm_endpoint_list(vm_name=azure_name(dns_name))\n existing = set([e[\"port\"] for e in existing])\n ports = [80, 443, 3306]\n for port in xrange(5000, 5101):\n ports.append(port)\n ports = [port for port in ports if port not in existing]\n for port in ports:\n self.azure.vm_endpoint_create(\n vm_name=azure_name(dns_name),\n lb_port=port,\n vm_port=port,\n )\n\n # run the common update\n env.host_string = \"centos@\" + ip\n return update_instance(self.cfg, dns_name, ip)\n\nclass Azure:\n def __init__(self, cfg):\n self.cfg = cfg\n #self.default_subscription = \"dc7fa7cd-5124-4591-8883-7c4c3fec6cc9\"\n self.default_vm_image = \"5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-70-20150128\"\n self.default_vm_location = \"South Central US\"\n self.default_vm_size = \"Small\"\n self.default_vm_ssh_cert = os.path.dirname(os.path.realpath(__file__)) + \"/keys/azure.pem\"\n\n def get_instance(self, dns_name):\n return self.vm_show(name=azure_name(dns_name))\n\n def vm_create(self, **kwargs):\n args = [\n \"azure\", \"vm\", \"create\",\n kwargs.get(\"dns_name\"),\n kwargs.get(\"image\", self.default_vm_image),\n \"--userName\", kwargs.get(\"username\", \"centos\"),\n \"--location\", kwargs.get(\"location\", self.default_vm_location),\n # \"--affinity-group\",\n # \"--blob-url\",\n \"--vm-size\", kwargs.get(\"size\", self.default_vm_size),\n \"--vm-name\", kwargs.get(\"name\"),\n \"--ssh\", \"22\",\n \"--ssh-cert\", kwargs.get(\"ssh_cert\", self.default_vm_ssh_cert),\n \"--no-ssh-password\",\n # \"--virtual-network-name\",\n # \"--subnet-names\",\n # \"--public-ip\",\n # \"--static-ip\",\n # \"--reserved-ip\",\n # \"--availability-set\",\n #\"--subscription\", self.default_subscription,\n # \"--custom-data\",\n \"--json\",\n ]\n return json_local(mkcmd(*args))\n\n def vm_endpoint_acl_rule_create(self, **kwargs):\n args = [\n \"azure\", \"vm\", \"endpoint\", \"acl-rule\", \"create\",\n \"--vm-name\", kwargs.get(\"name\"),\n \"--endpoint-name\", kwargs.get(\"endpoint_name\"),\n \"--order\", kwargs.get(\"order\", \"100\"),\n \"--action\", kwargs.get(\"action\", \"Permit\"),\n \"--remote-subnet\", kwargs.get(\"remote_subnet\", \"0.0.0.0/0\"),\n \"--description\", \"auto-created endpoint\",\n \"--json\",\n ]\n return json_local(mkcmd(*args))\n\n def vm_endpoint_create(self, **kwargs):\n args = [\n \"azure\", \"vm\", \"endpoint\", \"create\",\n kwargs.get(\"vm_name\"),\n kwargs.get(\"lb_port\"),\n kwargs.get(\"vm_port\"),\n # \"--dns_name\",\n # \"--endpoint-name\",\n # \"--lb-set-name\",\n # \"--probe-port\",\n # \"--probe-protocol\",\n # \"--probe-path\",\n # \"--endpoint-protocol\",\n # \"--enable-direct-server-return\",\n # \"--internal-load-balancer-name\",\n \"--json\",\n ]\n return json_local(mkcmd(*args))\n\n def vm_endpoint_create_multiple(self, **kwargs):\n args = [\n \"azure\", \"vm\", \"endpoint\", \"create-multiple\",\n kwargs.get(\"vm_name\"),\n ]\n for lb_port, vm_port in kwargs[\"ports\"].iteritems():\n args.append(\"%d:%d\" % (lb_port, vm_port))\n return json_local(mkcmd(*args))\n\n def vm_endpoint_list(self, **kwargs):\n args = [\n \"azure\", \"vm\", \"endpoint\", \"list\",\n kwargs.get(\"vm_name\"),\n \"--json\",\n ]\n return json_local(mkcmd(*args))\n\n def 
vm_show(self, **kwargs):\n args = [\n \"azure\", \"vm\", \"show\",\n kwargs.get(\"name\"),\n #\"--subscription\", self.default_subscription,\n \"--json\",\n ]\n return json_local(mkcmd(*args))\n","sub_path":"azure.py","file_name":"azure.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"80699729","text":"# -*- coding: utf-8 -*-\n\"\"\"The purpose of this module is to provide the necessary functions to\n allow any spider to stop scraping once reached a certain point.\n\n The cases considered until the moment are:\n 1- Found expert review equal or older that the newest\n expert review in our database for that particular source.\n (For this to be correct the website\n must have expert reviews sorted by date)\n 2- Found an user review equal or older that the newest user review\n in our database for a particular product and source.\n (For this to be correct the product page\n in the website must have user reviews sorted by date)\n 3- Found source_internal_id already existing in the DB.\n (For this approach to be correct the website\n must have products sorted by newest first.\n Probably hard to find websites where to use it)\n 4- Found review_id already existing in the DB.\n (For this approach to be correct the website\n must have reviews sorted by newest first.\n Probably redundant with number 2 unless we are missing review date.)\n\"\"\"\nfrom datetime import datetime\nfrom distutils.util import strtobool\n\n\ndef get_latest_pro_review_date(mysql_manager, source_id, category=None):\n if category:\n query = \"\"\"SELECT r.TestDate\n FROM review.reviews r\n JOIN review.products p ON p.id=r.prdid\n WHERE r.source_id=%s AND\n r.DBaseCategoryName IN ('PRO', 'VPRO') AND\n p.OriginalCategoryName=%s\n ORDER BY r.TestDate DESC\n LIMIT 1\"\"\"\n last_review = mysql_manager.execute_select(query, (source_id,\n category['category_path']))\n else:\n query = \"\"\"SELECT TestDate\n FROM review.reviews\n WHERE source_id=%s AND DBaseCategoryName IN ('PRO', 'VPRO')\n ORDER BY TestDate DESC\n LIMIT 1\"\"\"\n last_review = mysql_manager.execute_select(query, [source_id])\n\n if last_review:\n return last_review[0]['TestDate']\n return datetime.min\n\n\ndef get_incremental(mysql_manager, source_id, kind_name, kind_value):\n incremental_kind = mysql_manager.execute_select(\n \"\"\"SELECT inc.id_value\n FROM review.product_id pi\n JOIN review.products p ON p.id=pi.prdid\n JOIN review.kinds k ON k.Kind = pi.Kind and k.Name =%s\n JOIN review.product_id inc ON pi.prdid=inc.prdid AND pi.kind=1056\n WHERE p.source_id=%s AND pi.ID_value_orig = %s\n LIMIT 1\"\"\", (kind_name, str(source_id), kind_value))\n\n if incremental_kind:\n return strtobool(incremental_kind[0]['id_value'])\n return None\n\n\ndef update_incremental(mysql_manager, source_id, kind_name, kind_value,\n incremental):\n update_incremental_query = \"\"\"UPDATE review.product_id pi\n JOIN review.products p ON p.id=pi.prdid\n JOIN review.kinds k ON k.Kind = pi.Kind and k.Name =%s\n JOIN review.product_id inc ON pi.prdid=inc.prdid AND pi.kind=1056\n SET inc.id_value='%s' AND pi.ID_value_orig='%s'\n WHERE p.source_id=%s AND pi.ID_value_orig = %s\"\"\"\n\n params = (kind_name, incremental, incremental, str(source_id), kind_value)\n\n connection, cursor = mysql_manager.start_transaction()\n mysql_manager.execute_transaction(connection, cursor,\n update_incremental_query,\n args=params)\n mysql_manager.commit_transaction(connection, cursor)\n\n\ndef 
get_latest_user_review_date(mysql_manager, source_id,\n kind_name, kind_value):\n last_review = mysql_manager.execute_select(\n \"SELECT r.TestDateText, r.TestDate\\\n FROM review.reviews r\\\n JOIN review.product_id pi ON r.PrdId = pi.PrdId\\\n JOIN review.kinds k ON k.Kind = pi.Kind and k.Name =%s\\\n WHERE source_id=%s AND DBaseCategoryName='USER' AND pi.ID_value_orig = %s\\\n ORDER BY TestDate DESC\\\n LIMIT 1\",\n (kind_name, str(source_id), kind_value))\n\n if last_review and last_review[0]['TestDate']:\n return last_review[0]['TestDate']\n return datetime.min\n\n\ndef get_latest_user_review_date_by_sii(mysql_manager, source_id,\n source_internal_id):\n last_review = mysql_manager.execute_select(\n \"\"\"SELECT r.TestDateText, r.TestDate\n FROM review.products_lookup pl\n JOIN review.reviews r ON r.PrdId = pl.PrdId\n WHERE pl.source_id=%s AND pl.source_internal_id = %s\n AND r.DBaseCategoryName='USER'\n ORDER BY TestDate DESC\n LIMIT 1\"\"\", (str(source_id), source_internal_id))\n\n if last_review:\n return last_review[0]['TestDate']\n return datetime.min\n\n\ndef get_all_review_urls(mysql_manager, source_id, review_type=None):\n if not review_type:\n results = mysql_manager.execute_select(\n \"\"\"SELECT TestUrl\n FROM review.reviews\n WHERE source_id = %s\"\"\", (str(source_id))\n )\n else:\n results = mysql_manager.execute_select(\n \"\"\"SELECT TestUrl\n FROM review.reviews\n WHERE source_id = %s\n AND DBaseCategoryName = %s\"\"\", (str(source_id), review_type)\n )\n\n return set(result['TestUrl'].rstrip('/') for result in results)\n\n\ndef is_product_in_db(mysql_manager, source_id, kind_name, kind_value):\n product_count = mysql_manager.execute_select(\n \"\"\"SELECT count(0) as count\n FROM review.products p\n JOIN review.product_id pi ON p.id = pi.PrdId\n JOIN review.kinds k ON k.Kind = pi.Kind and k.Name =%s\n WHERE source_id=%s AND pi.ID_value_orig = %s\"\"\",\n (kind_name, str(source_id), kind_value))\n\n if product_count:\n return (product_count[0]['count'] > 0)\n return None\n\n\ndef is_product_in_db_by_sii(mysql_manager, source_id, sii):\n product_count = mysql_manager.execute_select(\n \"\"\"SELECT count(0) as count\n FROM review.products p\n JOIN review.products_lookup pl ON p.id = pl.prdid\n WHERE pl.source_id=%s AND pl.source_internal_id=%s\"\"\",\n (str(source_id), sii))\n return (product_count[0]['count'] > 0)\n\n\ndef is_review_in_db(mysql_manager, source_id, review_internal_id):\n \"\"\"Not implemented, there is no table for storing\n review_internal_id in the DB yet\"\"\"\n pass\n\n\ndef get_latest_review_date(mysql_manager, source_id):\n \"\"\"We have been using this function: get_latest_pro_review_date\n for a long time, but this function only fetch the latest \n review date of PRO review. 
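# --- Hedged aside: stopping-case 1 from the module docstring above, sketched as a
# guard a spider could call per parsed review. Assumes the spider holds a
# `mysql_manager` and that the site lists expert reviews newest-first;
# get_latest_pro_review_date() is defined in this module, and CloseSpider is
# scrapy's standard way to end a crawl early.
from scrapy.exceptions import CloseSpider

def stop_if_already_scraped(spider, review_date, source_id):
    latest_seen = get_latest_pro_review_date(spider.mysql_manager, source_id)
    if review_date <= latest_seen:
        # reviews are date-sorted, so everything after this point is already stored
        raise CloseSpider(reason='reached already-scraped reviews')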
Obviously, we can simplify this function\n to fetch both the latest PRO and USER review dates.\n \"\"\"\n query = \"\"\"select TestDate from review.reviews where Source_ID = %s\n order by TestDate desc limit 1;\n \"\"\"\n last_review = mysql_manager.execute_select(query, [source_id])\n if last_review:\n return last_review[0]['TestDate']\n return datetime.min\n\n\ndef get_youtube_channel_id_and_search_str(mysql_manager):\n channel_ids = []\n url_formats = []\n search_strings = []\n default_categories = []\n\n query = \"select * from review.youtube_channels;\"\n rows = mysql_manager.execute_select(query)\n for row in rows:\n channel_id = row['youtube_channel_id']\n url_format = row['url_format']\n search_string = row['search_string']\n category = row['default_category']\n channel_ids.append(channel_id)\n url_formats.append(url_format)\n search_strings.append(search_string)\n default_categories.append(category)\n return channel_ids, url_formats, \\\n search_strings, default_categories\n","sub_path":"alascrapy/lib/dao/incremental_scraping.py","file_name":"incremental_scraping.py","file_ext":"py","file_size_in_byte":7892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"418669422","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport logging\nimport json\n\nfrom core.config import config\n\nRootDir = os.path.dirname(os.path.abspath(sys.argv[0])) or '.'\n\n\nclass Core(object):\n \"\"\"\n Core provides the following functions:\n - reads & loads configuration\n - provides a simple api for webapp\n - methods for updating config\n \"\"\"\n def __init__(self):\n self.Config = config.get_config() # loads the config at start\n self.RootDir = RootDir\n\n def logger(self):\n \"\"\"Init a logger that writes to stdout.\"\"\"\n # Logger for output in console.\n log = logging.getLogger('general')\n infohandler = logging.StreamHandler(sys.stdout)\n log.setLevel(logging.INFO)\n infoformatter = logging.Formatter(\"%(message)s\")\n infohandler.setFormatter(infoformatter)\n log.addHandler(infohandler)\n\n\nif __name__ == \"__main__\":\n s = Core()\n","sub_path":"core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"31028758","text":"from django.urls import path,re_path\nfrom Pj_information import views\n\napp_name = 'Pj_information'\nurlpatterns = [\n # Map the app's own URLs: define which view handles each URL\n path('informationlist/', views.InformationList.as_view(), name='informationlist'),\n path('informationadd/', views.InformationAdd.as_view(), name='informationadd'),\n re_path('informationmod/(?P<id>[0-9]+)?/', views.InforMod.as_view(), name='informationmod'),\n]\n\n","sub_path":"day7/devops/Pj_information/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"523546345","text":"# -*- coding: utf-8 -*-\n\nfrom .main import solution as solve\nfrom .main import closest_k_pairs, get_index_of_quantile, show_linkage, distance_matrix\n\n__version__ = \"19.0.2.dev0\"\n\n__title__ = \"closely\"\n__description__ = \"Closely find closest pairs of points, eg duplicates, in a dataset\"\n__url__ = \"https://github.com/justinshenk/closely\"\n__uri__ = __url__\n__doc__ = __description__ + \" <\" + __url__ + \">\"\n\n__author__ = \"Justin Shenk\"\n__email__ = 
\"shenkjustin@gmail.com\"\n\n__license__ = \"MIT\"\n__copyright__ = \"Copyright (c) 2019 \" + __author__\n\n__all__ = [\n \"solve\",\n \"closest_k_pairs\",\n \"get_index_of_quantile\",\n \"show_linkage\",\n \"distance_matrix\",\n]\n","sub_path":"src/closely/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"212318970","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QDoubleValidator\nfrom PyQt5.QtCore import QStandardPaths\nimport math \nimport os\n\nappSettingsDir = os.path.join(os.path.abspath(QStandardPaths.writableLocation(QStandardPaths.AppDataLocation)), 'ShapeInfo')\nlogFileName = os.path.join(appSettingsDir, 'log.txt')\n\nCircleChoice = 'Circle'\nSquareChoice = 'Square'\nTriangleChoice = 'Triangle'\n\noutputTextArea = None\nlogTextArea = None\ninputGroupBox = None\ninputGroupBoxLayout = None\ndoubleValidator = QDoubleValidator()\nradiusLineEdit = None\nsideLineEdit = None\n\nclass triangleUI:\n baseLineEdit = None\n heightLineEdit = None\n\ndef ensureDirExists(dir):\n os.makedirs(dir, exist_ok=True)\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\ndef addLogText(str):\n logTextArea.insertPlainText(str)\n logTextArea.verticalScrollBar().setValue(logTextArea.verticalScrollBar().maximum())\n\n ensureDirExists(appSettingsDir)\n \n with open(logFileName, 'wt') as logFile:\n logFile.write(logTextArea.toPlainText())\n \n\ndef circle(r):\n outputTextArea.insertPlainText('Circle Radius: {}\\n'.format(r))\n\n circumference = r * 2 * math.pi\n outputTextArea.insertPlainText(' Circumference: {}\\n'.format(circumference))\n \n area = r * r * math.pi\n outputTextArea.insertPlainText(' Area: {}\\n'.format(area))\n \ndef square(s):\n\n outputTextArea.insertPlainText('Square Side Length: {}\\n'.format(s))\n\n area = s * s\n outputTextArea.insertPlainText(' Area: {}\\n'.format(area))\n\n perimeter = s * 4\n outputTextArea.insertPlainText(' Perimeter: {}\\n'.format(perimeter))\n \n\ndef triangle(b, h):\n\n outputTextArea.insertPlainText('Triangle Base and Height: {}, {}\\n'.format(b, h))\n\n area = b * h / 2\n outputTextArea.insertPlainText(' Area: {}\\n'.format(area))\n\ndef onRadiusChanged(radiusText):\n outputTextArea.clear()\n if isfloat(radiusText):\n circle(float(radiusText))\n\ndef onRadiusReturn():\n global radiusLineEdit\n if isfloat(radiusLineEdit.text()):\n addLogText('Circle, Radius {}\\n'.format(float(radiusLineEdit.text())))\n\n\ndef onSideChanged(sideText):\n outputTextArea.clear()\n if isfloat(sideText):\n square(float(sideText))\n\ndef onSideReturn():\n global sideLineEdit\n if isfloat(sideLineEdit.text()):\n addLogText('Square, Side Length {}\\n'.format(float(sideLineEdit.text())))\n \n\ndef onTriangleBaseOrHeightChanged(baseText, heightText):\n outputTextArea.clear()\n if isfloat(baseText) and isfloat(heightText):\n \n base = float(baseText)\n height = float(heightText)\n \n triangle(base, height)\n\ndef onTriangleBaseOrHeightReturnInternal(baseText, heightText):\n if isfloat(baseText) and isfloat(heightText):\n \n base = float(baseText)\n height = float(heightText)\n \n addLogText('Triangle, Base and Height: {}, {}\\n'.format(base, height))\n \n\ndef onBaseChanged(baseText):\n onTriangleBaseOrHeightChanged(triangleUI.baseLineEdit.text(), triangleUI.heightLineEdit.text())\n\ndef onHeightChanged(heightText):\n onTriangleBaseOrHeightChanged(triangleUI.baseLineEdit.text(), 
triangleUI.heightLineEdit.text())\n\ndef onTriangleBaseOrHeightReturn():\n onTriangleBaseOrHeightReturnInternal(triangleUI.baseLineEdit.text(), triangleUI.heightLineEdit.text())\n\ndef onShapeSelected(shape):\n\n outputTextArea.clear()\n\n if inputGroupBoxLayout:\n for i in reversed(range(inputGroupBoxLayout.count())): \n inputGroupBoxLayout.itemAt(i).widget().deleteLater()\n \n if shape == CircleChoice:\n global radiusLineEdit\n \n radiusLabel = QLabel('Radius:', inputGroupBox)\n inputGroupBoxLayout.addWidget(radiusLabel)\n\n radiusLineEdit = QLineEdit(inputGroupBox)\n radiusLineEdit.setValidator(doubleValidator)\n inputGroupBoxLayout.addWidget(radiusLineEdit)\n\n radiusLineEdit.textChanged.connect(onRadiusChanged)\n radiusLineEdit.returnPressed.connect(onRadiusReturn)\n\n\n if shape == SquareChoice:\n global sideLineEdit\n \n sideLabel = QLabel('Side:', inputGroupBox)\n inputGroupBoxLayout.addWidget(sideLabel)\n\n sideLineEdit = QLineEdit(inputGroupBox)\n sideLineEdit.setValidator(doubleValidator)\n inputGroupBoxLayout.addWidget(sideLineEdit)\n\n sideLineEdit.textChanged.connect(onSideChanged)\n sideLineEdit.returnPressed.connect(onSideReturn)\n \n if shape == TriangleChoice:\n\n baseLabel = QLabel('Base:', inputGroupBox)\n inputGroupBoxLayout.addWidget(baseLabel)\n\n triangleUI.baseLineEdit = QLineEdit(inputGroupBox)\n triangleUI.baseLineEdit.setValidator(doubleValidator)\n inputGroupBoxLayout.addWidget(triangleUI.baseLineEdit)\n\n triangleUI.baseLineEdit.textChanged.connect(onBaseChanged)\n triangleUI.baseLineEdit.returnPressed.connect(onTriangleBaseOrHeightReturn)\n \n heightLabel = QLabel('Height:', inputGroupBox)\n inputGroupBoxLayout.addWidget(heightLabel)\n\n triangleUI.heightLineEdit = QLineEdit(inputGroupBox)\n triangleUI.heightLineEdit.setValidator(doubleValidator)\n inputGroupBoxLayout.addWidget(triangleUI.heightLineEdit)\n\n triangleUI.heightLineEdit.textChanged.connect(onHeightChanged)\n triangleUI.heightLineEdit.returnPressed.connect(onTriangleBaseOrHeightReturn)\n\nchoices = [CircleChoice, SquareChoice, TriangleChoice]\n\napp = QApplication([])\n\nwindow = QMainWindow()\nwindow.resize(800, 600)\nwindow.setWindowTitle('ShapeInfo')\n\nmainWidget = QWidget()\nwindow.setCentralWidget(mainWidget)\n\nlayout = QVBoxLayout()\n\ntitleLabel = QLabel('Welcome to ShapeInfo Version 0.3.2', mainWidget)\nlayout.addWidget(titleLabel)\n\nshapeCombo = QComboBox(mainWidget)\nshapeCombo.addItems(choices)\nlayout.addWidget(shapeCombo)\n\ninputGroupBox = QGroupBox(mainWidget)\ninputGroupBox.setObjectName('inputGroupBox')\ninputGroupBoxLayout = QHBoxLayout(inputGroupBox)\ninputGroupBox.setTitle('Input')\nlayout.addWidget(inputGroupBox)\n\noutputGroupBox = QGroupBox(mainWidget)\noutputGroupBox.setTitle('Output')\noutputGroupBoxLayout = QVBoxLayout(outputGroupBox)\noutputTextArea = QPlainTextEdit(outputGroupBox)\noutputTextArea.setReadOnly(True)\noutputGroupBoxLayout.addWidget(outputTextArea)\nlayout.addWidget(outputGroupBox)\n\nlogGroupBox = QGroupBox(mainWidget)\nlogGroupBox.setTitle('Log')\nlogGroupBoxLayout = QVBoxLayout(logGroupBox)\nlogTextArea = QPlainTextEdit(logGroupBox)\nlogTextArea.setReadOnly(True)\nlogGroupBoxLayout.addWidget(logTextArea)\nlayout.addWidget(logGroupBox)\n\nshapeCombo.currentTextChanged.connect(onShapeSelected)\nonShapeSelected(shapeCombo.currentText())\n\nif os.path.exists(logFileName):\n with open(logFileName, 'rt') as logFile:\n logFileContents = logFile.read()\n logTextArea.insertPlainText(logFileContents)\n 
logTextArea.verticalScrollBar().setValue(logTextArea.verticalScrollBar().maximum())\n \n\nmainWidget.setLayout(layout)\nwindow.show()\n\napp.exec_()\n","sub_path":"ShapeInfo.py","file_name":"ShapeInfo.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"532773719","text":"arr = [' * ', ' * * ', '***** ']\nN = int(input())\n\ndef star():\n for i in range(0, len(arr)):\n arr.append(arr[i] + arr[i])\n arr[i] = temp + arr[i] + temp\n\ndef numCo(N):\n co = int(N/3)\n co1 = 0\n while 1:\n co = int(co / 2)\n co1 = co1 + 1\n if co == 1:\n return co1\n\nif N == 3:\n for i in arr: print(i)\nelse:\n num = numCo(N)\n space = 3\n for i in range(0, num):\n # temp = \" \" * int((i+1)*3)\n temp = \" \" * space\n space = space * 2\n star()\n for i in arr: print(i)\n","sub_path":"baekjoon2448.py","file_name":"baekjoon2448.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"457517881","text":"import re\n\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom bot.items import LinhaItem\nfrom scrapy.http import Request\n\n\nclass PeludaSpider(CrawlSpider):\n name = 'peluda'\n allowed_domains = ['www.athenaspaulista.com.br']\n start_urls = ['http://www.athenaspaulista.com.br/LINHAS/Linhas.htm']\n\n #~ rules = (\n #~ Rule(SgmlLinkExtractor(allow=r'Linhas.htm'),\n #~ callback='parse', follow=True),\n #~ )\n\n def parse_routes(self, response):\n \"\"\"Parses links containing busline route information\n response.meta = dictionary of the type -- (busline, departure): arrival.\n (busline, departure) work as a relational database primary key.\"\"\"\n\n hxs = HtmlXPathSelector(response)\n i = response.meta['partial_item']\n i['ida'] = hxs.select('.//div[3]/table/tr//text()').extract()\n i['volta'] = hxs.select('.//div[5]/table/tr//text()').extract()\n \n \n yield i\n\n def parse(self, response):\n \"\"\" Parses the website containing the itineraries\n Uses parse_item as callback for each weblink containing busline\n routes and yields the respective LinhaItems.\n \"\"\"\n\n hxs = HtmlXPathSelector(response)\n \n buslines = hxs.select('//td[contains(@width, \"56\")]/p/b/span/text()')\n buslines = buslines.extract()\n buslinefilter = re.compile('\\d\\d\\d\\d')\n buslines = filter(buslinefilter.search, buslines)\n departures = hxs.select('//td[contains(@width, \"52\")]')\n departures = departures.select('./p/span/text()').extract()\n timefilter = re.compile('\\d\\d:\\d\\d', re.IGNORECASE)\n departures = filter(timefilter.search, departures)\n departures.append('-') # the last 5871 busline has an undefined departure time!\n\n arrivals = hxs.select('//td[contains(@width, \"50\")]/p/span/text()')\n arrivals = arrivals.extract()\n arrivals = filter(timefilter.search, arrivals)\n\n routeURLs = hxs.select('//td[contains(@width, \"391\")]/p/span/a/@href')\n routeURLs = routeURLs.extract()\n URLprefix = 'http://www.athenaspaulista.com.br/LINHAS/'\n routeURLs = [URLprefix + i for i in routeURLs]\n\n desc_path = '//td[contains(@width, \"391\")]/p/span/a/text()'\n descriptions = hxs.select(desc_path).extract()\n descriptions = [i.replace('\\r\\n', '') for i in descriptions]\n\n assert len(arrivals) == len(departures) == len(buslines), \"Not the same ammount of buslines, departures or arrivals\"\n index = 0\n while index < 
len(arrivals):\n item = LinhaItem()\n item['linha'] = buslines[index]\n item['mpartida'] = departures[index]\n item['mchegada'] = arrivals[index]\n item['link'] = routeURLs[index]\n item['nome'] = descriptions[index]\n if 'X' in item['nome'] and '-' in item['nome']:\n item['origem'], temp = item['nome'].split('X')\n temp = temp.split('-')\n item['destino'], item['via'] = temp[0], temp[1]\n item = Request(item['link'],\n callback=self.parse_routes,\n meta={'partial_item': item})\n index += 1\n yield item\n","sub_path":"bot/bot/spiders/peluda.py","file_name":"peluda.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"335087652","text":"import numpy as np\nfrom os import path\n\nfrom pylearn2.config import yaml_parse\nfrom pylearn2.utils import serial\n\ndef train_yaml(yaml_file):\n train = yaml_parse.load(yaml_file)\n train.main_loop()\n\ndef train(yaml_file, save_path):\n yaml = open(yaml_file, \"r\").read()\n\n data_path = serial.preprocess(\"${PYLEARN2_NI_PATH}/smri\")\n mask_file = path.join(data_path, \"mask.npy\")\n mask = np.load(mask_file)\n input_dim = len(np.where(mask.flatten() == 1)[0].tolist())\n del mask\n\n hyperparams = {\"nvis\": input_dim,\n \"batch_size\": 5,\n \"detector_layer_dim\": 64,\n \"monitoring_batches\": 5,\n \"save_path\": save_path,\n \"max_epochs\": 300\n }\n yaml = yaml % hyperparams\n train_yaml(yaml)\n\ndef train_rbm():\n yaml_file = path.join(path.abspath(path.dirname(__file__)), \"rbm.yaml\")\n save_path = path.abspath(path.dirname(__file__))\n train(yaml_file, save_path)\n\nif __name__ == \"__main__\":\n train_rbm()\n","sub_path":"pylearn2/neuroimaging_utils/tutorials/rbm_sMRI/train_rbm.py","file_name":"train_rbm.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"29817117","text":"def solution(n, computers):\n\tnetwork_count=0\n\t#visited=[False for _ in range(n)]\n\tadjacency_list = {}\n\t\n\tfor i in range(n):\n\t\tadjacency_list[i] = []\n\t\tfor j in range(len(computers[i])):\n\t\t\tif computers[i][j]==1 and i!=j:\n\t\t\t\tadjacency_list[i].append(j)\n\n\tvisited = []\n\tfor node in adjacency_list:\n\t\tif node in visited:\n\t\t\tcontinue\n\n\t\tto_visit = [node]\n\n\t\twhile to_visit:\n\t\t\tu = to_visit.pop()\n\t\t\tvisited.append(u)\n\t\t\tfor v in adjacency_list[u]:\n\t\t\t\tif v not in visited+to_visit:\n\t\t\t\t\tto_visit.append(v)\n\t\t\t\t\t\n\t\tnetwork_count += 1\n\t\t\t\t\n\treturn network_count\n\nn=3\ncomputers = [[1, 1, 0], [1, 1, 0], [0, 0, 1]]\nprint(solution(n, computers))","sub_path":"Programmer's high score kit/Searching_DFS_BFS_N02.py","file_name":"Searching_DFS_BFS_N02.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"320371115","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass TimeStampModel(models.Model):\n\n created_date = models.DateTimeField(auto_now_add=True)\n updated_date = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass Discussion(TimeStampModel):\n\n\ttitle = models.CharField(\n\t\tmax_length=254,\n\t\tverbose_name='Discussion Title'\n\t\t)\n\tcreator = models.ForeignKey(\n\t\tUser,\n\t\ton_delete=models.CASCADE\n\t\t)\n\n\tclass Meta:\n\t\tdb_table = 'discussion'\n\n\nclass Post(TimeStampModel):\n\tdiscussion = 
models.ForeignKey(\n\t\tDiscussion,\n\t\ton_delete=models.CASCADE, \n\t\trelated_name='posts'\n\n\t\t)\n\tcontent = models.TextField(\n\t\tmax_length=1000,\n\t\tverbose_name='Post Content'\n\t\t)\n\tcreator = models.ForeignKey(\n\t\tUser,\n\t\ton_delete=models.CASCADE\n\t\t)\n\n\tclass Meta:\n\t\tdb_table = 'post'\n\n\n","sub_path":"websocketaspire/forum/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"150692037","text":"import unittest\n\nfrom MiscUtils.CSVParser import CSVParser, ParseError\n\n\nclass CSVParserTests(unittest.TestCase):\n \"\"\"CSV parser tests.\n\n TO DO\n\n * Test the different options for parser, see CSVParser.__init__().\n \"\"\"\n\n def setUp(self):\n self.parse = CSVParser().parse\n\n def testNegatives(self):\n inputs = [\n '\"\"a',\n '\"a\"b',\n 'a\\n,b'\n ]\n for inp in inputs:\n try:\n results = self.parse(inp)\n except ParseError:\n pass\n else:\n print()\n print('results:', repr(results))\n raise Exception(f'Did not get an exception for: {inp!r}')\n\n def testPositives(self):\n tests = [\n # basics\n ('', []),\n (',', ['', '']),\n (',,', ['', '', '']),\n ('a', ['a']),\n ('a,b', ['a', 'b']),\n ('a,b,c,d,e,f', 'a b c d e f'.split()),\n\n # surrounding whitespace\n (' a', ['a']),\n ('a ', ['a']),\n (' a ', ['a']),\n ('a, b', ['a', 'b']),\n (' a , b ', ['a', 'b']),\n\n # commas in fields\n ('\",\"', [',']),\n ('\",\",\",\"', [',', ',']),\n ('\"a , b\",b', ['a , b', 'b']),\n\n # quotes in fields\n ('\"\"\"\"', ['\"']),\n ('\"\"\"\"\"\"', ['\"\"']),\n ('\"\"\"a\"\"\",b,\"\"\"c\"\"\"', ['\"a\"', 'b', '\"c\"']),\n\n # single line combos\n (' \"a\", \"b\"', ['a', 'b']),\n (' \"\"\"\"', ['\"']),\n ('\"\"\"\" ', ['\"']),\n (' \"\"\"\" ', ['\"']),\n (' \"\"\"a\"\"\", \"\"\"b\"\"\"', ['\"a\"', '\"b\"']),\n (' \"\"\"\", \",\", \"\"\",\"\"\"', ['\"', ',', '\",\"']),\n\n # comments\n ('#a,b', []),\n\n # multiple line records\n ('\"a\\nb\"', ['a\\nb']),\n ('a,\"b\\nc\"', ['a', 'b\\nc']),\n ('a,\"b\\nc\\n\\n\\n\"', ['a', 'b\\nc']),\n\n # MiddleKit enums\n ('a,Enums=\"b\"', ['a', 'Enums=\"b\"']),\n (\"a,Enums='b'\", ['a', \"Enums='b'\"]),\n ('a,\"Enums=\"\"b, c\"\"\"', ['a', 'Enums=\"b, c\"']),\n ('''a,\"Enums='b, c'\"''', ['a', \"Enums='b, c'\"]),\n ]\n for inp, out in tests:\n if '\\n' not in inp:\n # single line\n res = self.parse(inp)\n self.assertEqual(\n res, out,\n f'\\ninput={inp!r}\\nresult={res!r}\\noutput={out!r}')\n res = self.parse(inp+'\\n')\n self.assertEqual(\n res, out,\n f'\\ninput={inp!r}\\nresult={res!r}\\noutput={out!r}')\n else:\n # multiple lines\n gotFields = False\n for line in inp.splitlines():\n self.assertFalse(gotFields)\n res = self.parse(line)\n if res is not None:\n gotFields = True\n self.assertTrue(gotFields)\n self.assertEqual(\n res, out,\n f'\\ninput={inp!r}\\nresult={res!r}\\noutput={out!r}')\n","sub_path":"webware/MiscUtils/Tests/TestCSVParser.py","file_name":"TestCSVParser.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"550321424","text":"from rest_framework import serializers\n\nfrom .models import (\n\tSearchResult,\n)\n\n\nclass SearchResultSerializer(serializers.ModelSerializer):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(SearchResultSerializer, self).__init__(*args, **kwargs)\n\n\tclass Meta:\n\t\tmodel = SearchResult\n\t\tfields = 
(\n\t\t\t\"title\",\n\t\t\t\"description\",\n\t\t\t\"publish_time\",\n\t\t\t\"thumbnail_url\",\n\t\t\t\"video_id\",\n\t\t)\n\n","sub_path":"apps/search/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"265936366","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport pandas as pd \nimport imutils\nimport cv2\nfrom enum import Enum\n\n\n# In[2]:\n\n\nimgcount = 0\n\nbasepath = r\"C:\\Users\\Admin\\Documents\\Master\\Sommer\\WPM-Advanced Programing\\bio\"\n\nchrdestpath = join(basepath, r'chromosomes\\test')\nchrdestfilename = \"chr_{0:0=5d}.png\".format(imgcount);\n\nteldestpath = join(basepath, r'telomeres\\test')\nteldestfilename = \"png_{0:0=5d}.png\".format(imgcount);\n\nassert os.path.isdir(chrdestpath)\nassert os.path.isdir(teldestpath)\n\norignamechr = 'orig0_chr_adj_1700-1000'\norignametel = 'orig0_tel_adj_1700-1000'\n\nchrsourcepath = join(basepath, r'chromosomes\\orig')\nchrsourcefilename = orignamechr + '.png'\nchrsourcefullpath = join(chrsourcepath, chrsourcefilename)\n\ntelsourcepath = join(basepath, r'telomeres\\orig')\ntelsourcefilename = orignametel + '.png'\ntelsourcefullpath = join(telsourcepath, telsourcefilename)\n\n\nassert os.path.isdir(chrsourcepath)\nassert os.path.isdir(telsourcepath)\nassert os.path.isfile(chrsourcefullpath)\nassert os.path.isfile(telsourcefullpath)\n\nf_name = \"/home/inf/Bilder/bio1/data/test_data.csv\"\n\nchrimg = cv2.imread(chrsourcefullpath,1)\ntelimg = cv2.imread(telsourcefullpath,1)\n\nassert chrimg.shape == telimg.shape\n\nheight= chrimg.shape[0]\nwidth= chrimg.shape[1]\n\nprint(height)\nprint(width)\n\nrectanglesize=min(height, width)//6\ncrosssize=rectanglesize//12\ncrossthick=1\nimgsize = 128\nrectsize = 80\nstartpoint = (500,240)\nendpoint = (startpoint[0]+rectsize, startpoint[1]+rectsize )\n\n\n# In[3]:\n\n\nassert(os.path.isfile(chrsourcefullpath))\nassert(os.path.isfile(telsourcefullpath))\n\nchrimg = cv2.imread(chrsourcefullpath,1)\ntelimg = cv2.imread(telsourcefullpath,1)\n\nchrimgrect = chrimg.copy()\ntelimgrect = telimg.copy()\n\n\n# In[4]:\n\n\nnumofrects = min(int((chrimgrect.shape[1] - 2*startpoint[0])/rectsize),int((chrimgrect.shape[0] - 2*startpoint[1])/rectsize))\nprint(numofrects)\n \n\n\n# In[5]:\n\n\ndef grid(chrimgrect, startpoint, rectsize, numofrects = None):\n positions = []\n if numofrects == None:\n numofrects = min(int((chrimgrect.shape[1] - 2*startpoint[0])/rectsize),int((chrimgrect.shape[0] - 2*startpoint[1])/rectsize))\n for i in range(numofrects):\n for j in range(numofrects):\n polygon = []\n spoint = (startpoint[0]+i*rectsize, startpoint[1]+j*rectsize )\n epoint = (startpoint[0]+i*rectsize + rectsize, startpoint[1]+j*rectsize + rectsize)\n polygon.append(spoint)\n polygon.append(epoint)\n cnt = np.array(polygon)\n positions.append(cv2.boundingRect(cnt))\n #_,_,_,_ = cv2.boundingRect(cnt)\n chrimgrect = cv2.rectangle(chrimgrect, spoint, epoint, (255,0,0), 1) \n return chrimgrect, positions, numofrects\n\n\n# In[6]:\n\n\npositions = []\nnumofrects = None\n\nwhile True:\n chrimgrect = chrimg.copy()\n positions.clear()\n chrimgrect, positions, numofrects = grid(chrimgrect, startpoint, rectsize, numofrects)\n cv2.imshow('chrimage',chrimgrect) \n key=cv2.waitKeyEx(100)\n\n if key & 0xFF == ord(\"q\") : \n break\n if key & 0xFF == ord(\"s\") :\n i = 0\n for position in 
positions:\n single = np.zeros((rectsize,rectsize,chrimg.shape[2]), dtype=\"uint8\")\n singlenorm = np.zeros((128,128,chrimg.shape[2]), dtype=\"uint8\")\n x,y,w,h = position\n assert (w-1) == rectsize and (h-1) == rectsize\n single = chrimg.copy()[y:y+h-1,x:x+w-1,:]\n assert single.shape[0] == rectsize and single.shape[1] == rectsize\n fname = \"c{}_\".format(orignamechr) + \"{0:0=5d}.png\".format(i)\n singlenorm = cv2.resize(single,(128,128), interpolation = cv2.INTER_AREA)\n cv2.imwrite(join(chrdestpath,fname), singlenorm) \n single = telimg.copy()[y:y+h-1,x:x+w-1,:]\n fname = \"t{}_\".format(orignametel) + \"{0:0=5d}.png\".format(i) \n singlenorm = cv2.resize(single,(128,128), interpolation = cv2.INTER_AREA)\n cv2.imwrite(join(teldestpath,fname), singlenorm)\n i += 1\n break\n if key & 0xFF == ord(\"+\") : \n numofrects += 1\n if key & 0xFF == ord(\"-\") :\n if numofrects > 0:\n numofrects -= 1 \n if key == 2424832 : \n if startpoint[0] - 20 > 0:\n startpoint = (startpoint[0] - 20, startpoint[1] )\n if key == 2490368 : \n if startpoint[1] - 20 > 0:\n startpoint = (startpoint[0], startpoint[1] - 20)\n if key == 2555904 : \n if startpoint[0] + 20 < chrimgrect.shape[1]:\n startpoint = (startpoint[0] + 20, startpoint[1])\n if key == 2621440 :\n if startpoint[1] + 20 < chrimgrect.shape[0]:\n startpoint = (startpoint[0], startpoint[1] + 20)\n\n\ncv2.destroyAllWindows() \n\n\n# In[7]:\n\n\ncv2.destroyAllWindows()\n\n\n# In[6]:\n\n\ncv2.imshow('telimage',telimgrect) \nkey=cv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"QFISHbio-preprocess.py","file_name":"QFISHbio-preprocess.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"585421035","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\n''' Natural language annotation for machine learning @ RUG, 2019 2B\n contact: L.Abzianidze@rug.nl\n'''\n\n# Usage\n# python3 unigram_sem_tagger.py --train train.txt --test test_untagged.txt\n\n# from collections import Counter\nimport argparse, re\nfrom collections import defaultdict, Counter\nfrom nltk.metrics.scores import accuracy\nimport random\n\n\n#################################\ndef parse_arguments():\n '''Read arguments from a command line'''\n parser = argparse.ArgumentParser(description='Read the files')\n parser.add_argument(\n '--train', metavar='FILE', required=True,\n help='File containing training set')\n parser.add_argument(\n '--test', metavar='FILE', required=True,\n help='File containing test set')\n return parser.parse_args()\n\n\n#################################\ndef read_token_tags(filename):\n '''Read toke-tag pairs from the file and return a list of (token, tag)'''\n tok_tags = []\n with open(filename, encoding='utf-8') as f:\n for line in f:\n # check is the line has the format of token + \\tab + 3 uppercase characters\n m = re.match('([^\\t]+)\\t([A-Z]{3})', line)\n if m:\n tok_tags.append((m.group(1), m.group(2)))\n # if the line doesn't have this form, nothing is added to the list\n return tok_tags\n\n\n#################################\ndef read_tokens(filename):\n '''Read tokes from the file and return a list of tokens'''\n toks = []\n with open(filename, encoding='utf-8') as f:\n for line in f:\n # check is the line has the format of token + \\n\n m = re.match('([^\\s]+)\\n', line)\n if m:\n toks.append(m.group(1))\n # if the line doesn't have this form, nothing is added to the list\n return toks\n\n\n#################################\ndef 
write_token_tags(tokens_filename, token_tags_filename, tokens, tags):\n '''Write token-tag pairs in token_tags_filename in the fashion it is done in tokens_filename'''\n with open(tokens_filename, encoding='utf-8') as f, open(\n token_tags_filename, 'w', encoding='utf-8') as w:\n i = 0 # index of the token-tag pairs\n for line in f:\n m = re.match('([^\\s]+)\\n', line)\n if m:\n # sanity check that tokens and tokens in the file are matching\n assert m.group(1) == tokens[\n i], \"Mismatch between tokens and tokens in the file\"\n w.write(\"{}\\t{}\\n\".format(tokens[i], tags[i]))\n i += 1\n else:\n # if the line doesn't have this form, copy in the writing file\n # such lines are empty lines and the lines with the document numbers\n w.write(line)\n print(\"{} is written\".format(token_tags_filename))\n\n\n##################################\ndef unigram_classifier(tok_tags):\n '''Reads the token-tag pairs and returns a dictionary of token:its_most_frequent_tag and the most frequent tag'''\n tok_tag_counter = defaultdict(Counter) # counts tags per token\n tag_counter = Counter() # counts tags\n for tok, tag in tok_tags:\n # if an entry with tok doesn't exist, it is created and set to emply Counter() by default and then udated with a tag\n tok_tag_counter[tok].update([tag])\n # update with a list of tags as update expects iterable as an arguent\n tag_counter.update([tag])\n # dictionary with tokens as keys and their most frequent tag as a value\n tok_mf_tag = {tok: most_common_tag_det(tag_cnt) for tok, tag_cnt in\n tok_tag_counter.items()}\n return tok_mf_tag, most_common_tag_det(tag_counter)\n\n\n##################################\ndef most_common_tag_det(counter):\n '''This is the desteministic version of Counter's most_common method'''\n # in case of [('EXS', 2), ('EPS', 2)], ('EPS', 2) will be first\n ordered = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n return ordered[0][0]\n\n\n##################################\ndef unigram_classify(train_tok_tags, test_toks):\n '''takes training token-tag pairs and test tokens and return unigram-predicted tags for the test tokens'''\n # get a 'model' for a unigram classifier\n tok_mf_tag, mf_tag = unigram_classifier(train_tok_tags)\n # for each token pick its most frequent tag. If a token was not seen before, assign the most frequent tag\n predicted_tags = [tok_mf_tag.get(tok, mf_tag) for tok in test_toks]\n return predicted_tags\n\n\n##################################\ndef cross_validation(tok_tags, n_fold, seed):\n '''takes training token-tag pairs and the number of folds.\n performs cross-validation and reports the accuracy for each fold-exepriment and the final average accuracy\n '''\n acc_list = []\n print(\"{}-fold cross-validation:\".format(n_fold))\n for train, test in kfold(tok_tags, n_fold, seed):\n test_toks, test_tags = zip(*test) # separate tokens from tags\n pred_tags = unigram_classify(train, test_toks)\n acc = accuracy(test_tags, pred_tags) * 100 # calculate accuarcy\n acc_list.append(acc)\n print(\"acc = {:.2f}%\".format(acc))\n av_acc = float(sum(acc_list)) / len(\n acc_list) # float division compatible with py2 and py3\n print(\"av. 
acc = {:.2f}%\".format(av_acc))\n return av_acc\n\n\n##################################\ndef kfold(alist, n, seed):\n '''create n pair of (train, test) from alist where train is n-1 times larger than test'''\n size = len(alist)\n step = size // n # integer division is used\n # The remainder of division is counted as a part of test set\n # for example, 21 samples with 10-fold cross validation will have test part of size 3\n test_size = step + (size % n)\n shuffled = random.Random(seed).sample(alist, k=len(\n alist)) # set the random seed to replicate results and shuffle\n for i in range(n):\n test = shuffled[i * step: i * step + test_size]\n train = shuffled[0:i * step] + shuffled[i * step + test_size:]\n yield train, test\n\n ################################\n\n\n############## MAIN ############\nif __name__ == '__main__':\n args = parse_arguments()\n tok_tags = read_token_tags(args.train)\n # do 10-fold cross-validation on the training set\n cross_validation(tok_tags, 10, 1)\n # predict tags for the test set\n toks = read_tokens(args.test)\n # train unigram classifier on the whole training set\n pred_tags = unigram_classify(tok_tags, toks)\n tagged_file_1g = \"tagged_1g.txt\"\n write_token_tags(args.test, tagged_file_1g, toks, pred_tags)\n\n","sub_path":"ex4/1g_sem_tagger.py","file_name":"1g_sem_tagger.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"643501721","text":"\"\"\"\nThe following classes are defined:\n SigmoidActivationNeuron\n SigmoidActivationLayer\n\"\"\"\n\nfrom math import exp\nfrom ..utils.validate import *\n\n\nclass SigmoidActivationNeuron:\n \"\"\"\n Construct a new sigmoid activation neuron. The output takes on the\n reciprocal of one plus e raised to the power of the negative input,\n otherwise known as the logistic function.\n\n Args:\n input: An object of type Connection. The input.\n output: An object of type Connection. The output.\n \"\"\"\n def __init__(self, input, output):\n self._input = input\n self._output = output\n\n self._input.bind_to(self._update_input)\n self._update_input()\n\n def _update_input(self):\n self._output.value = 1/(1 + exp(-self._input.value))\n\n\nclass SigmoidActivationLayer:\n \"\"\"\n Construct a new sigmoid activation layer. Each neuron in the layer performs\n sigmoid activation on its input in the input layer for the corresponding\n output in the output layer.\n\n Args:\n inputs: An object of type ConnectionLayer. The input layer.\n outputs: An object of type ConnectionLayer. 
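# --- Hedged aside: the neuron above computes 1/(1 + exp(-x)) directly, which makes
# math.exp raise OverflowError for large negative inputs. A numerically safe variant
# branches on the sign so exp() only ever sees a non-positive argument; sketch only,
# independent of the Connection types used by these classes.
from math import exp

def stable_sigmoid(x):
    if x >= 0:
        return 1.0 / (1.0 + exp(-x))
    z = exp(x)  # x < 0, so z lies in (0, 1) and cannot overflow
    return z / (1.0 + z)

assert abs(stable_sigmoid(0.0) - 0.5) < 1e-12
assert stable_sigmoid(-1000.0) == 0.0  # the plain form would raise OverflowError here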
The output layer.\n \"\"\"\n def __init__(self, inputs, outputs):\n validate_dimensions_layer(inputs)\n validate_dimensions_layer(outputs)\n validate_same_dimensions_layer(inputs, outputs)\n\n self._inputs = inputs\n self._outputs = outputs\n self._neurons = [[[\n SigmoidActivationNeuron(inputs[i][j][k], outputs[i][j][k])\n for k in range(len(inputs[0][0]))]\n for j in range(len(inputs[0]))]\n for i in range(len(inputs))]\n","sub_path":"reshade/activation/sigmoid.py","file_name":"sigmoid.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"362697660","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.linalg import solve_banded\nimport control as ct\n\ndef calc_A_u_v(x):\n\n\ta = np.full(ct.nz, np.cos(ct.alpha) ** 2 / ct.dz ** 2, dtype=complex) # Off diagonal elements\n\tb = -2 * a + ct.omega ** 2 / ct.vA(x,ct.z) ** 2 # Diagonal elements\n\tb0 = b[0]\n\tb[0] += b0\n\tb[-1] += a[0] ** 2 / b0\n\n\tab = np.zeros((3, ct.nz), dtype=complex)\n\tab[0,:] = a\n\tab[1,:] = b\n\tab[2,:] = a\n\n\tu = np.zeros(ct.nz, dtype=complex)\n\tv = np.zeros(ct.nz, dtype=complex)\n\tu[0] = -b0\n\tu[-1] = a[0]\n\tv[0] = 1\n\tv[-1] = -a[0] / b0\n\n\treturn [ab, u, v]\n\ndef vector_d(b_par):\n\treturn -ct.omega * (ct.k_perp * b_par + \\\n\t\t\t1j * np.sin(ct.alpha) / (2 * ct.dz) * (np.roll(b_par,-1) - np.roll(b_par,1)))\n\ndef calc_u_perp(x, b_par):\n\t# Calculate u_perp by inverting the matrix ab,\n\t# Note that A is nearly tridiagonal and so a computationally efficent algorithm can be used\n\t# See https://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)\n\n\t[ab, u, v] = calc_A_u_v(x)\n\td = vector_d(b_par)\n\n\tprint(np.shape(ab))\n\tprint(np.shape(d))\n\n\ty = solve_banded((1,1), ab, d)\n\tq = solve_banded((1,1), ab, u)\n\n\tvTy = v[0] * y[0] + v[-1] * y[-1]\n\tvTq = v[0] * q[0] + v[-1] * q[-1]\n\tu_perp = y - vTy / (1 + vTq) * q\n\n\treturn u_perp","sub_path":"old_non_uniform/tridiag.py","file_name":"tridiag.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"586049910","text":"import os\n\noutput = \"./data/total_google_play.csv\"\n\nfiles = os.listdir('./data/')\nfiles = filter(lambda x: x.startswith(\"google_play\"), files)\n\nwith open(output, \"w\", encoding=\"utf-8\") as outf:\n for f in files:\n print(f\"Read {f}\")\n with open(\"./data/\" + f, encoding='utf-8') as fin:\n lines = fin.readlines()\n outf.writelines(lines)","sub_path":"crawling/merge_google_play.py","file_name":"merge_google_play.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"97785496","text":"import bpy\nfrom types import MethodType\nfrom bpy.types import Node\nfrom animation_nodes.mn_node_base import AnimationNode\nfrom animation_nodes.mn_execution import nodePropertyChanged, nodeTreeChanged, allowCompiling, forbidCompiling\nfrom animation_nodes.mn_utils import *\n\n\nclass mn_ModifierNode(Node, AnimationNode):\n\t\"\"\"A Class that extents an animation node witch represents a modifier of an object \n\tand have the functionality to create input sockets for each property of the Modifier.\n\t\n\tNote: \n\t\tTho node may be linked to an object socket input.\n\t\n\tAttributes:\n\t\tbl_idname (str): Blender's id name is 'mn_ModifierNode'.\n\t\tbl_label (str): Blender's Label is 'Modifier 
Node'.\n\t\tnode_category (str): This node is type of 'Modifier'.\n\t\tobjectName (str): The name of blender Object witch this node is refer to.\n\t\tmodifierName (str): The name of blender Modifier witch this node is refer to.\n\t\tpropertyName (str): The name of blender Modifier Property witch this node is refer to.\n\t\"\"\"\n\tbl_idname = \"mn_ModifierNode\"\n\tbl_label = \"Modifier Node\"\n\tnode_category = \"Modifier\"\n\t# it is not need update = nodePropertyChanged because it changes only in execution string.\n\tobjectName = bpy.props.StringProperty()\n\tmodifierName = bpy.props.StringProperty(update = nodePropertyChanged)\n\tdef setUseCustomName(self, value):\n\t\ttry:\n\t\t\tif value == True:\n\t\t\t\tself.inputs[\"Modifier\"].enabled = True\n\t\t\t\tself.inputs[\"Modifier\"].setStoreableValue(self.modifierName)\n\t\t\telse:\n\t\t\t\tself.inputs[\"Modifier\"].enabled = False\n\t\t\t\tself.modifierName = self.inputs[\"Modifier\"].getStoreableValue()\n\t\texcept (KeyError, SyntaxError, ValueError, AttributeError):\n\t\t\tpass\n\tdef getUseCustomName(self):\n\t\ttry:\n\t\t\treturn self.inputs[\"Modifier\"].enabled\n\t\texcept (KeyError, SyntaxError, ValueError, AttributeError):\n\t\t\treturn False\n\t# using update = nodeTreeChanged to update execution strings.\n\tuseCustomName = bpy.props.BoolProperty(set = setUseCustomName, get = getUseCustomName, update = nodeTreeChanged)\n\t\n\tdef init(self, context):\n\t\t\"\"\"Initialization of the node.\n\t\t\n\t\tArgs:\n\t\t\tcontext:\n\t\t\"\"\"\n\t\tforbidCompiling()\n\t\tsocket = self.inputs.new(\"mn_ObjectSocket\", \"Object\")\n\t\tsocket.showName = False\n\t\tsocket = self.inputs.new(\"mn_StringSocket\", \"Modifier\")\n\t\tself.useCustomName = False\n\t\tsocket.showName = False\n\t\tself.outputs.new(\"mn_ModifierSocket\", \"Modifier\").showName = False\n\t\tallowCompiling()\n\t\t\n\tdef draw_buttons(self, context, layout):\n\t\tlayout.prop(self, \"useCustomName\", text=\"Custom Name\")\n\t\tif self.useCustomName == False :\n\t\t\ttry:\n\t\t\t\tdata = eval(\"bpy.context.scene.objects['\" + self.objectName + \"']\")\n\t\t\t\tlayout.prop_search(self, \"modifierName\", data, \"modifiers\", icon=\"NONE\", text = \"\")\n\t\t\texcept (KeyError, SyntaxError, ValueError, AttributeError):\n\t\t\t\tpass\n\t\treturn\n\tdef changeObject(self, objectName):\n\t\t\"\"\"This function called when the name of the object changes and is responsible for enumerate the input - output sockets.\n\t\t\n\t\tArgs:\n\t\t\tobject (bpy.types.Object): The name to correct object.\n\t\t\"\"\"\n\t\tself.objectName = objectName\n\t\treturn\n\tdef getInputSocketNames(self):\n\t\treturn {\"Object\" : \"Object\",\n\t\t\t\t\"Modifier\" : \"Modifier\"}\n\tdef getOutputSocketNames(self):\n\t\treturn {\"Modifier\" : \"Modifier\"}\n\tdef useInLineExecution(self):\n\t\treturn True\n\tdef getInLineExecutionString(self, outputUse):\n\t\tcodeLines = []\n\t\ttabSpace = \" \"\n\t\tthisNode = \"bpy.data.node_groups['\" + self.id_data.name + \"'].nodes['\" + self.name + \"']\"\n\t\tcodeLines.append(\"if %Object% is not None and %Object%.name != \" + thisNode + \".objectName:\")\n\t\tcodeLines.append(tabSpace + thisNode + \".changeObject(%Object%.name)\")\n\t\tif outputUse[\"Modifier\"]:\n\t\t\tmodifierNameSource = thisNode + \".modifierName\"\n\t\t\tif self.useCustomName:\n\t\t\t\tmodifierNameSource = \"%Modifier%\"\n\t\t\tcodeLines.append(\"try:\")\n\t\t\tcodeLines.append(tabSpace + \"$Modifier$ = %Object%.modifiers[\" + modifierNameSource + \"]\")\n\t\t\tcodeLines.append(\"except 
(KeyError, SyntaxError, ValueError, AttributeError) as exp:\")\n\t\t\tcodeLines.append(tabSpace + \"$Modifier$ = None\")\n\t\t\tcodeLines.append(tabSpace + \"pass\")\n#\t\tprint(\"\\n\".join(codeLines))\n\t\treturn \"\\n\".join(codeLines)\n","sub_path":"nodes/modifier/mn_modifier.py","file_name":"mn_modifier.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"627888575","text":"#!/usr/bin/env python3\n\nfrom utils.google_api import google_service\nfrom googleapiclient.errors import HttpError\n\nclass SheetOperator:\n def __init__(self, config, spreadsheet_id, sheet_name):\n self.service = google_service(config)\n self.spreadsheet_id = spreadsheet_id\n self.sheet_name = sheet_name\n\n def get_value(self, range_name):\n range_name = self.sheet_name + '!' + range_name\n try:\n result = self.service.spreadsheets().values().get(spreadsheetId=self.spreadsheet_id,\n range=range_name).execute()\n values = result.get('values', [])\n return values\n except HttpError as e:\n print(e)\n\n def update_value(self, range_name, values):\n range_name = self.sheet_name + '!' + range_name\n try:\n result = self.service.spreadsheets().values().update(spreadsheetId=self.spreadsheet_id,\n range=range_name,\n valueInputOption='USER_ENTERED',\n body={'values': values}).execute()\n updated_cells = result.get('updatedCells', 0)\n return updated_cells\n except HttpError as e:\n print(e)\n\n def append_value(self, range_name, values):\n range_name = self.sheet_name + '!' + range_name\n try:\n result = self.service.spreadsheets().values().append(spreadsheetId=self.spreadsheet_id,\n range=range_name,\n valueInputOption='USER_ENTERED',\n insertDataOption='OVERWRITE',\n body={'values': values}).execute()\n update_cells = result.get('updates', 0).get('updatedCells', 0)\n return update_cells\n except HttpError as e:\n print(e)\n","sub_path":"utils/google_sheet.py","file_name":"google_sheet.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"181945387","text":"# Given a non-empty string, encode the string such that its encoded length is th\n# e shortest. \n# \n# The encoding rule is: k[encoded_string], where the encoded_string inside the \n# square brackets is being repeated exactly k times. \n# \n# Note: \n# \n# \n# k will be a positive integer and encoded string will not be empty or have ext\n# ra space. \n# You may assume that the input string contains only lowercase English letters.\n# The string's length is at most 160. \n# If an encoding process does not make the string shorter, then do not encode i\n# t. If there are several solutions, return any of them is fine. 
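# Editor's note: a hypothetical usage sketch for the SheetOperator wrapper
# above; the config dict, spreadsheet id, sheet name and cell ranges are
# placeholders, not values from this project. The class prefixes each range
# with the sheet name, and values are passed as a list of row lists.
ops = SheetOperator(config={'credentials': 'service_account.json'},
                    spreadsheet_id='1AbC...XYZ',
                    sheet_name='Sheet1')
rows = ops.get_value('A1:C10')                  # -> list of row lists (may be ragged)
ops.update_value('A1:B1', [['id', 'name']])     # overwrite a fixed range
ops.append_value('A1', [[42, 'alice']])         # append below the last table row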
\n# \n# \n# \n# \n# Example 1: \n# \n# \n# Input: \"aaa\"\n# Output: \"aaa\"\n# Explanation: There is no way to encode it such that it is shorter than the inp\n# ut string, so we do not encode it.\n# \n# \n# \n# \n# Example 2: \n# \n# \n# Input: \"aaaaa\"\n# Output: \"5[a]\"\n# Explanation: \"5[a]\" is shorter than \"aaaaa\" by 1 character.\n# \n# \n# \n# \n# Example 3: \n# \n# \n# Input: \"aaaaaaaaaa\"\n# Output: \"10[a]\"\n# Explanation: \"a9[a]\" or \"9[a]a\" are also valid solutions, both of them have th\n# e same length = 5, which is the same as \"10[a]\".\n# \n# \n# \n# \n# Example 4: \n# \n# \n# Input: \"aabcaabcd\"\n# Output: \"2[aabc]d\"\n# Explanation: \"aabc\" occurs twice, so one answer can be \"2[aabc]d\".\n# \n# \n# \n# \n# Example 5: \n# \n# \n# Input: \"abbbabbbcabbbabbbc\"\n# Output: \"2[2[abbb]c]\"\n# Explanation: \"abbbabbbc\" occurs twice, but \"abbbabbbc\" can also be encoded to \n# \"2[abbb]c\", so one answer can be \"2[2[abbb]c]\".\n# \n# \n# \n# Related Topics Dynamic Programming\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n def encode(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n n = len(s)\n dp = [[\"\"]*n for _ in range(n)]\n for step in range(1, n+1):\n for i in range(n-step+1):\n j = i+step -1\n dp[i][j] = s[i:i+step]\n for k in range(i, j):\n left = dp[i][k]\n right = dp[k+1][j]\n if len(left) + len(right) < len(dp[i][j]):\n dp[i][j] = left + right\n t = s[i:i+step]\n replace = t\n pos = (t+t).find(t,1)\n if pos > 0:\n replace = str(len(t)//pos) +'[%s]'%(dp[i][i+pos-1])\n if len(replace) < len(dp[i][j]): dp[i][j] = replace\n return dp[0][n-1]\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"leetcode/editor/en/[471]Encode String with Shortest Length.py","file_name":"[471]Encode String with Shortest Length.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"521436998","text":"from createRandomGraph import createRandom\r\nfrom directedGraph import directedGraph\r\nfrom getDataFromFile import getData\r\nfrom userInterface import UI\r\n\r\nprint(\"If you want to get the input from the file press W \")\r\nprint(\"If you want to get a random graph press R\")\r\ncommand = input(\"Enter your choice: \")\r\nif command.lower() == 'w':\r\n print(\"\\n This application gets data from 'input.txt'.\")\r\n graph = getData('input.txt')\r\nelse:\r\n print(\" This application gets a random graph.\")\r\n graph = directedGraph()\r\n x = int(input(\"Enter the number of vertices: \"))\r\n y = int(input(\"Enter the number of edges: \"))\r\n graph = createRandom(graph, x, y)\r\nui = UI(graph)\r\nui.menu()","sub_path":"GA/001/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"12710811","text":"\"\"\"\nFunctionPractice_Gadaleta.py\n2 functions,\n A) circleGeometry - takes a radius and returns the area and the circumference\n B) Letter Multi, takes in a letter with a conjoining number to print that character that many times\nauthor: Jake Gadaleta\n\"\"\"\nfrom math import pi\n\n\ndef circle_geometry(radius):\n return (\n pi * (radius ** 2), # area\n 2 * pi * radius #circumference\n )\n\ndef letter_multiplier(args):\n if len(args) % 2 is not 0:\n return \"There is an improper amount of variables in the tuple\"\n\n returner = ''\n for i in range(0, len(args), 2):\n returner 
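# Editor's note: the crux of the DP above is `pos = (t + t).find(t, 1)`.
# If t consists of a unit of length p repeated len(t)//p times, shifting t
# by p characters inside t+t realigns it with itself, so the first match
# lands at index p; for an aperiodic t the first match is at len(t). A
# small standalone demonstration:
for t in ["abcabc", "aaaa", "abab", "abcd"]:
    pos = (t + t).find(t, 1)
    if pos < len(t):
        print(t, "= unit", t[:pos], "repeated", len(t) // pos, "times")
    else:
        print(t, "has no shorter repeating unit")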
+= args[i] * args[i+1]\n\n return returner\n\nfor i in range(30):\n temp = circle_geometry(i)\n print(\n f\"\"\"\n Rad:\\t\\t{i:<15.2f} units\n Area:\\t\\t{temp[0]:<15.2f} units\n Circumference:\\t{temp[1]:<15.2f} units\n \"\"\"\n )\n\nx = \"L\", 4, \"H\", 8\nprint(letter_multiplier(x))\nx = \"L\", 4, \"p\", 3, \"j\", 2\nprint(letter_multiplier(x))","sub_path":"ch04/Homework/FunctionPractice_Gadaleta.py","file_name":"FunctionPractice_Gadaleta.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"308086630","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@author: paul.xie\n@software: PyCharm\n@time: 2017/1/11 17:29\n\"\"\"\nfrom xlrd import open_workbook # workbook\nfrom xlutils.copy import copy\nwb = open_workbook(\"base.xls\")\n\n# base = wb.sheet_by_name(\"one\")\nbase = wb.sheet_by_index(0) # worksheet; cell = a single cell\nexample = copy(wb)\nrow = base.nrows # number of rows\ncolumns = base.ncols # number of columns\nprint(row, columns)\n\nheaders = []\nfor i in range(columns):\n headers.append(base.cell(0, i).value)\nprint(headers)\n\nfor row in range(1, row):\n for col in range(columns):\n print(base.cell(row, col).value)\n\n\nexample.get_sheet(0).write(0, 0, \"copy\")\nexample.get_sheet(1).write(0, 0, \"new_test\")\nexample.save(\"output.xls\")\n","sub_path":"Learn/2017_01_11_4.py","file_name":"2017_01_11_4.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"107209987","text":"n = int(input(\"Enter an integer number \"))\r\ncount = 1 # n is already a factor of n (n x 1 = n) so it is counted as the first\r\ni = 1\r\nwhile i <= n / 2:\r\n if n % i == 0:\r\n print(\"Factor of \", n, \"is \", i)\r\n count = count + 1\r\n i = i + 1\r\n\r\nprint(n, \" has total \", count, \"factors\")\r\n","sub_path":"Others/Factor counter.py","file_name":"Factor counter.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"161558711","text":"import os\nfrom gensim.models.word2vec import Word2Vec\nfrom sklearn.manifold import TSNE\nimport re\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef visualize_word2vec_model(path):\n model_name = os.path.basename(path)\n model = Word2Vec.load(path)\n\n vocab = list(model.wv.vocab)\n X = model[vocab]\n\n tsne = TSNE(n_components=2)\n X_tsne = tsne.fit_transform(X)\n\n df = pd.DataFrame(X_tsne, index=vocab, columns=['x', 'y'])\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_title(\"Word2Vec Visualization\")\n\n ax.scatter(df['x'], df['y'])\n for word, pos in df.iterrows():\n ax.annotate(word, pos)\n\n fig.savefig(\"./{0}\".format(model_name.replace(\"h5\", \"png\")))\n\n\nvisualize_word2vec_model(r\"/Users/ccrowe/Documents/Thesis/facebook_api/Notebooks/Word2Vec/highly_positive.h5\")\nvisualize_word2vec_model(r\"/Users/ccrowe/Documents/Thesis/facebook_api/Notebooks/Word2Vec/slightly_positive.h5\")\nvisualize_word2vec_model(r\"/Users/ccrowe/Documents/Thesis/facebook_api/Notebooks/Word2Vec/negative_positive.h5\")\n","sub_path":"Visualize_Keras_Model/Word2Vec_Visualize.py","file_name":"Word2Vec_Visualize.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"305220982","text":"# Miscellaneous\nMARK6_DEFAULT_USER = \"oper\"\n\nMARK6_INPUTS = (0, 1)\nMARK6_OUTPUTS = (0, 1, 2, 3)\nMARK6_MODULES = [1, 2, 3, 
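# Editor's note: the factor-counting loop above tests every candidate up to
# n/2. An alternative (an editor's addition, not from the original file)
# counts divisors in O(sqrt(n)) by pairing each divisor i <= sqrt(n) with
# its partner n // i:
def count_factors(n):
    count = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            count += 2          # i and n // i form a pair
            if i * i == n:
                count -= 1      # perfect square: i == n // i, count it once
        i += 1
    return count

assert count_factors(12) == 6   # 1, 2, 3, 4, 6, 12
assert count_factors(16) == 5   # 1, 2, 4, 8, 16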
4]\n\nMOD_TYPE_SG = \"sg\"\nMOD_DISKNO = 8\n\n# Input stream definitions by data source type\nR2DBE_SOURCE_TYPE = \"r2dbe\"\nR2DBE_DATA_FORMAT = \"vdif\"\nR2DBE_PAYLOAD_SIZE = 8224\nR2DBE_PAYLOAD_OFFSET = 50\nR2DBE_PSN_OFFSET = 42\nR2DBE_PACKET_RATE = 125000\n\nSOURCE_TYPES = [R2DBE_SOURCE_TYPE]\n\n# VSI return code meanings\nVSI_SUCCESS = 0\nVSI_BUSY = 1\nVSI_NOT_IMPLEMENTED = 2\nVSI_SYNTAX_ERROR = 3\nVSI_RUNTIME_ERROR = 4\nVSI_TOO_BUSY = 5\nVSI_INCONSISTENT = 6\nVSI_UNKNOWN_KEYWORD = 7\nVSI_PARAMETER_ERROR = 8\nVSI_INDETERMINATE_STATE = 9\n\nCPLANE_SUCCESS = 0\n\n# Expected values for checks\nLSSCSI_DISKS = 32\nNTPQ_MAX_OFFSET = 0.100\nVV_MAX_OFFSET = 0.01\nGROUP_REF = \"1234\"\n\n# Executables\nEXEC_M6CC = \"M6_CC\"\nEXEC_M6CC_PATH = \"/home/oper/bin\"\nEXEC_VEX2XML = \"vex2xml.py\"\nEXEC_VEX2XML_PATH = \"/home/oper/bin\"\n","sub_path":"lib/mandc/mark6/defines.py","file_name":"defines.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"189899204","text":"'''\nCreated on 24 janv. 2020\n\n@author: Administrateur\n'''\nfrom elasticsearch import Elasticsearch\nfrom connect_elastic import es\n\nif es.indices.exists(index='test'):\n es.indices.delete(index='test')\n\n\nrequest_body = {\n # merged into one block: duplicate dict keys would silently drop the first \"settings\"\n \"settings\" : {\n \"number_of_shards\": 3,\n \"number_of_replicas\": 1,\n \"max_result_window\": \"100000\"\n },\n 'mappings': {\n 'properties': {\n 'siret': {'type': 'long'},\n 'numeroVoieEtablissement': {'type': 'text'},\n 'typeVoieEtablissement': {'type': 'text'},\n 'libelleVoieEtablissement': {'type': 'text'},\n 'codePostalEtablissement': {'type': 'text'},\n 'libelleCommuneEtablissement': {'type': 'text'},\n 'location': {'type': 'geo_point'}\n }}\n}\n\nes.indices.create(index='test', body= request_body)\n\n\n","sub_path":"Projet_POEC/Projet/create_index_mapping.py","file_name":"create_index_mapping.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"38133807","text":"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\n##########\n\nREGIONCNAMES = {\n    \"Abbas\": [\"阿巴斯\", \"Iran\", \"伊朗\"],\n    \"Alexandria\": [\"亚特兰大港\", \"Egypt\", \"埃及\"],\n    \"Ankara\": [\"安卡拉\", \"Turkey\", \"土耳其\"],\n    \"Colombo\": [\"科伦坡\", \"Sri Lanka\", \"斯里兰卡\"],\n    # \"Djibouti\":[\"吉布提\",\"Djibouti\",\"吉布提\"],\n    \"Ekaterinburg\": [\"叶卡捷琳堡\", \"Russia\", \"俄罗斯\"],\n    \"Gawdar\": [\"瓜达尔港\", \"Pakistan\", \"巴基斯坦\"],\n    \"Hambantota\": [\"汉班托塔港\", \"Sri Lanka\", \"斯里兰卡\"],\n    \"Karachi\": [\"卡拉奇\", \"Pakistan\", \"巴基斯坦\"],\n    \"Kolkata\": [\"加尔各答\", \"India\", \"印度\"],\n    \"Kuantan\": [\"关丹\", \"Malaysia\", \"马来西亚\"],\n    \"Maldives\": [\"马尔代夫\", \"Maldives\", \"马尔代夫\"],\n    \"Melaka\": [\"马六甲\", \"Malaysia\", \"马来西亚\"],\n    \"Minsk\": [\"明斯克\", \"Belarus\", \"白俄罗斯\"],\n    \"Mumbai\": [\"孟买\", \"India\", \"印度\"],\n    \"Novosibirsk\": [\"新西伯利亚\", \"Russia\", \"俄罗斯\"],\n    # \"Nursultan\":[\"努尔苏丹(原阿斯塔纳)\",\"Kazakhstan\",\"哈萨克斯坦\"],\n    \"Piraeus\": [\"比雷艾夫斯港\", \"Greece\", \"希腊\"],\n    \"Tashkent\": [\"塔什干\", \"Uzbekistan\", \"乌兹别克斯坦\"],\n    \"Teran\": [\"德黑兰\", \"Iran\", \"伊朗\"],\n    # \"Valencia\":[\"瓦伦西亚\",\"Spain\",\"西班牙\"],\n    \"Warsaw\": [\"华沙\", \"Poland\", \"波兰\"],\n    \"Yawan\": [\"雅万高铁\", \"Indonesia\", \"印度尼西亚\"],\n}\n\nINDEXCNAMES = {\n    \"Danger\": \"危险性\",\n    \"Expo\": \"暴露度\",\n    \"Vulner\": \"脆弱性\",\n    \"DPE\": \"孕灾环境\",\n    \"Final\": \"最终结果\",\n}\n\nCALCU = [\"positive\", \"negative\"]\n###########\n########### general\nREGIONNAMES = {\n    'Abbas': 
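# Editor's note: a hedged sketch (hypothetical document values) of indexing
# into the mapping defined above and confirming the document count; the
# geo_point field accepts a {"lat": ..., "lon": ...} object.
doc = {
    'siret': 12345678900011,
    'numeroVoieEtablissement': '10',
    'typeVoieEtablissement': 'RUE',
    'libelleVoieEtablissement': 'DE LA PAIX',
    'codePostalEtablissement': '75002',
    'libelleCommuneEtablissement': 'PARIS',
    'location': {'lat': 48.8686, 'lon': 2.3314},
}
es.index(index='test', body=doc)
es.indices.refresh(index='test')       # make the document visible to searches
print(es.count(index='test')['count'])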
'irn',\n    'Alexandria': 'egy',\n    'Ankara': 'tur',\n    'Colombo': 'lka',\n    # 'Djibouti': 'dji',\n    'Ekaterinburg': 'rus', #rus2 for osm\n    'Gawdar': 'pak',\n    'Hambantota': 'lka',\n    'Karachi': 'pak',\n    'Kolkata': 'ind',\n    'Kuantan': 'mys',\n    'Maldives': 'mdv',\n    'Melaka': 'mys',\n    'Minsk': 'blr',\n    'Mumbai': 'ind',\n    'Novosibirsk': 'rus', #rus1 for osm\n    # 'Nursultan': 'kaz',\n    'Piraeus': 'grc',\n    'Tashkent': 'uzb',\n    'Teran': 'irn',\n    # 'Valencia': 'esp',\n    'Warsaw': 'pol',\n    'Yawan': 'idn',\n}\nYEAR = 2010\nNDV = -3.4028234663852886e+38\n\nINDEXNAMES = {\n    'max': 1,\n    'HT_frequency': 1,\n    'HT_duration': 1,\n    'pop': 1,\n    'deltaY': 1,\n    'deltaYpeople': 1,\n    'NDVI': 1,\n    'Nightlight': 1,\n    'GDP': 1,\n    'Euc': 0,\n    'IMS': 0,\n    'Danger': 1,\n    'Expo': 1,\n    'Vulner': 1,\n    'DPE': 0,\n    'Final': 1\n}\n\nINDEX1 = {\n    'max': ['max'],\n    'HT_frequency': ['HT_frequency'],\n    'HT_duration': ['HT_duration'],\n    'pop': ['pop'],\n    'deltaY':\n    ['deltaY_000_980', 'deltaY_980_990', 'deltaY_990_995', 'deltaY_995_100'],\n    'deltaYpeople':\n    ['deltaYpeople_00_65', 'deltaYpeople_65_80', 'deltaYpeople_80_00'],\n    'NDVI': ['NDVI'],\n    'Nightlight': ['Nightlight'],\n    'GDP': ['GDP'],\n    'Euc': ['hospital_Euc', 'road_Euc', 'water_Euc'],\n    'IMS': ['IMS']\n}\n\nINDEX2 = {\n    'Danger': [\"max\", \"HT_frequency\", \"HT_duration\"],\n    'Expo': [\"pop\"],\n    'Vulner': [\"deltaY\", \"deltaYpeople\"],\n    'DPE': [\"NDVI\", \"Nightlight\", \"GDP\", \"Euc\", \"IMS\"]\n}\n\nINDEX2FILE = {\n    'Danger': ['Danger'],\n    'Expo': ['Expo'],\n    'Vulner': ['Vulner'],\n    'DPE': ['DPE']\n}\n\nINDEX3 = {'Final': ['Danger', 'Expo', 'Vulner', 'DPE']}\n\n\ndef mkdir(path):\n    \"\"\"\n    Create a folder.\n    \"\"\"\n    folder = os.path.exists(path)\n    foldername = path.split(\"\\\\\")[-1]\n\n    if not folder:  # check whether the folder exists; create it if not\n        os.makedirs(path)  # makedirs also creates intermediate directories in the path\n        print(\"The folder \" + foldername + \" is created\")\n\n\n#####################","sub_path":"CONSTANT.py","file_name":"CONSTANT.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"93089464","text":"def main():\n    list1 = [1, 3, 5, 7, 9, 100]\n    list2 = ['hello'] * 5\n    print(len(list1))  # length of the list\n    print(list1[0])  # access by index\n    # print(list1[5])  # IndexError: list index out of range (out of bounds)\n    print(list1[-1])  # access from the end\n    list1[2] = 300  # assignment\n    # add elements\n    list1.append(200)  # append to the end\n    list1.insert(1, 400)  # insert at a given index\n    list1 += [1000, 2000]\n    print(list1)\n    # delete elements\n    list1.remove(3)  # remove the first matching value\n    if 1234 in list1:\n        list1.remove(1234)\n    del list1[0]\n    print(list1)\n    # clear the list\n    list1.clear()\n    # sorting -- returns a new copy\n    list3 = sorted(list1)\n    list4 = sorted(list1, reverse=True)  # descending\n    # the key keyword argument sorts by string length instead of the default alphabetical order\n    list5 = sorted(list1, key=len)  # sort by length\n    list1.sort(reverse=True)  # in-place sort\n\n\nmain()\n\n\n# building lists with comprehensions and generators --- from Ruan Yifeng's blog\nimport sys\n\n\ndef main():\n    f = [x for x in range(1, 10)]\n    print(f)\n    f = [x + y for x in 'ABCDE' for y in '1234567']\n    print(f)\n    # create a list with a list comprehension\n    # all elements are materialized up front, so this costs more memory\n    f = [x ** 2 for x in range(1, 1000)]\n    print(sys.getsizeof(f))  # bytes of memory the object occupies\n    print(f)\n    # note: the code below creates a generator object, not a list\n    # a generator yields the data without storing it\n    # each value is computed on demand (costing extra time instead)\n    f = (x ** 2 for x in range(1, 1000))\n    print(sys.getsizeof(f))  # unlike the comprehension, the generator does not store the data\n    print(f)\n    for val in f:\n        print(val)\n\n\nif __name__ == '__main__':\n    
main()\n","sub_path":"ListClass.py","file_name":"ListClass.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"159588585","text":"#!/usr/bin/python3\n\nimport math\nfrom lib import factors, isprime\n\n# This iterator is for greatly reducing the number of divisors we check\n# when checking whether a number is prime.\n# We do this by making the following deductions on a group of potential divisors\n# for a candidate prime number:\n# 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40\n# ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^\n# Starting at 7 we have jumps of [4, 2] repeating which give us a sequence of numbers\n# which never have a prime factor of 2 or 3\n# Skipping over factors of 5 as well is possible but would require more logic.\n# The extra logic in the iterator might outweigh the saving\n\n\nclass pseudoprimeiterator():\n    def __init__(self, end):\n        self.end = end\n        self.current = 5\n        self.step = (4, 2)\n        self.tictoc = True\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        self.current += self.step[self.tictoc]\n        self.tictoc = not self.tictoc\n        if self.current < self.end:\n            return self.current\n        else:\n            raise StopIteration\n\n    def next(self):\n        return self.__next__()\n\n\ndef largestprimefactor(n):\n    if not isinstance(n, int):  # Python 3 has no separate long type\n        raise TypeError\n    allfactors = factors(n)\n    # Now we have a list of factor pairs (non-prime)\n    # Let's find which of these are prime numbers\n    pfactors = []\n    for factorpair in allfactors:\n        if isprime(factorpair[0]):\n            pfactors.append(factorpair[0])\n        if isprime(factorpair[1]):\n            pfactors.append(factorpair[1])\n    # print(pfactors)\n    return max(pfactors)\n\n\nif __name__ == \"__main__\":\n    print(largestprimefactor(600851475143))\n","sub_path":"pe0003.py","file_name":"pe0003.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"522146092","text":"# coding: utf8\n\nimport requests\nfrom pymongo import MongoClient\nfrom pypinyin import lazy_pinyin\nfrom datetime import datetime\n\nfrom scripture import settings\n\nHOST = 'http://47.94.77.75'\nCITIES_URI = '/api/v3/cities'\n\nmc = MongoClient(settings.MONGO)\n\n\ndef cities(request=requests):\n    data = request.get(HOST + CITIES_URI).json()['data']\n    _cities = []\n    for continent in data.values():\n        for country in continent.values():\n            for city in country:\n                country_name = city['country']['name']\n                _cities.append({\n                    'country_code': city['country']['code'],\n                    'country_en_name': city['country']['name_en'],\n                    'country_zh_name': country_name,\n                    'country_pinyin': ' '.join(lazy_pinyin(country_name)),\n                    'zh_name': city['name'],\n                    'en_name': city['name_en'],\n                    'pinyin': ' '.join(lazy_pinyin(city['name'])),\n                })\n\n    return _cities\n\n\ndef update(city):\n    qualifier = {\n        'zh_name': city['zh_name'],\n        'country_zh_name': city['country_zh_name']\n    }\n    mc.scripture.cities.update_one(\n        qualifier,\n        {\n            '$set': city,\n            '$setOnInsert': {'created_at': datetime.now()},\n            \"$currentDate\": {'updated_at': True},\n        },\n        upsert=True\n    )\n\nfor city in cities():\n    update(city)\n","sub_path":"flashtripdemo/scripture/scripture/scripts/online_cities.py","file_name":"online_cities.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"77461288","text":"# Definition for singly-linked list.\nclass 
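# Editor's note: the comment in pe0003.py says skipping multiples of 5 as
# well "would require more logic". For reference, that logic is a mod-30
# wheel (an editor's addition, not the original author's): numbers coprime
# to 2, 3 and 5 repeat the gap pattern 4,2,4,2,4,6,2,6 with period 30.
from itertools import cycle

def wheel30(end):
    gaps = cycle((4, 2, 4, 2, 4, 6, 2, 6))
    n = 7
    while n < end:
        yield n
        n += next(gaps)

print(list(wheel30(60)))
# [7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 49, 53, 59]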
ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nhead = ListNode(1)\nhead.next = ListNode(2)\n\nclass Solution(object):\n    def reverseList(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        if not head or not head.next:\n            return head\n        \n        p = self.reverseList(head.next)\n        head.next.next = head\n        head.next = None\n        return p\n    \nif __name__ == '__main__':\n    leetcode = Solution()\n    leetcode.reverseList(head)\n","sub_path":"206/recursive.py","file_name":"recursive.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"544050246","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import admin\n\n# Register your models here.\nfrom django.core.files.uploadedfile import UploadedFile\nfrom models import *\nimport datetime  # explicit import; save_model below calls datetime.datetime.now()\nclass FileAdmin(admin.ModelAdmin):\n    list_display = ('name','versioncode')\n    fields = ('name', 'path', 'versioncode','description','groupid','userid')\n\n    # Override save_model: rename the uploaded file and record its size and line count\n    def save_model(self, request, obj, form, change):\n        line = 0\n        file=request.FILES['path']\n        name=file.name\n        nowtime=datetime.datetime.now()\n        for _ in file:\n            line+=1\n        obj.__setattr__('totallinesnum',line)\n        obj.__setattr__('size',file.size)\n        obj.path.name=name[:name.rindex('.')]+'_'+nowtime.strftime(\"%Y-%m-%d-%H-%M-%S\")+'_'+obj.versioncode+name[name.rindex('.'):]\n        if change:\n            obj.__setattr__('updateat',nowtime)\n        else:\n            obj.__setattr__('createat',nowtime)\n        super(FileAdmin,self).save_model(request, obj, form, change)\n\n\n\nadmin.site.register(File,FileAdmin)","sub_path":"mysite/file/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"559985508","text":"import os\nimport subprocess\nfrom pathlib import Path\nfrom warnings import warn\n\nfrom .logging import get_logger\n\nJULIA_PROJECT = str(Path(__file__).parent / \"julia\")\nos.environ[\"JULIA_PROJECT\"] = JULIA_PROJECT\n\nlog = get_logger(\"diffeqtorch_install\")\n\n\ndef install_and_test(pyjulia=True, julia_deps=True, julia_sysimage=True):\n    if pyjulia:\n        log.debug(\"Install PyJulia\")\n        install_pyjulia()\n        log.debug(\"Test PyJulia\")\n        test_pyjulia()\n\n    if julia_deps:\n        log.debug(\"Install Julia dependencies\")\n        install_julia_deps()\n        log.debug(\"Test Julia dependencies\")\n        test_julia_deps()\n\n    if julia_sysimage:\n        log.debug(\"Install Julia system image\")\n        install_julia_sysimage()\n        log.debug(\"Test Julia system image\")\n        test_julia_sysimage()\n\n\ndef install_julia_deps():\n    output = subprocess.run(\n        f\"export JULIA_PROJECT={JULIA_PROJECT}; julia -E 'using Pkg; Pkg.instantiate()'\",\n        shell=True,\n        check=True,\n        capture_output=True,\n    )\n    log.debug(output)\n\n\ndef test_julia_deps():\n    output = subprocess.run(\n        f\"export JULIA_PROJECT={JULIA_PROJECT}; julia -E 'using DifferentialEquations'\",\n        shell=True,\n        check=True,\n        capture_output=True,\n    )\n    log.debug(output)\n\n\ndef install_pyjulia():\n    import julia\n\n    julia.install()\n\n\ndef test_pyjulia(sysimage=None, call=\"1+1\"):\n    from julia.api import Julia\n\n    if sysimage is None:\n        julia = Julia(compiled_modules=False, debug=True)\n    else:\n        julia = Julia(compiled_modules=False, sysimage=sysimage, debug=True)\n\n    log.debug(julia._call(call))\n\n\ndef install_julia_sysimage():\n    if \"JULIA_SYSIMAGE_DIFFEQTORCH\" in os.environ:\n        if not Path(os.environ[\"JULIA_SYSIMAGE_DIFFEQTORCH\"]).exists():\n            
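# Editor's note: the recursive reverseList above uses O(n) call stack. The
# standard iterative counterpart (not part of the original snippet) runs in
# O(1) extra space:
def reverse_list_iter(head):
    prev = None
    while head:
        nxt = head.next    # remember the remainder of the list
        head.next = prev   # point the current node backwards
        prev = head        # advance the reversed prefix
        head = nxt         # advance into the remainder
    return prev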
log.debug(\"Build Julia system image\")\n output = subprocess.run(\n f\"julia --project={JULIA_PROJECT} {JULIA_PROJECT}/sysimage.jl\",\n shell=True,\n check=True,\n capture_output=True,\n )\n log.debug(output)\n else:\n log.debug(\"System image exists, skipping\")\n else:\n warn(\"JULIA_SYSIMAGE_DIFFEQTORCH not set, won't build system image\")\n\n\ndef test_julia_sysimage():\n if \"JULIA_SYSIMAGE_DIFFEQTORCH\" in os.environ:\n assert Path(os.environ[\"JULIA_SYSIMAGE_DIFFEQTORCH\"]).exists()\n test_pyjulia(\n sysimage=os.environ[\"JULIA_SYSIMAGE_DIFFEQTORCH\"]\n )\n else:\n log.debug(\"JULIA_SYSIMAGE_DIFFEQTORCH not set\")\n","sub_path":"diffeqtorch/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"606904539","text":"import math\nimport string\n\n# Convert a string to integer value\ndef strToInt(s):\n return int(s.encode(\"hex\"), 16)\n\n# Convert an int to hex with specified length\ndef intToHex(v, l):\n h = '%x' % (v)\n h = '0' * (l-len(h)) + h\n return h\n\n# Convert an interger to a string\ndef intToStr(v):\n u = '%x' % v\n u = (len(u) % 2) * '0' + u\n return u.decode('hex')\n\nPRINTABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ '\ndef strToProperHex(s):\n def printableString(s):\n result = ''\n for c in s:\n result += '%s' % c if c in PRINTABLE else '.'\n return result\n\n def nonChaoticHex(s):\n result = ''\n for i in range(0, len(s), 2):\n result += s[i:i+2] + ' '\n return result\n \n number_of_lines = int(math.ceil(len(s) / 16.0))\n result = ''\n for current_line in range(number_of_lines):\n hexresult = nonChaoticHex(s[current_line*16:current_line*16+16].encode('hex'))\n clrresult = printableString(s[current_line*16:current_line*16+16])\n result += '%-48s| %-16s\\n' % (hexresult, clrresult)\n return result\n","sub_path":"Python/wowproxy/crap/convertool.py","file_name":"convertool.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"613048152","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sklearn\nfrom sklearn import cross_validation, metrics, preprocessing, svm\nfrom sklearn.externals import joblib\nimport tensorflow as tf\nimport tensorflow.python.platform\nimport skflow\n#\nfrom setup import *\nimport _\nfrom _ import p, d, MyObject, MyException\nimport opencv_functions\n\nNUM_CLASSES = 13\nIMAGE_SIZE = 28\nIMAGE_PIXELS = IMAGE_SIZE*IMAGE_SIZE*3\n\ndef conv_data(DIR = \"/Users/masaMikam/Dropbox/Project/umiA/Data/imgs/_imgswork\"):\n\timgdics = _.get_deeppath_dic(DIR)\n\tprint(imgdics)\n\ttrain_label = [label for address, label in imgdics]\n\ttrain_image = [conv_image(address, DIR) for address, label in imgdics]\n\ttrain_image = np.asarray(train_image)\n\ttrain_label = np.asarray(train_label)\n\treturn train_image, train_label\n\ndef conv_image(address, DIR = \"/Users/masaMikam/Dropbox/Project/umiA/Data/imgs/_imgswork\"):\n\timgaddress = DIR+address\n\trecogresult = opencv_functions.FaceRecognition(imgaddress, isShow = False, saveStyle = '', work_dir = 'work', through = True)\n\timg = recogresult[0]\n\timg = opencv_functions.adjust_image(img, K = 0, isHC = True, size = (28, 28))\n\treturn img.flatten().astype(np.float32)/255.0\n# conv_image('CV_FACE_icon0_LL1-01_20160212003336.png', '/Users/masaMikam/OneDrive/imgs/learn/others/')\ndef conv_label(label):\n tmp = 
np.zeros(NUM_CLASSES)\n print(tmp)\n tmp[int(label)] = 1\n return tmp\n\n### Convolutional network\ndef max_pool_2x2(tensor_in):\n return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME')\n\ndef conv_model(X, y, keep_prob = 0.5):\n\tkeep_prob = 0.5\n\tX = tf.reshape(X, [-1, 28, 28, 3])\n\twith tf.variable_scope('conv_layer1'):\n\t\th_conv1 = skflow.ops.conv2d(X, n_filters=32, filter_shape=[5, 5], bias=True, activation=tf.nn.relu)\n\t\th_pool1 = max_pool_2x2(h_conv1)\n\twith tf.variable_scope('conv_layer2'):\n\t h_conv2 = skflow.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],\n\t bias=True, activation=tf.nn.relu)\n\t h_pool2 = max_pool_2x2(h_conv2)\n\t h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n\th_fc1 = skflow.ops.dnn(h_pool2_flat, hidden_units = [1024], activation=tf.nn.relu, keep_prob=keep_prob)\n\th_fc1 = tf.nn.dropout(h_fc1, keep_prob)\n\treturn skflow.models.logistic_regression(h_fc1, y, class_weight=None)\n\ndef cnn_model(X, y):\n\tkeep_prob = tf.placeholder(tf.float32)\n\treturn conv_model(X, y, keep_prob = keep_prob)\n\n### Linear classifier.\n# classifier = skflow.TensorFlowLinearClassifier(\n# n_classes=NUM_CLASSES, batch_size=100, steps=1000, learning_rate=0.01)\n# classifier.fit(data_train, label_train)\n# score = metrics.accuracy_score(label_test, classifier.predict(data_test))\n# print('Accuracy: {0:f}'.format(score))\n\ndef train(DIR = \"/Users/masaMikam/Dropbox/Project/umiA/Data/imgs/\", save_dir = \"/Users/masaMikam/OneDrive/imgs/DNNmodel2\", logdir = '/Users/masaMikam/OneDrive/tmp/TFdata'):\n\tprint('trainingLABELs... paste it to predictFunc!!\\n', [clsdir for clsdir in os.listdir(DIR) if not clsdir in set(['.DS_Store'])])\n\timages, labels = conv_data(DIR)\n\tdata_train, data_test, label_train, label_test = cross_validation.train_test_split(images, labels, test_size=0.2, random_state=42)\n\tclassifier = skflow.TensorFlowEstimator(\n\t model_fn = cnn_model, n_classes=NUM_CLASSES, batch_size=10, steps=1000,\n\t learning_rate=1e-4, optimizer='Adam', continue_training=True)\n\t# classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],\n # n_classes=NUM_CLASSES, steps=1000,\n # early_stopping_rounds=200)\n\twhile True:\n\t\tclassifier.fit(data_train, label_train, logdir=logdir)\n\t\tscore = metrics.accuracy_score(label_test, classifier.predict(data_test))\n\t\tprint('Accuracy: {0:f}'.format(score))\n\t\tclassifier.save(save_dir)\n\n # ['chino', 'eri', 'hanayo', 'honoka', 'kotori', 'maki', 'niko', 'nozomi', 'rin', 'umi']\n # ['others', 'ことり', 'にこ', 'チノ', '凛', '希', '海未', '真姫', '穂乃果', '絵里', '花陽', '雪穂']\ndef predictAns(filename = \"/Users/masaMikam/Dropbox/Project/umiA/Data/imgs/rin/show.png\", isShow = True, model = '/Users/masaMikam/Dropbox/Project/umiA/Data/lib/DNNmodel', work_dir = '', label = ['others', 'ことり', 'にこ', 'チノ', '凛', '希', '海未', '真姫', '穂乃果', '絵里', '花陽', '雪穂']):\n\tclassifier = skflow.TensorFlowEstimator.restore(model)\n\t# imgaddress = \"/Users/masaMikam/Dropbox/Project/umiA/Data/imgs/rin/images-10.jpeg\"\n\t# imgaddress = '/Users/masaMikam/Dropbox/Project/umiA/Data/twimgs/20160204152357.jpg'\n\timg, altfilename, frame, face_flag = opencv_functions.FaceRecognition(filename, isShow = isShow, saveStyle = 'whole', work_dir = '')\n\timg = opencv_functions.adjust_image(img, isHC = True, K = 0, size = (28, 28))\n\tresult = classifier.predict(img)\n\tanslabel = label[result]\n\treturn anslabel, face_flag, altfilename\n\ndef train_svm(DIR = \"/Users/masaMikam/Dropbox/Project/umiA/Data/imgs/\", save_dir = 
\"/Users/masaMikam/OneDrive/imgs/SVMmodel.pkl\", logdir = '/Users/masaMikam/OneDrive/tmp/TFdata'):\n\tprint('trainingLABELs... paste it to predictFunc!!\\n', [clsdir for clsdir in os.listdir(DIR) if not clsdir in set(['.DS_Store'])])\n\timages, labels = conv_data(DIR)\n\tdata_train, data_test, label_train, label_test = cross_validation.train_test_split(images, labels, test_size=0.2, random_state=42)\n\t# classifier = sklearn.\n\tscores = []\n\t# K-fold 交差検証でアルゴリズムの汎化性能を調べる\n\t# kfold = cross_validation.KFold(len(data_train), n_folds=10)\n\t# for train, test in kfold:\n\t# デフォルトのカーネルは rbf になっている\n\tclf = svm.SVC(C=2**2, gamma=2**-11, probability=True)\n\t# 訓練データで学習する\n\tclf.fit(data_train, label_train)\n\t# テストデータの正答率を調べる\n\tscore = metrics.accuracy_score(clf.predict(data_test), label_test)\n\tscores.append(score)\n\t# 最終的な正答率を出す\n\taccuracy = (sum(scores) / len(scores)) * 100\n\tmsg = '正答率: {accuracy:.2f}%'.format(accuracy=accuracy)\n\tprint(msg)\n\t# clf.save(save_dir)\n\tjoblib.dump(clf, save_dir) \n\n# def predict_svm(filename = \"/Users/masaMikam/Dropbox/Project/umiA/Data/imgs/rin/show.png\", isShow = True, model = \"/Users/masaMikam/OneDrive/imgs/SVMmodel.pkl\", work_dir = '', label = ['others', 'ことり', 'にこ', 'チノ', '凛', '希', '海未', '真姫', '穂乃果', '絵里', '花陽', '雪穂'], is_force = False):\n# \timg_kind = ''\n# \t# img, altfilename, frame, face_flag = opencv_functions.FaceRecognition(filename, isShow = isShow, saveStyle = 'cat', work_dir = '', cascade_lib = cascade_lib_cat, frameSetting = {'thickness': 2, 'color':(204,153,153)})\n# \tif face_flag:\n# \t\timg_kind = 'cat'\n# \tif not img_kind:\n# \t\timg, altfilename, frame, face_flag = opencv_functions.FaceRecognition(filename, isShow = isShow, saveStyle = 'whole', work_dir = '', cascade_lib = cascade_lib_anime)\n# \t\tif face_flag:\n# \t\t\timg_kind = 'anime'\n# \tif img_kind == 'anime' or is_force:\n# \t\tclassifier = joblib.load(model)\n# \t\timg = opencv_functions.adjust_image(img, isHC = True, K = 0, size = (28, 28)).reshape(-1, 1)\n# \t\timg = img.flatten().astype(np.float32)/255.0\n# \t\tresult = classifier.predict(img.reshape(1, -1))\n# \t\tanslabel = label[result[0]]\n# \t\treturn anslabel, img_kind, altfilename\n# \telif img_kind == 'cat':\n# \t\tanslabel = 'cat'\n# \t\treturn anslabel, img_kind, altfilename\n# \telse:\n# \t\tanslabel = 'no_face'\n# \t\treturn anslabel, img_kind, filename\n\ndef predict_svm(_id = '7aa33bfe-e6c0-4156-a4d0-7e53e88b1dd1', is_show = True, model = '/Users/masaMikam/OneDrive/imgs/SVMmodel.pkl', label = ['others', 'ことり', 'にこ', 'チノ', '凛', '希', '海未', '真姫', '穂乃果', '絵里', '花陽', '雪穂'], is_force = False):\n\timg_kind = ''\n\tresult_dic = {}\n\tjson = {}\n\tif not img_kind:\n\t\tresult = opencv_functions.recognize_faceimage(_id = _id, is_show = False, cascade_lib = cascade_lib_cat)\n\t\tif 'extracted' in result:\n\t\t\tresult_dic['cat'] = result\n\tif not img_kind:\n\t\tresult = opencv_functions.recognize_faceimage(_id = _id, is_show = False, cascade_lib = cascade_lib_anime)\n\n\t\tif 'extracted' in result:\n\t\t\tresult_dic['anime'] = result\n\tif 'anime' in result_dic or is_force:\n\t\tclassifier = joblib.load(model)\n\t\tfor i in range(len(result_dic['anime']['extracted'])):\n\t\t\tresult_dic['anime']['extracted'][i]\n\t\t\tcvimg = result_dic['anime']['extracted'][i]['icon_cvimg']\n\t\t\tadjusted_img = opencv_functions.adjust_image(cvimg, isHC = True, K = 0, size = (28, 28)).reshape(-1, 1)\n\t\t\tadjusted_img = adjusted_img.flatten().astype(np.float32)/255.0\n\t\t\tresult = 
classifier.predict(adjusted_img.reshape(1, -1))\n\t\t\t# predicted_prob = classifier.predict_proba(result)\n\t\t\t# p(predicted_prob)\n\t\t\tresult_dic['anime']['extracted'][i]['prediction'] = result\n\t\t\tresult_dic['anime']['extracted'][i]['label'] = label[result[0]]\n\t\t\tframe_setting = {'thickness': 1, 'color':(0, 0, 255), 'scale':1.1, 'overlay_id' : '832b32bb-3e2d-4bbf-9217-ff358fa8a317'}\n\t\t\t# 'fabdb2c9-50c7-459e-9a29-94bbcdd77381'\n\t\t\tframed_cvimg = opencv_functions.frame_image(cvimg = result_dic['anime']['original_cvimg'], pos = result_dic['anime']['extracted'][i]['pos'], frame_setting = frame_setting)\n\t\t\tresult_dic['anime']['extracted'][i]['framed_cvimg'] = framed_cvimg\n\t\t\tjson['frame_setting'] = frame_setting\n\t\t\tjson['detection'] = 'anime'\n\t\t\tjson['prediction'] = result_dic['anime']['extracted'][i]['label']\n\t\t\tresult_dic['anime']['extracted'][i]['framed_id'] = opencv_functions.save_image_sql(cvimg = framed_cvimg, filename = ''.join([str(_id), '_SVMdetect', result_dic['anime']['extracted'][i]['label'], '_framed', str(i)]), url = str(_id), owner = None, json = json, compression_quality = 70, compression_format = 'jpg')\n\telif 'cat' in result_dic:\n\t\tresult_dic['cat']['extracted'][0]['label'] = 'cat'\n\t\tjson['detection'] = 'cat'\n\t\tframe_setting = {'thickness': 1, 'color':(204,153,153), 'scale':1.1, 'overlay_id' :'fabdb2c9-50c7-459e-9a29-94bbcdd77381'}\n\t\tjson['frame_setting'] = frame_setting\n\t\tframed_cvimg = opencv_functions.frame_image(cvimg = result_dic['cat']['original_cvimg'], pos = result_dic['cat']['extracted'][0]['pos'], frame_setting = frame_setting)\n\t\tresult_dic['cat']['extracted'][0]['framed_id'] = opencv_functions.save_image_sql(cvimg = framed_cvimg, filename = ''.join([str(_id), '_cat', '_framed', str(0)]), url = str(_id), owner = None, json = json, compression_quality = 70, compression_format = 'jpg')\n\n\treturn result_dic\nif __name__ == '__main__':\n\timport sys, os, io\n\tsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n\t# 6海未 7真姫\n\tfilename = '/Users/masaMikam/OneDrive/imgs/face/LL3/海未/CV_FACE_icon0_LL1-03_20160212165427.png'\n\tDIR = '/Users/masaMikam/OneDrive/imgs/learn/雪穂/'\n\tans = predict_svm(_id = '7aa33bfe-e6c0-4156-a4d0-7e53e88b1dd1', is_show = 1, model = modelSVM, label = ['others', 'ことり', 'にこ', '真姫', '凛', '希', '海未', '真姫', '穂乃果', '絵里', '花陽', '雪穂'])\n\tprint(ans)\n\tif 'anime' in ans:\n\t\tp('a')\n\t# label, img_kind, IMGfile = machine_learning_img.predictSVM(filename = filename, isShow = False, model = modelSVM, work_dir = '')\n\t# train_svm(DIR = \"/Users/masaMikam/OneDrive/imgs/learn/_work/\", save_dir = DATADIR + '/lib/SVM_us3/SVMmodel3.pkl')\n\n\t# adrs = [DIR+clsdir for clsdir in os.listdir(DIR) if not clsdir in set(['.DS_Store'])]\n\t# print([predictSVM(filename =adr, isShow = 0, model = modelSVM)[0] for adr in adrs[:1]])\n# \n\n\n","sub_path":"machine_learning_img.py","file_name":"machine_learning_img.py","file_ext":"py","file_size_in_byte":11415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"624317018","text":"\"\"\"\n简单的文件操作\n'r' open for reading (default)\n'w' open for writing, truncating the file first\n'x' create a new file and open it for writing\n'a' open for writing, appending to the end of the file if it exists\n'b' binary mode\n't' text mode (default)\n'+' open a disk file for updating (reading and writing)\n'U' universal newline mode (deprecated)\n\"\"\"\npoem = '''\\\nProgramming is fun\nWhen the work is done\nif 
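# Editor's note: train_svm() above hard-codes C=2**2 and gamma=2**-11,
# which look hand-tuned. A sketch (editor's suggestion, using the modern
# sklearn.model_selection API rather than the deprecated cross_validation
# module) of searching that neighbourhood instead:
from sklearn import svm
from sklearn.model_selection import GridSearchCV

param_grid = {
    'C': [2 ** e for e in range(0, 5)],
    'gamma': [2 ** e for e in range(-13, -8)],
}
search = GridSearchCV(svm.SVC(probability=True), param_grid, cv=5)
search.fit(data_train, label_train)   # data_train/label_train as in train_svm()
print(search.best_params_, search.best_score_)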
you wanna make your work also fun:\n    use Python!\n'''\n# open the file for writing\nf = open('poem.txt', 'w')\n# write text to the file\nf.write(poem)\n# close the file\nf.close()\n\n# if no mode is specified,\n# the default read mode 'r' is used\nf = open('poem.txt')\nwhile True:\n    line = f.readline()  # read one line at a time\n    if len(line) == 0:\n        break\n    print(line, end='')\n# close the file\nf.close()","sub_path":"byte-of-python/exe-13.py","file_name":"exe-13.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"293408752","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\n\r\nfrom PySide2 import QtGui, QtCore, QtWidgets\r\n\r\nimport PyAero\r\nimport Airfoil\r\nimport FileSystem\r\nimport IconProvider\r\nimport SvpMethod\r\nimport SplineRefine\r\nimport TrailingEdge\r\nimport Meshing\r\nfrom Settings import ICONS_L, DIALOGFILTER, DIALOGFILTER_MESH, OUTPUTDATA\r\n\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass Toolbox(QtWidgets.QToolBox):\r\n\r\n    def __init__(self, parent):\r\n        \"\"\"Main menus for PyAero functionality.\r\n        Inserted in left pane of splitter window which in turn is the app's\r\n        CentralWidget.\r\n\r\n        Args:\r\n            parent (QWidget): MainWindow from PyAero.py\r\n        \"\"\"\r\n        super().__init__()\r\n\r\n        self.parent = parent\r\n\r\n        # set the style\r\n        style = (\"\"\" QToolBox::tab:selected {font: bold; } \"\"\")\r\n        self.setStyleSheet(style)\r\n\r\n        # create toolbox items\r\n        self.itemFileSystem()\r\n        self.itemAeropython()\r\n        self.itemContourAnalysis()\r\n        self.itemSplineRefine()\r\n        self.itemMeshing()\r\n\r\n        self.makeToolbox()\r\n\r\n        self.currentChanged.connect(self.toolboxChanged)\r\n\r\n    def toolboxChanged(self):\r\n        # tb1 = 'Airfoil Database'\r\n        # tb2 = 'Contour Splining and Refinement'\r\n        # tb4 = 'Meshing'\r\n        # tb5 = 'Aerodynamics'\r\n        # tb3 = 'Contour Analysis'\r\n\r\n        if self.currentIndex() == self.tb1:\r\n            self.parent.centralwidget.tabs.setCurrentIndex(0)\r\n\r\n        if self.currentIndex() == self.tb3:\r\n            self.parent.centralwidget.tabs.setCurrentIndex(1)\r\n\r\n        # update points on airfoil when toolbox changed to meshing\r\n        if self.currentIndex() == self.tb4 and self.parent.airfoil:\r\n            pts = len(self.parent.airfoil.spline_data[0][0])\r\n            self.points_on_airfoil.setText(str(pts))\r\n\r\n    def itemFileSystem(self):\r\n\r\n        self.item_fs = QtWidgets.QWidget()\r\n        layout = QtWidgets.QVBoxLayout()\r\n        self.item_fs.setLayout(layout)\r\n\r\n        # instance of QFileSystemModel\r\n        filesystem_model = FileSystem.FileSystemModel()\r\n        root_path = filesystem_model.rootPath()\r\n\r\n        self.tree = QtWidgets.QTreeView()\r\n        self.tree.setModel(filesystem_model)\r\n        self.tree.setRootIndex(filesystem_model.index(root_path))\r\n        self.tree.setAnimated(True)\r\n\r\n        # hide size column\r\n        self.tree.setColumnHidden(1, True)\r\n        # hide type column\r\n        self.tree.setColumnHidden(2, True)\r\n        # hide date modified column\r\n        self.tree.setColumnHidden(3, True)\r\n\r\n        # hide the header line of the filesystem tree\r\n        # the header line would consist of name, date, type, size\r\n        # the latter three are hidden anyway (see above)\r\n        header = self.tree.header()\r\n        header.hide()\r\n\r\n        # handler\r\n        self.tree.clicked.connect(filesystem_model.onFileSelected)\r\n        self.tree.doubleClicked.connect(filesystem_model.onFileLoad)\r\n\r\n        layout.addWidget(self.tree, stretch=12)\r\n        # layout.setAlignment(QtCore.Qt.AlignTop)\r\n\r\n        self.header = QtWidgets.QLabel('Loaded airfoil(s)')\r\n        self.header.setEnabled(False)\r\n        layout.addStretch(stretch=2)\r\n        layout.addWidget(self.header)\r\n\r\n        
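# Editor's note: the read loop in exe-13.py above closes the file by hand.
# The usual idiom (standard Python, in the same spirit as the tutorial file)
# is a with-block, which closes the file even if an exception is raised:
with open('poem.txt') as f:
    for line in f:        # iterating a file yields one line at a time
        print(line, end='')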
self.listwidget = ListWidget(self.parent)\r\n self.listwidget.setEnabled(False)\r\n # allow only single selections\r\n self.listwidget.setSelectionMode(QtWidgets.QAbstractItemView.\r\n SingleSelection)\r\n layout.addWidget(self.listwidget, stretch=5)\r\n layout.addStretch(stretch=1)\r\n\r\n def itemAeropython(self):\r\n\r\n form = QtWidgets.QFormLayout()\r\n\r\n label1 = QtWidgets.QLabel(u'Angle of attack (°)')\r\n self.spin = QtWidgets.QDoubleSpinBox()\r\n self.spin.setSingleStep(0.1)\r\n self.spin.setDecimals(1)\r\n self.spin.setRange(-10.0, 10.0)\r\n self.spin.setValue(0.0)\r\n form.addRow(label1, self.spin)\r\n\r\n label2 = QtWidgets.QLabel('Freestream velocity (m/s)')\r\n self.freestream = QtWidgets.QDoubleSpinBox()\r\n self.freestream.setSingleStep(0.1)\r\n self.freestream.setDecimals(2)\r\n self.freestream.setRange(0.0, 100.0)\r\n self.freestream.setValue(10.0)\r\n form.addRow(label2, self.freestream)\r\n\r\n label3 = QtWidgets.QLabel('Number of panels (-)')\r\n self.panels = QtWidgets.QSpinBox()\r\n self.panels.setRange(10, 500)\r\n self.panels.setValue(40)\r\n form.addRow(label3, self.panels)\r\n\r\n panelMethodButton = QtWidgets.QPushButton('Calculate lift coefficient')\r\n form.addRow(panelMethodButton)\r\n\r\n self.item_ap = QtWidgets.QGroupBox('AeroPython Panel Method')\r\n self.item_ap.setLayout(form)\r\n\r\n panelMethodButton.clicked.connect(self.runPanelMethod)\r\n\r\n def itemContourAnalysis(self):\r\n\r\n box = QtWidgets.QVBoxLayout()\r\n\r\n vlayout = QtWidgets.QVBoxLayout()\r\n gb = QtWidgets.QGroupBox('Select contour to analyze')\r\n self.b1 = QtWidgets.QRadioButton('Original')\r\n self.b2 = QtWidgets.QRadioButton('Refined')\r\n self.b2.setChecked(True)\r\n vlayout.addWidget(self.b1)\r\n vlayout.addWidget(self.b2)\r\n gb.setLayout(vlayout)\r\n box.addWidget(gb)\r\n\r\n vlayout = QtWidgets.QVBoxLayout()\r\n self.cgb = QtWidgets.QGroupBox('Select plot quantity')\r\n self.cpb1 = QtWidgets.QRadioButton('Gradient')\r\n self.cpb2 = QtWidgets.QRadioButton('Curvature')\r\n self.cpb3 = QtWidgets.QRadioButton('Radius of Curvature')\r\n self.cpb1.setChecked(True)\r\n vlayout.addWidget(self.cpb1)\r\n vlayout.addWidget(self.cpb2)\r\n vlayout.addWidget(self.cpb3)\r\n self.cgb.setLayout(vlayout)\r\n self.cgb.setEnabled(False)\r\n box.addWidget(self.cgb)\r\n\r\n analyzeButton = QtWidgets.QPushButton('Analyze Contour')\r\n analyzeButton.setGeometry(10, 10, 200, 50)\r\n box.addWidget(analyzeButton)\r\n\r\n box.addStretch(1)\r\n\r\n self.item_ca = QtWidgets.QWidget()\r\n self.item_ca.setLayout(box)\r\n\r\n analyzeButton.clicked.connect(self.analyzeAirfoil)\r\n\r\n def itemMeshing(self):\r\n\r\n self.form_mesh_airfoil = QtWidgets.QFormLayout()\r\n\r\n label = QtWidgets.QLabel(u'Gridpoints along airfoil')\r\n label.setToolTip('Number of points as derived from splining')\r\n points = 0\r\n self.points_on_airfoil = QtWidgets.QLineEdit(str(points))\r\n self.points_on_airfoil.setEnabled(False)\r\n self.form_mesh_airfoil.addRow(label, self.points_on_airfoil)\r\n\r\n label = QtWidgets.QLabel(u'Divisions normal to airfoil')\r\n label.setToolTip('Number of points in the mesh which is constructed ' +\r\n ' normal to the airfoil contour')\r\n self.points_n = QtWidgets.QSpinBox()\r\n self.points_n.setSingleStep(1)\r\n self.points_n.setRange(1, 500)\r\n self.points_n.setValue(15)\r\n self.form_mesh_airfoil.addRow(label, self.points_n)\r\n\r\n label = QtWidgets.QLabel('Thickness normal to Airfoil (%)')\r\n label.setToolTip('The thickness is specified wrt to the unit chord')\r\n self.normal_thickness = 
QtWidgets.QDoubleSpinBox()\r\n self.normal_thickness.setSingleStep(0.1)\r\n self.normal_thickness.setRange(1., 10.)\r\n self.normal_thickness.setValue(4.0)\r\n self.normal_thickness.setDecimals(1)\r\n self.form_mesh_airfoil.addRow(label, self.normal_thickness)\r\n\r\n label = QtWidgets.QLabel('Cell Thickness ratio (-)')\r\n label.setToolTip('Thickness of the last cell vs. the first cell in ' +\r\n 'the airfoil mesh block' +\r\n '\\nThe first cell is the one attached to the airfoil')\r\n self.ratio = QtWidgets.QDoubleSpinBox()\r\n self.ratio.setSingleStep(0.1)\r\n self.ratio.setRange(1., 10.)\r\n self.ratio.setValue(3.0)\r\n self.ratio.setDecimals(1)\r\n self.form_mesh_airfoil.addRow(label, self.ratio)\r\n\r\n self.form_mesh_TE = QtWidgets.QFormLayout()\r\n\r\n label = QtWidgets.QLabel(u'Divisions at trailing edge')\r\n label.setToolTip('Number of subdivisions along the vertical part of the TE')\r\n self.te_div = QtWidgets.QSpinBox()\r\n self.te_div.setSingleStep(1)\r\n self.te_div.setRange(1, 20)\r\n self.te_div.setValue(3)\r\n self.form_mesh_TE.addRow(label, self.te_div)\r\n\r\n label = QtWidgets.QLabel(u'Divisions downstream trailing edge')\r\n self.points_te = QtWidgets.QSpinBox()\r\n self.points_te.setSingleStep(1)\r\n self.points_te.setRange(1, 100)\r\n self.points_te.setValue(6)\r\n self.form_mesh_TE.addRow(label, self.points_te)\r\n\r\n label = QtWidgets.QLabel('Length behind trailing edge (%)')\r\n label.setToolTip('The length is specified wrt to the unit chord')\r\n self.length_te = QtWidgets.QDoubleSpinBox()\r\n self.length_te.setSingleStep(0.1)\r\n self.length_te.setRange(0.1, 30.)\r\n self.length_te.setValue(4.0)\r\n self.length_te.setDecimals(1)\r\n self.form_mesh_TE.addRow(label, self.length_te)\r\n\r\n label = QtWidgets.QLabel('Cell Thickness ratio (-)')\r\n label.setToolTip('Thickness of the last cell vs. 
the first cell in ' +\r\n 'the trailing edge mesh block' + '\\n'\r\n 'The first cell is the one attached to the airfoil ' +\r\n 'trailing edge')\r\n self.ratio_te = QtWidgets.QDoubleSpinBox()\r\n self.ratio_te.setSingleStep(0.1)\r\n self.ratio_te.setRange(1., 10.)\r\n self.ratio_te.setValue(3.0)\r\n self.ratio_te.setDecimals(1)\r\n self.form_mesh_TE.addRow(label, self.ratio_te)\r\n\r\n self.form_mesh_tunnel = QtWidgets.QFormLayout()\r\n\r\n label = QtWidgets.QLabel('Windtunnel Height (chords)')\r\n label.setToolTip('The height of the windtunnel in units ' +\r\n 'of chord length')\r\n self.tunnel_height = QtWidgets.QDoubleSpinBox()\r\n self.tunnel_height.setSingleStep(0.1)\r\n self.tunnel_height.setRange(1.0, 10.)\r\n self.tunnel_height.setValue(3.5)\r\n self.tunnel_height.setDecimals(1)\r\n self.form_mesh_tunnel.addRow(label, self.tunnel_height)\r\n\r\n label = QtWidgets.QLabel(u'Divisions of Tunnel Height')\r\n self.divisions_height = QtWidgets.QSpinBox()\r\n self.divisions_height.setSingleStep(10)\r\n self.divisions_height.setRange(1, 1000)\r\n self.divisions_height.setValue(100)\r\n self.form_mesh_tunnel.addRow(label, self.divisions_height)\r\n\r\n label = QtWidgets.QLabel('Cell Thickness ratio (-)')\r\n self.ratio_height = QtWidgets.QDoubleSpinBox()\r\n self.ratio_height.setSingleStep(1.0)\r\n self.ratio_height.setRange(0.1, 100.)\r\n self.ratio_height.setValue(10.0)\r\n self.ratio_height.setDecimals(1)\r\n self.form_mesh_tunnel.addRow(label, self.ratio_height)\r\n\r\n label = QtWidgets.QLabel('Distribution biasing')\r\n self.dist = QtWidgets.QComboBox()\r\n self.dist.addItems(['symmetric', 'lower', 'upper'])\r\n self.dist.setCurrentIndex(0)\r\n self.form_mesh_tunnel.addRow(label, self.dist)\r\n\r\n self.form_mesh_wake = QtWidgets.QFormLayout()\r\n\r\n label = QtWidgets.QLabel('Windtunnel Wake (chords)')\r\n label.setToolTip('The length of the wake of the windtunnel in ' +\r\n 'units of chord length')\r\n self.tunnel_wake = QtWidgets.QDoubleSpinBox()\r\n self.tunnel_wake.setSingleStep(0.1)\r\n self.tunnel_wake.setRange(0.1, 50.)\r\n self.tunnel_wake.setValue(7.0)\r\n self.tunnel_wake.setDecimals(1)\r\n self.form_mesh_wake.addRow(label, self.tunnel_wake)\r\n\r\n label = QtWidgets.QLabel(u'Divisions in the wake')\r\n self.divisions_wake = QtWidgets.QSpinBox()\r\n self.divisions_wake.setSingleStep(10)\r\n self.divisions_wake.setRange(1, 1000)\r\n self.divisions_wake.setValue(100)\r\n self.form_mesh_wake.addRow(label, self.divisions_wake)\r\n\r\n label = QtWidgets.QLabel('Cell Thickness ratio (-)')\r\n label.setToolTip('Thickness of the last cell vs. the first cell in ' +\r\n 'the wake mesh block')\r\n self.ratio_wake = QtWidgets.QDoubleSpinBox()\r\n self.ratio_wake.setSingleStep(0.1)\r\n self.ratio_wake.setRange(0.01, 100.0)\r\n self.ratio_wake.setValue(15.0)\r\n self.ratio_wake.setDecimals(1)\r\n self.form_mesh_wake.addRow(label, self.ratio_wake)\r\n\r\n label = QtWidgets.QLabel('Equalize vertical wake line at (%)')\r\n label.setToolTip('Equalize vertical the wake line. 
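# Editor's note: the "Cell Thickness ratio" tooltips above describe a
# grading where the last cell is `ratio` times the first. Assuming a plain
# geometric law (which may differ in detail from Meshing's internals), N
# cells of total thickness T have sizes s*g**i with g = ratio**(1/(N-1)):
def cell_sizes(total, n_cells, ratio):
    if ratio == 1.0:
        return [total / n_cells] * n_cells            # uniform spacing
    g = ratio ** (1.0 / (n_cells - 1))
    s = total * (g - 1.0) / (g ** n_cells - 1.0)      # geometric series sums to total
    return [s * g ** i for i in range(n_cells)]

sizes = cell_sizes(total=0.04, n_cells=15, ratio=3.0)  # the defaults used above
assert abs(sum(sizes) - 0.04) < 1e-12
assert abs(sizes[-1] / sizes[0] - 3.0) < 1e-9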
' +\r\n 'Homogeneous vertical distribution x% downstream')\r\n self.spread = QtWidgets.QDoubleSpinBox()\r\n self.spread.setSingleStep(5.0)\r\n self.spread.setRange(10.0, 90.0)\r\n self.spread.setValue(30.0)\r\n self.spread.setDecimals(1)\r\n self.form_mesh_wake.addRow(label, self.spread)\r\n\r\n vbox = QtWidgets.QVBoxLayout()\r\n vbox.addLayout(self.form_mesh_airfoil)\r\n box_airfoil = QtWidgets.QGroupBox('Airfoil contour mesh')\r\n box_airfoil.setLayout(vbox)\r\n\r\n vbox = QtWidgets.QVBoxLayout()\r\n vbox.addLayout(self.form_mesh_TE)\r\n box_TE = QtWidgets.QGroupBox('Airfoil trailing edge mesh')\r\n box_TE.setLayout(vbox)\r\n\r\n vbox = QtWidgets.QVBoxLayout()\r\n vbox.addLayout(self.form_mesh_tunnel)\r\n box_tunnel = QtWidgets.QGroupBox('Windtunnel mesh (around airfoil)')\r\n box_tunnel.setLayout(vbox)\r\n\r\n vbox = QtWidgets.QVBoxLayout()\r\n vbox.addLayout(self.form_mesh_wake)\r\n box_wake = QtWidgets.QGroupBox('Windtunnel mesh (wake)')\r\n box_wake.setLayout(vbox)\r\n\r\n createMeshButton = QtWidgets.QPushButton('Create Mesh')\r\n hbl_cm = QtWidgets.QHBoxLayout()\r\n hbl_cm.addStretch(stretch=1)\r\n hbl_cm.addWidget(createMeshButton, stretch=4)\r\n hbl_cm.addStretch(stretch=1)\r\n\r\n # export menu\r\n name = ''\r\n hbox = QtWidgets.QHBoxLayout()\r\n lbl = QtWidgets.QLabel('Filename')\r\n self.lineedit_mesh = QtWidgets.QLineEdit(name)\r\n browseMeshButton = QtWidgets.QPushButton('Browse')\r\n hbox.addWidget(lbl)\r\n hbox.addWidget(self.lineedit_mesh)\r\n hbox.addWidget(browseMeshButton)\r\n\r\n exportMeshButton = QtWidgets.QPushButton('Export Mesh')\r\n hbl = QtWidgets.QHBoxLayout()\r\n hbl.addStretch(stretch=1)\r\n hbl.addWidget(exportMeshButton, stretch=4)\r\n hbl.addStretch(stretch=1)\r\n\r\n rdl = QtWidgets.QHBoxLayout()\r\n btn_group = QtWidgets.QButtonGroup()\r\n self.check_FIRE = QtWidgets.QCheckBox('AVL FIRE')\r\n self.check_SU2 = QtWidgets.QCheckBox('SU2')\r\n self.check_GMSH = QtWidgets.QCheckBox('GMSH')\r\n btn_group.addButton(self.check_FIRE)\r\n btn_group.addButton(self.check_SU2)\r\n self.check_FIRE.setChecked(True)\r\n self.check_SU2.setChecked(False)\r\n self.check_GMSH.setChecked(False)\r\n rdl.addStretch(5)\r\n rdl.addWidget(self.check_FIRE)\r\n rdl.addStretch(1)\r\n rdl.addWidget(self.check_SU2)\r\n rdl.addStretch(1)\r\n rdl.addWidget(self.check_GMSH)\r\n rdl.addStretch(5)\r\n\r\n vbl1 = QtWidgets.QVBoxLayout()\r\n vbl1.addLayout(rdl)\r\n vbl1.addLayout(hbox)\r\n vbl1.addLayout(hbl)\r\n\r\n self.box_meshexport = QtWidgets.QGroupBox('Mesh Export')\r\n self.box_meshexport.setLayout(vbl1)\r\n self.box_meshexport.setEnabled(False)\r\n\r\n vbl = QtWidgets.QVBoxLayout()\r\n vbl.addStretch(1)\r\n vbl.addWidget(box_airfoil)\r\n vbl.addWidget(box_TE)\r\n vbl.addWidget(box_tunnel)\r\n vbl.addWidget(box_wake)\r\n vbl.addLayout(hbl_cm)\r\n vbl.addStretch(1)\r\n vbl.addWidget(self.box_meshexport)\r\n vbl.addStretch(10)\r\n\r\n self.item_msh = QtWidgets.QWidget()\r\n self.item_msh.setLayout(vbl)\r\n\r\n browseMeshButton.clicked.connect(self.onBrowseMesh)\r\n createMeshButton.clicked.connect(self.generateMesh)\r\n exportMeshButton.clicked.connect(self.exportMesh)\r\n\r\n def itemSplineRefine(self):\r\n\r\n form = QtWidgets.QFormLayout()\r\n\r\n label = QtWidgets.QLabel(u'Refinement tolerance (°)')\r\n self.tolerance = QtWidgets.QDoubleSpinBox()\r\n self.tolerance.setSingleStep(0.1)\r\n self.tolerance.setDecimals(1)\r\n self.tolerance.setRange(50.0, 177.0)\r\n self.tolerance.setValue(172.0)\r\n form.addRow(label, self.tolerance)\r\n\r\n label = QtWidgets.QLabel(u'Refine trailing 
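# Editor's note: in itemMeshing() above only check_FIRE and check_SU2 are
# added to btn_group, so check_GMSH is not part of the exclusive group and
# can be ticked together with another format (exportMesh's if/elif chain
# then silently prefers FIRE/SU2). If all three formats were meant to be
# mutually exclusive -- an assumption about intent -- registering every box
# would fix it:
btn_group = QtWidgets.QButtonGroup()
btn_group.setExclusive(True)            # the default, shown for clarity
for box in (self.check_FIRE, self.check_SU2, self.check_GMSH):
    btn_group.addButton(box)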
edge (old segments)')\r\n self.ref_te = QtWidgets.QSpinBox()\r\n self.ref_te.setSingleStep(1)\r\n self.ref_te.setRange(1, 50)\r\n self.ref_te.setValue(3)\r\n form.addRow(label, self.ref_te)\r\n\r\n label = QtWidgets.QLabel(u'Refine trailing edge (new segments)')\r\n self.ref_te_n = QtWidgets.QSpinBox()\r\n self.ref_te_n.setSingleStep(1)\r\n self.ref_te_n.setRange(1, 100)\r\n self.ref_te_n.setValue(6)\r\n form.addRow(label, self.ref_te_n)\r\n\r\n label = QtWidgets.QLabel(u'Refine trailing edge ratio')\r\n self.ref_te_ratio = QtWidgets.QDoubleSpinBox()\r\n self.ref_te_ratio.setSingleStep(0.1)\r\n self.ref_te_ratio.setDecimals(1)\r\n self.ref_te_ratio.setRange(1., 10.)\r\n self.ref_te_ratio.setValue(3.0)\r\n form.addRow(label, self.ref_te_ratio)\r\n\r\n label = QtWidgets.QLabel('Number points on spline (-)')\r\n self.points = QtWidgets.QSpinBox()\r\n self.points.setSingleStep(10)\r\n self.points.setRange(10, 1000)\r\n self.points.setValue(200)\r\n form.addRow(label, self.points)\r\n\r\n splineButton = QtWidgets.QPushButton('Spline and Refine')\r\n hbl = QtWidgets.QHBoxLayout()\r\n hbl.addStretch(stretch=1)\r\n hbl.addWidget(splineButton, stretch=4)\r\n hbl.addStretch(stretch=1)\r\n\r\n vbox = QtWidgets.QVBoxLayout()\r\n vbox.addLayout(form)\r\n vbox.addLayout(hbl)\r\n box = QtWidgets.QGroupBox('Airfoil contour refinement')\r\n box.setLayout(vbox)\r\n\r\n form1 = QtWidgets.QFormLayout()\r\n\r\n label = QtWidgets.QLabel(u'Upper side blending length (%)')\r\n self.blend_u = QtWidgets.QDoubleSpinBox()\r\n self.blend_u.setSingleStep(1.0)\r\n self.blend_u.setDecimals(1)\r\n self.blend_u.setRange(0.1, 100.0)\r\n self.blend_u.setValue(30.0)\r\n form1.addRow(label, self.blend_u)\r\n label = QtWidgets.QLabel(u'Lower side blending length (%)')\r\n self.blend_l = QtWidgets.QDoubleSpinBox()\r\n self.blend_l.setSingleStep(1.0)\r\n self.blend_l.setDecimals(1)\r\n self.blend_l.setRange(0.1, 100.0)\r\n self.blend_l.setValue(30.0)\r\n form1.addRow(label, self.blend_l)\r\n\r\n label = QtWidgets.QLabel(u'Upper blending polynomial exponent (-)')\r\n self.exponent_u = QtWidgets.QDoubleSpinBox()\r\n self.exponent_u.setSingleStep(0.1)\r\n self.exponent_u.setDecimals(1)\r\n self.exponent_u.setRange(1.0, 10.0)\r\n self.exponent_u.setValue(3.0)\r\n form1.addRow(label, self.exponent_u)\r\n label = QtWidgets.QLabel(u'Lower blending polynomial exponent (-)')\r\n self.exponent_l = QtWidgets.QDoubleSpinBox()\r\n self.exponent_l.setSingleStep(0.1)\r\n self.exponent_l.setDecimals(1)\r\n self.exponent_l.setRange(1.0, 10.0)\r\n self.exponent_l.setValue(3.0)\r\n form1.addRow(label, self.exponent_l)\r\n\r\n label = QtWidgets.QLabel(u'Trailing edge thickness relative to chord (%)')\r\n self.thickness = QtWidgets.QDoubleSpinBox()\r\n self.thickness.setSingleStep(0.05)\r\n self.thickness.setDecimals(2)\r\n self.thickness.setRange(0.0, 10.0)\r\n self.thickness.setValue(0.4)\r\n form1.addRow(label, self.thickness)\r\n\r\n trailingButton = QtWidgets.QPushButton('Add Trailing Edge')\r\n hbl1 = QtWidgets.QHBoxLayout()\r\n hbl1.addStretch(stretch=1)\r\n hbl1.addWidget(trailingButton, stretch=4)\r\n hbl1.addStretch(stretch=1)\r\n\r\n vbox = QtWidgets.QVBoxLayout()\r\n vbox.addLayout(form1)\r\n vbox.addLayout(hbl1)\r\n box1 = QtWidgets.QGroupBox('Airfoil trailing edge')\r\n box1.setLayout(vbox)\r\n\r\n # export menu\r\n name = ''\r\n hbox = QtWidgets.QHBoxLayout()\r\n lbl = QtWidgets.QLabel('Filename')\r\n self.lineedit = QtWidgets.QLineEdit(name)\r\n exportContourButton = QtWidgets.QPushButton('Export Contour')\r\n hbox.addWidget(lbl)\r\n 
hbox.addWidget(self.lineedit)\r\n        hbox.addWidget(exportContourButton)\r\n\r\n        box2 = QtWidgets.QGroupBox('Export modified contour')\r\n        box2.setLayout(hbox)\r\n\r\n        vbl = QtWidgets.QVBoxLayout()\r\n        vbl.addStretch(1)\r\n        vbl.addWidget(box)\r\n        vbl.addStretch(1)\r\n        vbl.addWidget(box1)\r\n        vbl.addStretch(1)\r\n        vbl.addWidget(box2)\r\n        vbl.addStretch(10)\r\n\r\n        self.item_cm = QtWidgets.QWidget()\r\n        self.item_cm.setLayout(vbl)\r\n\r\n        splineButton.clicked.connect(self.spline_and_refine)\r\n        trailingButton.clicked.connect(self.makeTrailingEdge)\r\n        splineButton.clicked.connect(lambda: self.updatename('spline'))\r\n        trailingButton.clicked.connect(lambda: self.updatename('trailing'))\r\n        exportContourButton.clicked.connect(self.onBrowse)\r\n\r\n    def makeToolbox(self):\r\n\r\n        # populate toolbox\r\n        self.tb1 = self.addItem(self.item_fs, 'Airfoil Database')\r\n        self.tb2 = self.addItem(self.item_cm,\r\n                                'Contour Splining and Refinement')\r\n        self.tb4 = self.addItem(self.item_msh, 'Meshing')\r\n        self.tb5 = self.addItem(self.item_ap, 'Aerodynamics')\r\n        self.tb3 = self.addItem(self.item_ca, 'Contour Analysis')\r\n\r\n        self.setItemToolTip(0, 'Airfoil database ' +\r\n                            '(browse filesystem)')\r\n        self.setItemToolTip(1, 'Spline and refine the contour')\r\n        self.setItemToolTip(2, 'Generate a 2D mesh around the ' +\r\n                            'selected airfoil')\r\n        self.setItemToolTip(3, 'Compute panel based aerodynamic ' +\r\n                            'coefficients')\r\n        self.setItemToolTip(4, 'Analyze the curvature of the ' +\r\n                            'selected airfoil')\r\n\r\n        self.setItemIcon(0, QtGui.QIcon(ICONS_L + 'airfoil.png'))\r\n        self.setItemIcon(1, QtGui.QIcon(ICONS_L + 'Pixel editor.png'))\r\n        self.setItemIcon(2, QtGui.QIcon(ICONS_L + 'mesh.png'))\r\n        self.setItemIcon(3, QtGui.QIcon(ICONS_L + 'Fast delivery.png'))\r\n        self.setItemIcon(4, QtGui.QIcon(ICONS_L + 'Pixel editor.png'))\r\n\r\n        # preselect airfoil database box\r\n        self.setCurrentIndex(self.tb1)\r\n\r\n    def toggleRawPoints(self):\r\n        \"\"\"Toggle points of raw airfoil contour (on/off)\"\"\"\r\n        if hasattr(self.parent.airfoil, 'polygonMarkersGroup'):\r\n            visible = self.parent.airfoil.polygonMarkersGroup.isVisible()\r\n            self.parent.airfoil.polygonMarkersGroup.setVisible(not visible)\r\n\r\n    def toggleSplinePoints(self):\r\n        \"\"\"Toggle points of splined airfoil contour (on/off)\"\"\"\r\n        if hasattr(self.parent.airfoil, 'splineMarkersGroup'):\r\n            visible = self.parent.airfoil.splineMarkersGroup.isVisible()\r\n            self.parent.airfoil.splineMarkersGroup.setVisible(not visible)\r\n\r\n    def toggleSpline(self):\r\n        if hasattr(self.parent.airfoil, 'contourSpline'):\r\n            visible = self.parent.airfoil.contourSpline.isVisible()\r\n            self.parent.airfoil.contourSpline.setVisible(not visible)\r\n\r\n    def toggleChord(self):\r\n        \"\"\"Toggle visibility of the airfoil chord\"\"\"\r\n        if hasattr(self.parent.airfoil, 'chord'):\r\n            visible = self.parent.airfoil.chord.isVisible()\r\n            self.parent.airfoil.chord.setVisible(not visible)\r\n\r\n    def toggleMesh(self):\r\n        \"\"\"Toggle visibility of the mesh lines\"\"\"\r\n        if hasattr(self.parent.airfoil, 'mesh'):\r\n            visible = self.parent.airfoil.mesh.isVisible()\r\n            self.parent.airfoil.mesh.setVisible(not visible)\r\n\r\n    def toggleLeCircle(self):\r\n        \"\"\"Toggle visibility of the leading edge circle\"\"\"\r\n        if hasattr(self.parent.airfoil, 'le_circle'):\r\n            visible = self.parent.airfoil.le_circle.isVisible()\r\n            self.parent.airfoil.le_circle.setVisible(not visible)\r\n\r\n    def runPanelMethod(self):\r\n        \"\"\"Gui callback to run AeroPython panel method in module 
PSvpMethod\"\"\"\r\n\r\n if self.parent.airfoil:\r\n x, y = self.parent.airfoil.raw_coordinates\r\n u_inf = self.freestream.value()\r\n alpha = self.spin.value()\r\n panels = self.panels.value()\r\n SvpMethod.runSVP(self.parent.airfoil.name, x, y, u_inf, alpha, panels)\r\n else:\r\n self.parent.slots.messageBox('No airfoil loaded.')\r\n return\r\n\r\n def spline_and_refine(self):\r\n \"\"\"Spline and refine airfoil\"\"\"\r\n\r\n if self.parent.airfoil:\r\n refine = SplineRefine.SplineRefine()\r\n refine.doSplineRefine(tolerance=self.tolerance.value(),\r\n points=self.points.value(),\r\n ref_te=self.ref_te.value(),\r\n ref_te_n=self.ref_te_n.value(),\r\n ref_te_ratio=self.ref_te_ratio.value())\r\n else:\r\n self.parent.slots.messageBox('No airfoil loaded.')\r\n return\r\n\r\n def makeTrailingEdge(self):\r\n\r\n if self.parent.airfoil:\r\n if not hasattr(self.parent.airfoil, 'spline_data'):\r\n message = 'Splining needs to be done first.'\r\n self.parent.slots.messageBox(message)\r\n return\r\n\r\n trailing = TrailingEdge.TrailingEdge()\r\n trailing.trailingEdge(blend=self.blend_u.value()/100.0,\r\n ex=self.exponent_u.value(),\r\n thickness=self.thickness.value(),\r\n side='upper')\r\n trailing.trailingEdge(blend=self.blend_l.value()/100.0,\r\n ex=self.exponent_l.value(),\r\n thickness=self.thickness.value(),\r\n side='lower')\r\n else:\r\n self.parent.slots.messageBox('No airfoil loaded.')\r\n return\r\n\r\n def generateMesh(self):\r\n self.wind_tunnel = Meshing.Windtunnel()\r\n self.wind_tunnel.makeMesh()\r\n\r\n def exportMesh(self, from_browse_mesh=False):\r\n\r\n name = self.lineedit_mesh.text()\r\n\r\n nameroot, extension = os.path.splitext(str(name))\r\n\r\n if from_browse_mesh:\r\n fullname = name\r\n else:\r\n fullname = OUTPUTDATA + nameroot\r\n\r\n mesh = self.wind_tunnel.mesh\r\n blocks = self.wind_tunnel.blocks\r\n\r\n if self.check_FIRE.isChecked():\r\n Meshing.BlockMesh.writeFLMA(mesh, blocks, name=fullname)\r\n elif self.check_SU2.isChecked():\r\n Meshing.BlockMesh.writeSU2(mesh, blocks, name=fullname)\r\n elif self.check_GMSH.isChecked():\r\n Meshing.BlockMesh.writeGMSH(mesh, blocks, name=fullname)\r\n\r\n def analyzeAirfoil(self):\r\n \"\"\"Airfoil contour analysis with respect to geometric features\"\"\"\r\n\r\n if not self.parent.airfoil:\r\n self.parent.slots.messageBox('No airfoil loaded.')\r\n return\r\n\r\n # switch tab contour analysis\r\n self.parent.centralwidget.tabs.setCurrentIndex(1)\r\n # keep tab 'Contour Analysis'\r\n self.setCurrentIndex(self.tb3)\r\n\r\n # enable radio buttons for plotting when analysis starts\r\n self.cgb.setEnabled(True)\r\n\r\n # analyse contour\r\n self.parent.contourview.analyze()\r\n\r\n # connect signals to slots\r\n # lambda allows to send extra parameters\r\n self.cpb1.clicked.connect(lambda:\r\n self.parent.contourview.drawContour('gradient'))\r\n self.cpb2.clicked.connect(lambda:\r\n self.parent.contourview.drawContour('curvature'))\r\n self.cpb3.clicked.connect(lambda:\r\n self.parent.contourview.drawContour('radius'))\r\n\r\n def updatename(self, sender_button):\r\n\r\n name = self.parent.airfoil.name\r\n\r\n nameroot, extension = os.path.splitext(str(name))\r\n\r\n if 'spline' in sender_button:\r\n nameroot += '_Spline'\r\n self.lineedit.setText(nameroot + extension)\r\n if 'trailing' in sender_button:\r\n nameroot += '_Spline_TE'\r\n self.lineedit.setText(nameroot + extension)\r\n\r\n def onBrowse(self):\r\n\r\n names = []\r\n\r\n dialog = QtWidgets.QFileDialog()\r\n\r\n provider = IconProvider.IconProvider()\r\n 
dialog.setIconProvider(provider)\r\n        dialog.setNameFilter(DIALOGFILTER)\r\n        dialog.setNameFilterDetailsVisible(True)\r\n        dialog.setDirectory(OUTPUTDATA)\r\n        # allow only to select one file\r\n        dialog.setFileMode(QtWidgets.QFileDialog.AnyFile)\r\n        # display also size and date\r\n        dialog.setViewMode(QtWidgets.QFileDialog.Detail)\r\n        # make it a save dialog\r\n        dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)\r\n        # put default name in the save dialog\r\n        dialog.selectFile(self.lineedit.text())\r\n\r\n        # open custom file dialog using custom icons\r\n        if dialog.exec_():\r\n            names = dialog.selectedFiles()\r\n            # filter = dialog.selectedFilter()\r\n\r\n        if not names:\r\n            return\r\n\r\n        # names is a list of QStrings\r\n        filename = str(names[0])\r\n\r\n        # get coordinates of modified contour\r\n        x, y = self.parent.airfoil.spline_data[0]\r\n        airfoil_name = self.parent.airfoil.name\r\n\r\n        # export modified contour\r\n        with open(filename, 'w') as f:\r\n            f.write('#\\n')\r\n            f.write('# File created with ' + PyAero.__appname__ + '\\n')\r\n            f.write('# Version: ' + PyAero.__version__ + '\\n')\r\n            f.write('# Author: ' + PyAero.__author__ + '\\n')\r\n            f.write('#\\n')\r\n            f.write('# Derived from: %s\\n' % (str(airfoil_name).strip()))\r\n            f.write('# Number of points: %s\\n' % (len(x)))\r\n            f.write('#\\n')\r\n            for i, xx in enumerate(x):\r\n                f.write((2*'{:10.6f}').format(x[i], y[i]) + '\\n')\r\n\r\n    def onBrowseMesh(self):\r\n\r\n        names = []\r\n\r\n        dialog = QtWidgets.QFileDialog()\r\n\r\n        provider = IconProvider.IconProvider()\r\n        dialog.setIconProvider(provider)\r\n        options = QtWidgets.QFileDialog.Options()\r\n        options |= QtWidgets.QFileDialog.DontUseNativeDialog\r\n        dialog.setOptions(options)\r\n        dialog.setNameFilter(DIALOGFILTER_MESH)\r\n        dialog.setNameFilterDetailsVisible(True)\r\n        dialog.setDirectory(OUTPUTDATA)\r\n        # allow only to select one file\r\n        dialog.setFileMode(QtWidgets.QFileDialog.AnyFile)\r\n        # display also size and date\r\n        dialog.setViewMode(QtWidgets.QFileDialog.Detail)\r\n        # make it a save dialog\r\n        dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)\r\n        # put default name in the save dialog\r\n        dialog.selectFile(self.lineedit_mesh.text())\r\n\r\n        # open custom file dialog using custom icons\r\n        if dialog.exec_():\r\n            names = dialog.selectedFiles()\r\n            # filter = dialog.selectedFilter()\r\n\r\n        if not names:\r\n            return\r\n\r\n        # names is a list of QStrings\r\n        filename = str(names[0])\r\n\r\n        self.lineedit_mesh.setText(filename)\r\n\r\n        self.exportMesh(from_browse_mesh=True)\r\n\r\n\r\nclass ListWidget(QtWidgets.QListWidget):\r\n    \"\"\"Subclassing QListWidget in order to be able to catch key press\r\n    events\r\n    \"\"\"\r\n    def __init__(self, parent):\r\n        super().__init__()\r\n        self.parent = parent\r\n\r\n        self.itemClicked.connect(self.listItemClicked)\r\n        self.itemDoubleClicked.connect(self.listItemDoubleClicked)\r\n\r\n        # get MainWindow instance (overcomes handling parents)\r\n        self.mainwindow = QtCore.QCoreApplication.instance().mainwindow\r\n\r\n    def keyPressEvent(self, event):\r\n        key = event.key()\r\n\r\n        if key == QtCore.Qt.Key_Delete:\r\n            item = self.selectedItems()[0]\r\n            row = self.row(item)\r\n            self.takeItem(row)\r\n\r\n            for airfoil in self.parent.airfoils:\r\n                if item.text() == airfoil.name:\r\n                    name = airfoil.name\r\n                    self.parent.slots.removeAirfoil(name=name)\r\n                    break\r\n\r\n        # call original implementation of QListWidget keyPressEvent handler\r\n        super().keyPressEvent(event)\r\n\r\n    def listItemClicked(self, item):\r\n        \"\"\"show information of airfoil in message 
window\"\"\"\r\n pass\r\n\r\n def listItemDoubleClicked(self, item):\r\n \"\"\"make double clicked name in listwidget new active airfoil\"\"\"\r\n for airfoil in self.parent.airfoils:\r\n if airfoil.name == item.text():\r\n # first clear all items from the scene\r\n self.parent.scene.clear()\r\n # activate double clicked airfoil\r\n airfoil.makeAirfoil()\r\n # add all airfoil items (contour markers) to the scene\r\n Airfoil.Airfoil.addToScene(airfoil, self.parent.scene)\r\n # make double clicked airfoil the currently active airfoil\r\n self.parent.airfoil = airfoil\r\n # adjust the marker size again\r\n self.mainwindow.view.adjustMarkerSize()\r\n break\r\n","sub_path":"src/ToolBox.py","file_name":"ToolBox.py","file_ext":"py","file_size_in_byte":33846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"389228448","text":"import pymysql\nclass SendSensorsHandler:\n def sendSensorsHandler(self, icustay_id, timestamp, heart_rate, lsb, msb, spo2):\n tableid = icustay_id % 10;\n db = pymysql.connect(\"localhost\", \"root\", \"12050818\", \"healthy_center\");\n mysqlCmd = \"insert into original_data_\" +str(tableid)+ \" set icustay_id='\"+str(icustay_id)+\"', time_stamp='\"+str(timestamp)+\"', hear_rate='\"+str(heart_rate)+\"', sbp='\"+str(lsb)+\"', map='\"+str(msb)+\"', SpO2='\"+str(spo2)+\"', respiratory_rate='\"+str(0)+\"', dbp='\"+str(0)+\"';\";\n cursor = db.cursor();\n cursor.execute(mysqlCmd);\n db.commit();\n db.close();\n return \"null\";\n","sub_path":"cernet_server/hr_server/servlet/SendSensorsHandler.py","file_name":"SendSensorsHandler.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"633561439","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n# define the helper function \ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n\n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], \n y=X[y == cl, 1],\n alpha=0.8, \n c=colors[idx],\n marker=markers[idx], \n label=cl, \n edgecolor='black')\n\n # highlight test samples\n if test_idx:\n # plot all samples\n X_test, y_test = X[test_idx, :], y[test_idx]\n\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n edgecolor='black',\n alpha=1.0,\n linewidth=1,\n marker='o',\n s=100, \n label='test set')\n\n# Read the data \n\niris_data = pd.read_excel('Iris.xls', header=0, usecols=\"A:E\")\ncate_dummies = pd.get_dummies(iris_data[\"iris\"])\niris_target = cate_dummies.apply(lambda x: 0 * x[0] + 1 * x[1] + 2 * x[2], axis=1).as_matrix()\niris_data = iris_data.drop('iris', axis=1).as_matrix()[:, [2, 3]]\n\n# Preprocessing the train and test\n\n# Split the train and test\nfrom sklearn.model_selection import train_test_split\nX, y = iris_data, 
iris_target\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)\n\n# Standardizing the features\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\n\nX_combined_std = np.vstack((X_train_std, X_test_std))\nX_combined = np.vstack((X_train, X_test))\ny_combined = np.hstack((y_train, y_test))\n\n# ## Building a decision tree\n\nfrom sklearn.tree import DecisionTreeClassifier\n\ntree = DecisionTreeClassifier(criterion='gini', \n max_depth=4, \n random_state=1)\ntree.fit(X_train, y_train)\n\n\nplot_decision_regions(X_combined, y_combined, classifier=tree, test_idx=range(105, 150))\n\nplt.xlabel('petal length [cm]')\nplt.ylabel('petal width [cm]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n#plt.savefig('images/03_20.png', dpi=300)\nplt.show()\n\nfrom pydotplus import graph_from_dot_data\nfrom sklearn.tree import export_graphviz\n\ndot_data = export_graphviz(tree,\n filled=True, \n rounded=True,\n class_names=['Setosa', \n 'Versicolor',\n 'Virginica'],\n feature_names=['petal length', \n 'petal width'],\n out_file=None) \ngraph = graph_from_dot_data(dot_data) \ngraph.write_png('tree.png') \n\n\n# # K-nearest neighbors - a lazy learning algorithm\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import metrics\n\nk_range = range(1, 26)\nscores = {}\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train_std, y_train)\n y_pred = knn.predict(X_test_std)\n scores[k] = metrics.accuracy_score(y_test, y_pred)\n\nscores_df = pd.DataFrame.from_dict(scores, orient='index')\nplt.plot(scores_df.index, scores_df[0], 'bo')\nplt.xlabel('K')\nplt.ylabel('accuracy_score')\nplt.xlim(0, 26)\nplt.ylim(0.5, 1.5)\nplt.show()\n\nbest_k = scores_df.idxmax(axis=0)[0]\nknn = KNeighborsClassifier(n_neighbors=best_k)\nknn.fit(X_train_std, y_train)\n\n\nplot_decision_regions(X_combined_std, y_combined, \n classifier=knn, test_idx=range(105, 150))\n\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n#plt.savefig('images/03_24.png', dpi=300)\nplt.show()\n\nprint(\"My name is Hao Ren\")\nprint(\"My NetID is: haoren2\")\nprint(\"I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.\")\n","sub_path":"IE598_F18_HW2/knn_dt.py","file_name":"knn_dt.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"49520951","text":"import sys\nimport struct\nimport threading\nimport json\nimport time\n\n# from queue import Queue\nfrom pynput import keyboard\nimport pyautogui\n\nscores = None\nkeySelect = None\nhotKey = None\nmissing_students = None\nnumf10 = 0\npasted = \"\"\n\nif sys.platform == \"win32\":\n import os, msvcrt\n\n msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n\n\ndef on_functionf8(key):\n if (key == keyboard.Key[hotKey]):\n\n for val in scores:\n\n if((val['status']==\"ABSENT\")&(missing_students==\"skip\")):\n pass\n elif((val['status']==\"ABSENT\")&(missing_students==\"zero\")):\n pyautogui.typewrite(str(val[\"score\"]))\n else:\n pyautogui.typewrite(str(val[\"score\"]))\n\n if (keySelect == 'enter'):\n pyautogui.typewrite(['enter'])\n if (keySelect == 'denter'):\n pyautogui.typewrite(['enter'])\n pyautogui.typewrite(['enter'])\n if 
(keySelect == 'tab'):\n pyautogui.typewrite(['tab'])\n if (keySelect == 'dtab'):\n pyautogui.typewrite(['tab'])\n pyautogui.typewrite(['tab'])\n if (keySelect == 'right'):\n pyautogui.typewrite(['right'])\n if (keySelect == 'down'):\n pyautogui.typewrite(['down'])\n\n\n return False # pyautogui.typewrite('kkkkkk')\n\n\nlistener = keyboard.Listener(on_release=on_functionf8)\n\n\ndef send_message(message):\n # Write message size.\n sys.stdout.write(struct.pack('I', len(message)))\n # Write the message itself.\n sys.stdout.write(message)\n sys.stdout.flush()\n\n\ndef read_thread_func(stop_event):\n message_number = 0\n while 1:\n # Read the message length (first 4 bytes).\n\n\n text_length_bytes = sys.stdin.read(4)\n text_length = struct.unpack('i', text_length_bytes)[0]\n\n # Read the text (JSON object) oklf the message.\n text = sys.stdin.read(text_length).decode('utf-8')\n\n # print(text)\n text = json.loads(text)\n\n if (\"changed\" in text):\n send_message(text)\n stop_event.set()\n listener.stop()\n else:\n send_message(text)\n text = json.loads(text)\n global scores\n scores = []\n scores = text[\"data\"]\n global keySelect\n keySelect = text[\"key\"]\n global hotKey\n hotKey = text[\"hotKey\"]\n hotKey = str(hotKey)\n global missing_students\n missing_students = text[\"absent\"]\n missing_students = str(missing_students)\n\n\n\ndef Main():\n # queue = Queue(maxsize=-1)\n\n stop_event = threading.Event()\n thread = threading.Thread(target=read_thread_func, args=(stop_event,))\n thread.setDaemon(True)\n thread.start()\n listener.start()\n listener.join()\n\n # sys.exit(0)\n # read_thread_func()\n\n\nif __name__ == '__main__':\n Main()\n\n\n","sub_path":"src/fgfg.py","file_name":"fgfg.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"613907785","text":"'''\nAn interactive program that takes a list of words \n from command line. 
Then the user can interactively query \n whether a specific word exists in the given list or not.\n\ne.g.,\n\nfind_item.py Kichidi idly \"jeera rice\" pulav biriyani roti dosa\nEnter an item for lookup : \nAvailable.\n\nEnter an item for lookup : \nNot Available\n\nEnter an item for lookup : \nAvailable\n'''\nimport sys\nitem_list = sys.argv[1:]\nprint(item_list)\n\n\ndef find_item(item_list, item):\n    #find if the item is present in the item list.\n    found = False\n    for x in item_list:\n        #compare case insensitively to tolerate mixed case\n        if (x.lower() == item.lower()):\n            found = True\n            #we can stop looking if already found\n            break\n\n    return found\n\n\nwhile(True):\n    #read input from the console.\n    # use input if using python 3 instead of raw_input\n    item = raw_input('Enter item for lookup : ')\n\n    if(find_item(item_list, item)):\n        print('Available')\n    else:\n        if(item.lower() == 'q'): #quit if user gave q\n        #if(len(item) == 0): #quit if user gave q\n            exit(0)\n        else:\n            print('Not Available')\n\n\n","sub_path":"py/misc/find_item.py","file_name":"find_item.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}{"seq_id":"206896325","text":"# -*- coding: utf-8 -*-\n\n# Do simple calculation\n\n\"\"\" \nThis script performs simple calculations.\nArguments are given during the call of the script.\n\"\"\"\nimport argparse\n\nparser = argparse.ArgumentParser()\n\n\nparser.add_argument(\"number_1\", type=int, help=\"An integer\")\nparser.add_argument(\"number_2\", type=int, help=\"Another integer\")\n# parser.add_argument(\"-multi\", default=False, action=\"store_true\")\nparser.add_argument(\"--operation\", choices=[\"div\", \"sub\", \"sum\", \"mult\"])\n\nargs = parser.parse_args()\n\nif args.operation == \"div\":\n    result = args.number_1 / args.number_2\n    \nelif args.operation == \"sub\":\n    result = args.number_1 - args.number_2\n    \nelif args.operation == \"sum\":\n    result = args.number_1 + args.number_2\n    \nelif args.operation == \"mult\":\n    result = args.number_1 * args.number_2\n\n# commented code - not active anymore\n# print(args)\n#print(args.number_1, args.number_2)\n\n\n# if args.multi:\n# else: \n#     result = args.number_1 + args.number_2\n    \nprint (result)\n\n","sub_path":"Python_Scripts/simple_calculation_with_arguments.py","file_name":"simple_calculation_with_arguments.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}{"seq_id":"143901799","text":"import numpy as np\nimport utils as ut\nimport matplotlib.pyplot as plt\n\n\nepochs = 5000\ndims = [2, 4, 8, 16, 32, 64, 128, 256]\nweights = [1e-4, 1e-1]\n\nsrc_x_train = [[0, 0], [0, 1], [1, 1]]\nsrc_y_train = [0, 0, 1]\n\n\ndef preprocess(input_dim, t='z'):\n    global src_x_train\n    xs = []\n\n    def prep_zeros():\n        zeros = np.zeros(input_dim - 2)\n        for x in src_x_train:\n            xs.append(np.concatenate((x, zeros)))\n\n    def prep_values():\n        for x in src_x_train:\n            xs.append(np.repeat(x, input_dim // 2))\n\n    if t == 'v':\n        prep_values()\n    else:\n        prep_zeros()\n\n    return np.asarray(xs) / np.sqrt(input_dim)\n\n\ndim_dist = {}\n\nfor dim in dims:\n    dim_dist[dim] = {}\n    x_train = preprocess(dim)\n\n    for penalty in ut.penalties:\n        dim_dist[dim][penalty] = {}\n\n        for weight in weights:\n            dim_dist[dim][penalty][weight] = {}\n            dim_dist[dim][penalty][weight]['ppd'] = []\n            ppd = ut.MetricsCallback((x_train, src_y_train))\n            model = ut.create_model(dim, penalty)\n            sample_weight = np.repeat(weight, 
len(src_x_train))\n\n            model.fit(x_train, src_y_train, epochs=epochs, verbose=1,\n                      callbacks=[ut.es10, ppd], sample_weight=sample_weight)\n            for epoch in ppd.val_acc_ppd.keys():\n                dim_dist[dim][penalty][weight]['ppd'].append(ppd.val_acc_ppd[epoch]['ppd'])\n\nfig,axes = plt.subplots(2, 2)\nfor i in range(len(ut.penalties)):\n    for j in range(len(weights)):\n        for dim in dims:\n            axes[j, i].set_title(\"λ=\" + str(ut.penalties[i]) + \", w=\" + str(weights[j]))\n            line, = axes[j, i].plot(dim_dist[dim][ut.penalties[i]][weights[j]]['ppd'])\n            line.set_label(\"n=\" + str(dim))\n        axes[j, i].legend(prop={'size': 6})\n\nfig.show()\n","sub_path":"dims.py","file_name":"dims.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}{"seq_id":"499128517","text":"#!/home/kirby/.pyenv/versions/3.7.2/envs/video-transcode/bin/python\nfrom celery import Celery\nimport subprocess\nimport sys\n# import json\nimport os\nimport logging\nfrom celery.task.control import inspect\nfrom datetime import datetime, timedelta\nimport pendulum\nimport pathlib\n# import shlex\nimport re\nimport yaml\nimport pkg_resources\nimport argparse\nimport logging\n\n\nlogging.basicConfig(level=logging.WARN)\n\n\n# Load config.yaml\n# if no env var use package default.\nif os.environ.get('VIDEO_TRANSCODE_CONFIG'):\n    CONFIG_FILE = os.environ.get('VIDEO_TRANSCODE_CONFIG')\nelse:\n    CONFIG_FILE = pkg_resources.resource_filename('video_transcode','config/config.yaml')\n\nwith open(CONFIG_FILE) as f:\n    config = yaml.full_load(f.read())\n    # os.environ['FFMPEG_BINARY'] = config['FFMPEG_BINARY_PATH']\n    os.environ['LD_LIBRARY_PATH'] = '/usr/local/cuda/lib64'\n\n\n# setup argument parser\nparser = argparse.ArgumentParser()\n\nparser.add_argument('filename',\n                    nargs='+')\nparser.add_argument(\"-a\", \"--action\",\n                    help=\"ENVIRONMENT should be 'nonprod', 'dev' or 'sqa' and correspond to the AWS account to which you want to deploy.\",\n                    default=config['DEFAULT_ACTION'])\nparser.add_argument(\"-n\", \"--now\",\n                    action='store_true',\n                    default=False,\n                    help=\"Run action now, don't schedule.\")\nparser.add_argument(\"-s\", \"--same-dir\",\n                    action='store_true',\n                    default=False,\n                    help=\"Assume output file goes back to the input file's directory.\")\n# parser.add_argument('--add',\n#                     action='store_true',\n#                     help=\"Add files to queue. This takes the first argument as an input into pathlib.Path.glob.\")\n\n\nlogging.debug(f\"Transcode mode = {os.environ.get('VIDEO_TRANSCODE_MODE')}\")\nif os.environ.get('VIDEO_TRANSCODE_MODE'):\n    os.environ['FFMPEG_BINARY'] = config['FFMPEG_BINARY_PATH']\n    app = Celery(config['CELERY_QUEUE'], broker=config['CONTAINER_CELERY_BROKER'], backend=config['CONTAINER_CELERY_RESULT_BACKEND'])\nelse:\n    app = Celery(config['CELERY_QUEUE'], broker=config['CELERY_BROKER'], backend=config['CELERY_RESULT_BACKEND'])\n\n# import moviepy after setting FFMPEG_BINARY\nimport moviepy.editor as me\n\n\n# update celery task visibility_timeout to 1 week. \napp.conf.update(\n    broker_transport_options = {'visibility_timeout': 604800}\t\n)\n\n\ndef translate_filenames(input_file, same_folder=False):\n    \"\"\"Derives file path from video file's base name.\n\n    Plex records shows in a temp location then moves the file into a library upon completion. It's this \n    temp file's path that is passed to post processing scripts. 
Since video-transcode time shifts transcoding,\n the temp file path Plex sends isn't valid when video-transcode opens the file.\n\n Args:\n input_file (str): Path to file to transcode.\n\n Returns:\n tuple: (str, str) output file name, file name in plex library\n \"\"\"\n logging.info('Processing file {}'.format(input_file))\n f = pathlib.Path(input_file)\n\n input_filename = os.path.basename(input_file)\n # out_filename = input_filename.split('.')[0] + '.mkv'\n out_filename = os.path.splitext(input_filename)[0] + '.mkv'\n\n if same_folder:\n return str(f.with_suffix('.mkv')), input_file\n\n # print(filename)\n logging.info(\"Input file: {}\".format(input_file))\n\n # split filename into show - season/episode number (S01E01) - episode title parts\n filename_split = f.name.split(' - ')\n # print(filename_split)\n \n show_name, episode_number, episode_name = filename_split\n if config['IGNORE_YEAR_IN_SHOW_NAME'] and re.search('\\(\\d+\\)', show_name):\n # make dict of show folders w/o year -> show folders\n folders_map = {re.sub('\\(\\d+\\)', '', folder.name): folder.name for folder in pathlib.Path(config['PLEX_LIBRARY_FOLDER']).iterdir()}\n # update show_name with folder from map, if no match, default back to show name from input filename.\n show_name = folders_map.get(re.sub('\\(\\d+\\)', '', show_name), show_name)\n \n\n # extract season from file name if file is in S01E01 format\n matched_season = re.search('S(\\d*)E(\\d*)', episode_number)\n\n # if not in S01E01 format, check if file is in yyyy-mm-dd format\n if not matched_season:\n matched_season = re.search('(\\d*)-(\\d*)-(\\d*)', episode_number)\n\n folder = [config['PLEX_LIBRARY_FOLDER']] # TODO setup library path via config\n folder.append(show_name)\n folder.append('Season {}'.format(matched_season[1]))\n folder.append(f.name)\n\n moved_filename = os.path.join(*folder)\n logging.info(\"Moved file location: {}\".format(moved_filename))\n out_filename = os.path.splitext(moved_filename)[0] + '.mkv'\n\n return out_filename, moved_filename\n\n\n@app.task\ndef comcut(input_file):\n \"\"\"\n Passes input_file name from Plex to comcut.\n :param input_file:\n :return:\n \"\"\"\n out_filename, moved_filename = translate_filenames(input_file)\n cmd = [config['COMCUT_BINARY_PATH'], '--ffmpeg=/bin/ffmpeg', moved_filename]\n res = run(cmd)\n\n\ndef run(cmd, env=None):\n \"\"\"Utility to execute command on local OS.\"\"\"\n try:\n logging.debug(' '.join(cmd))\n res = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)\n logging.debug(res)\n return res\n except subprocess.CalledProcessError as e:\n logging.warn(\"Error running command.\")\n logging.warn(\"Command throwing error {}\".format(e.cmd))\n logging.warn(\"Return code {}\".format(e.returncode))\n logging.warn(\"Message: {}\".format(e.output))\n return e.returncode\n\n\ndef schedule(duration):\n \"\"\"\n Used to limit time of day tasks can execute. 
Currently set to run tasks between midnight and 8am daily.\n    \"\"\"\n    c = inspect()\n    tasks = c.scheduled()[config['CELERY_WORKER_NAME']]\n\n    scheduled_task_duration = sum(map(lambda v: v['request']['kwargs']['vt_duration'], tasks))\n\n    now = pendulum.now()\n    window_start = now.replace(hour=config['SCHEDULE_START'], minute=0, second=0)\n    window_end = now.replace(hour=config['SCHEDULE_END'], minute=0, second=0)\n\n    # Move to next window if now is later than today's window end.\n    if now > window_end:\n        window_start += timedelta(days=1)\n        window_end += timedelta(days=1)\n\n    # how much of today's processing window is remaining?\n    while True:\n        window_size = (window_end - window_start).seconds\n        remaining_window = scheduled_task_duration - window_size\n\n        if scheduled_task_duration + duration <= window_size:\n            return window_start + timedelta(seconds=(scheduled_task_duration))\n        else:\n            window_start += timedelta(days=1)\n            window_end += timedelta(days=1)\n            scheduled_task_duration -= window_size\n\n\n@app.task\ndef comcut_and_transcode(input_file, same_folder, **kwargs):\n    \"\"\"\n    Passes input_file name from Plex to comcut then to ffmpeg.\n    :param input_file:\n    :return:\n    \"\"\"\n    out_filename, moved_filename = translate_filenames(input_file, same_folder)\n\n    # TODO check moved_filename exists here.\n\n    # cut commercials\n    cmd = [config['COMCUT_BINARY_PATH'], f\"--ffmpeg={config['FFMPEG_BINARY_PATH']}\", moved_filename]\n    res = run(cmd)\n\n    # transcode to h265\n    cmd = [config['FFMPEG_BINARY_PATH']]\n    for opt in config['FFMPEG_OPTIONS']:\n        if \"{input_filename}\" in opt:\n            # print(opt)\n            cmd.append(opt.format(input_filename=moved_filename))\n        else:\n            cmd.append(opt)\n    cmd.append(out_filename)\n    # '-hide_banner',\n    # '-loglevel', 'error', \n    # '-vsync', '0', \n    # '-hwaccel', 'auto', \n    # '-i', moved_filename, \n    # '-c:v', 'hevc_nvenc', \n    # # '-rc:v', 'vbr_hq', \n    # '-qmin:v', '22',\n    # '-qmax:v', '30', \n    # '-rc-lookahead', '8', \n    # '-weighted_pred', '1',\n    # out_filename]\n\n    res = run(cmd, os.environ)\n\n    # logging.info(res)\n\n    # delete original file\n    res_type = type(res)\n    logging.debug(\"FFMPEG command result type is: {}\".format(res_type))\n    if config['DELETE_SOURCE_AFTER_TRANSCODE'] and type(res) != int: \n        os.remove(moved_filename)\n    elif res != 0:\n        logging.info('Error processing file. Skipping source deletion.')\n\n\ndef video_metadata(filename):\n    clip = me.VideoFileClip(filename)\n    return clip.size, clip.duration\n\n\ndef is_regex(filename):\n    return \"*\" in filename\n\n\ndef search(file_pattern):\n    \"\"\"Absolute path/s for given file pattern.\n    \n    Args:\n        str: file pattern. 
See pathlib.Path.glob\n\n    Returns:\n        iterator: matched files\n    \"\"\"\n    return map(str, pathlib.Path(os.getcwd()).glob(file_pattern))\n\n\ndef add_to_queue(filename, args):\n    \"\"\"Adds a single file to processing queue.\"\"\"\n\n    if args.action == 'transcode':\n        pass # TODO\n    elif args.action == 'comcut':\n        if args.now:\n            comcut.apply_async((filename,))\n        else:\n            comcut.apply_async((filename,), eta=schedule(5*60))\n    elif args.action == 'comcut_and_transcode':\n        frame_size, duration = video_metadata(filename)\n\n        if args.now:\n            comcut_and_transcode.apply_async(\n                (filename, args.same_dir), \n                {'vt_frame_size': frame_size, 'vt_duration': duration}, \n                headers={'vt_frame_size': frame_size, 'vt_duration': duration})\n        else:\n            comcut_and_transcode.apply_async(\n                (filename, args.same_dir), \n                {'vt_frame_size': frame_size, 'vt_duration': duration}, \n                eta=schedule(duration),\n                headers={'vt_frame_size': frame_size, 'vt_duration': duration})\n\n\ndef list_tasks():\n    tasks =\tinspect().scheduled()[config['CELERY_WORKER_NAME']]\n    for task in tasks:\n        logging.info(\"Start time: {} | File: {}\".format(task['eta'], task['request']['args']))\n    logging.info(\"{} tasks in queue.\".format(len(tasks)))\n\n\ndef main():\n    args = parser.parse_args()\n\n    if args.filename == ['list-tasks']:\n        list_tasks()\n        return\n\n    for f in args.filename:\n        # convert file name to absolute path\n        try:\n            filename = str(pathlib.Path(f).absolute())\n            add_to_queue(filename, args)\n        except FileNotFoundError:\n            logging.warn(\"No file found for {}\".format(f))\n\n\nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"video_transcode/video_transcode.py","file_name":"video_transcode.py","file_ext":"py","file_size_in_byte":10438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}{"seq_id":"428905144","text":"\"\"\"\nProblem Description:\n\nYou are playing the following Flip Game with your friend: Given a string that contains only these two characters: + and -, you and your friend take turns to flip two consecutive \"++\" into \"--\". The game ends when a person can no longer make a move and therefore the other person will be the winner.\n\nWrite a function to determine if the starting player can guarantee a win.\n\nFor example, given s = \"++++\", return true. The starting player can guarantee a win by flipping the middle \"++\" to become \"+--+\".\n\nFollow up:\nDerive your algorithm's runtime complexity.\n\"\"\"\n\n# There is an even crazier approach:\n# https://leetcode.com/discuss/64344/theory-matters-from-backtracking-128ms-to-dp-0ms\n\nclass Solution(object):\n    def canWin(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: bool\n        \"\"\"\n        for i in range(len(s) - 1):\n            if s[i:i+2] == \"++\":\n                if not self.canWin(s[:i] + \"--\" + s[i+2:]):\n                    return True\n        return False\n\nif __name__ == \"__main__\":\n    s = Solution()\n    print(s.canWin(\"++++\"))\n\n","sub_path":"294. 
Flip Game II/294.py","file_name":"294.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"422926971","text":"from random import randint, random\n\nfrom keras import Input, Model\nfrom keras.layers import MaxPooling1D, MaxPooling2D, MaxPooling3D, Dropout, Flatten, Dense, BatchNormalization, \\\n Activation, GlobalAveragePooling2D\nfrom keras.losses import categorical_crossentropy\nfrom keras.optimizers import Adadelta, Adam\n\nfrom autokeras import constant\nfrom autokeras.graph import Graph\nfrom autokeras.layers import get_conv_layer_func, get_ave_layer_func, StubBatchNormalization, StubActivation, StubConv, \\\n StubDropout, StubPooling, StubGlobalPooling, StubDense, StubInput\nfrom autokeras.stub import StubModel\n\n\nclass ClassifierGenerator:\n def __init__(self, n_classes, input_shape):\n self.n_classes = n_classes\n self.input_shape = input_shape\n if len(self.input_shape) > 4:\n raise ValueError('The input dimension is too high.')\n if len(self.input_shape) < 2:\n raise ValueError('The input dimension is too low.')\n\n def _get_pool_layer_func(self):\n pool_funcs = [MaxPooling1D, MaxPooling2D, MaxPooling3D]\n return pool_funcs[len(self.input_shape) - 2]\n\n def _get_shape(self, dim_size):\n temp_list = [(dim_size,), (dim_size, dim_size), (dim_size, dim_size, dim_size)]\n return temp_list[len(self.input_shape) - 2]\n\n\nclass DefaultClassifierGenerator(ClassifierGenerator):\n def __init__(self, n_classes, input_shape):\n super().__init__(n_classes, input_shape)\n\n def generate(self, model_len=constant.MODEL_LEN, model_width=constant.MODEL_WIDTH):\n pool = self._get_pool_layer_func()\n conv = get_conv_layer_func(len(self._get_shape(3)))\n ave = get_ave_layer_func(len(self._get_shape(3)))\n\n pooling_len = int(model_len / 4)\n model = StubModel()\n model.input_shape = self.input_shape\n model.inputs = [0]\n model.layers.append(StubInput())\n for i in range(model_len):\n model.layers += [StubActivation('relu'),\n StubConv(model_width, kernel_size=3, func=conv),\n StubBatchNormalization(),\n StubDropout(constant.CONV_DROPOUT_RATE)]\n if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):\n model.layers.append(StubPooling(func=pool))\n\n model.layers.append(StubGlobalPooling(ave))\n model.layers.append(StubDense(self.n_classes, activation='softmax'))\n model.outputs = [len(model.layers)]\n for index, layer in enumerate(model.layers):\n layer.input = index\n layer.output = index + 1\n return Graph(model, False)\n\n\nclass RandomConvClassifierGenerator(ClassifierGenerator):\n def __init__(self, n_classes, input_shape):\n super().__init__(n_classes, input_shape)\n\n def generate(self):\n \"\"\"Return the random generated CNN model.\"\"\"\n conv_num = randint(1, 10)\n dense_num = randint(1, 10)\n dropout_rate = random()\n filter_size = randint(1, 2) * 2 + 1\n pool_size = randint(2, 3)\n filter_shape = self._get_shape(filter_size)\n pool_shape = self._get_shape(pool_size)\n pool = self._get_pool_layer_func()\n conv = get_conv_layer_func(len(filter_shape))\n\n input_tensor = Input(shape=self.input_shape)\n output_tensor = input_tensor\n for i in range(conv_num):\n kernel_num = randint(10, 30)\n output_tensor = conv(kernel_num, filter_shape,\n padding='same')(output_tensor)\n output_tensor = BatchNormalization()(output_tensor)\n output_tensor = Activation('relu')(output_tensor)\n if random() > 0.5:\n output_tensor = pool(pool_size=pool_shape, padding='same')(output_tensor)\n 
if random() > 0.5:\n output_tensor = Dropout(dropout_rate)(output_tensor)\n output_tensor = Flatten()(output_tensor)\n for i in range(dense_num):\n node_num = randint(128, 1024)\n output_tensor = Dense(node_num, activation='relu')(output_tensor)\n if random() > 0.5:\n output_tensor = Dropout(dropout_rate)(output_tensor)\n output_tensor = Dense(self.n_classes, activation='softmax')(output_tensor)\n model = Model(input_tensor, output_tensor)\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(),\n metrics=['accuracy'])\n return model\n","sub_path":"autokeras/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"488903482","text":"import enum\nfrom datetime import datetime\nfrom sqlalchemy import *\nfrom sqlalchemy.ext.declarative import declarative_base \nfrom sqlalchemy.orm import sessionmaker, relation\nfrom logging import StreamHandler, Handler\nimport configparser\n\nBase = declarative_base()\n\nclass ImageExportJobState(enum.Enum):\n\tUnknown = 1\n\tNotStarted = 2\n\tRunning = 3\n\tCompletedWithErrors = 4\n\tCompleted = 5\n\nclass StudyExportState(enum.Enum):\n NotStarted = 1\n PacsFindDone = 2\n PacsFindFailed = 3\n PacsMoveDone = 4\n PacsMoveFailed = 5\n PacsReceiveIncomplete = 6\n ExportedToCTP = 7\n Incomplete = 8\n Completed = 9\n Failed = 10\n\nclass CTPExportState(enum.Enum):\n Anonymized = 1\n InQuarantine = 2 \n Unclassified = 3\n\nclass LogLevel(enum.Enum):\n DEBUG = 1\n INFO = 2\n WARNING = 3\n ERROR = 4\n\nclass ImageTypeClassLabel(enum.Enum):\n Safe = 1\n Unsafe = 2\n BurnedIn = 3\n\nclass ResearchStudyInfo(Base):\n __tablename__ = 'ResearchStudyInfo'\n __table_args__ = {'mysql_engine':'InnoDB'}\n\n study_id = Column(Integer, primary_key=True)\n study_acronym = Column(String(32), nullable=False)\n researchers = Column(String(32), nullable=False) \n cln_graphics = Column(Boolean, nullable=False, default=True)\n cln_structured_content = Column(Boolean)\n cln_descriptors = Column(Boolean)\n rtn_longitudinal_full_dates = Column(Boolean)\n rtn_longitudinal_modified_dates = Column(Boolean)\n rtn_patient_characteristics = Column(Boolean)\n rtn_device_id = Column(Boolean)\n rtn_uids = Column(Boolean)\n rtn_safe_private = Column(Boolean)\n rtn_institution_id = Column(Boolean) \n \nclass ImageExportJob(Base):\n __tablename__ = 'ImageExportJob'\n __table_args__ = {'mysql_engine':'InnoDB'}\n\n job_id = Column(Integer, primary_key=True)\n study_id = Column(Integer, ForeignKey('ResearchStudyInfo.study_id'))\n created = Column(DateTime, nullable=False, default=datetime.now())\n job_state = Column(Enum(ImageExportJobState), nullable=False, default=ImageExportJobState.NotStarted)\n\nclass StudyExportRecord(Base):\n __tablename__ = 'StudyExportRecord'\n __table_args__ = {'mysql_engine':'InnoDB'}\n\n record_id = Column(Integer, primary_key=True)\n job_id = Column(Integer, ForeignKey('ImageExportJob.job_id'))\n patient_id = Column(String(64), nullable=False)\n pseudo_id = Column(String(64), nullable=False)\n patient_birth_date = Column(String(8), nullable=True, default=None)\n accession_number = Column(String(16), nullable=False)\n study_description = Column(String(64), nullable=True, default=None)\n study_instance_uid = Column(String(64), nullable=True, default=None)\n study_date = Column(String(8), nullable=True, default=None)\n study_time = Column(String(16), nullable=True, default=None)\n state = Column(Enum(StudyExportState), nullable=False, 
default=StudyExportState.NotStarted)\n error_description = Column(String(128), nullable=True, default=None)\n number_of_failures = Column(Integer, nullable=True, default=0)\n modified = Column(DateTime, nullable=False, default=datetime.now())\n images_in_pacs = Column(Integer, default=0)\n images_received = Column(Integer, default=0)\n anonymized_images = Column(Integer, default=0)\n images_in_quarantine = Column(Integer, default=0)\n unclassified_images = Column(Integer, default=0)\n modalities_in_study = Column(String(16), nullable=True, default=None)\n station_name = Column(String(16), nullable=True, default=None)\n exp_job = relation('ImageExportJob', backref='StudyExportRecord', lazy=False)\n\nclass DICOMFilesTracking(Base):\n __tablename__ = 'DICOMFilesTracking'\n __table_args__ = {'mysql_engine':'InnoDB'}\n\n track_id = Column(Integer, primary_key=True)\n record_id = Column(Integer, ForeignKey('StudyExportRecord.record_id'))\n sop_instance_uid = Column(String(128), nullable=False)\n study_instance_uid = Column(String(64), nullable=True, default=None)\n state = Column(Enum(CTPExportState), nullable=True, default=None)\n\nclass LoggingMessages(Base):\n __tablename__ = 'LoggingMessages'\n __table_args__ = {'mysql_engine':'InnoDB'}\n\n log_id = Column(Integer, primary_key=True)\n created = Column(DateTime, nullable=False, default=datetime.now())\n log_level = Column(Enum(LogLevel), nullable=True, default=None)\n function = Column(String(32), nullable=True, default=None)\n message = Column(String(256), nullable=False)\n\nclass ImageTypeClassification(Base):\n __tablename__ = 'ImageTypeClassification'\n __table_args__ = {'mysql_engine':'InnoDB'}\n\n type_id = Column(Integer, primary_key=True)\n sop_class_uid = Column(String(64), nullable=True, default=None)\n image_type = Column(String(128), nullable=True, default=None)\n manufacturer = Column(String(64), nullable=True, default=None)\n software_versions = Column(String(64), nullable=True, default=None)\n secondary_capture_device_manufacturer = Column(String(64), nullable=True, default=None)\n secondary_capture_device_manufacturer_model_name = Column(String(64), nullable=True, default=None)\n secondary_capture_device_software_versions = Column(String(64), nullable=True, default=None)\n image_type_class = Column(Enum(ImageTypeClassLabel), nullable=True, default=None) \n\nclass ClassifiedImagesCatalog(Base):\n __tablename__ = 'ClassifiedImagesCatalog'\n __table_args__ = {'mysql_engine':'InnoDB'}\n\n class_id = Column(Integer, primary_key=True)\n type_id = Column(Integer, ForeignKey('ImageTypeClassification.type_id'))\n file_name = Column(String(128), nullable=True, default=None)\n exp_path = relation('ImageTypeClassification', backref='ClassifiedImagesCatalog', lazy=False)\n\nclass DataBaseHandler(StreamHandler):\n \"A handler class which writes formatted logging records to a DataBase.\"\n def __init__(self):\n Handler.__init__(self)\n self.stream = None\n self.session = Session()\n\n def emit(self, record):\n info = self.format(record)\n level, function, message = info.split(\"|\")\n level = getattr(LogLevel, level)\n log_record = LoggingMessages(log_level=level, function=function, message=message)\n try:\n self.session.add(log_record)\n self.session.commit()\n except Exception:\n print(\"Logging to DataBase was not possible!\")\n \n return None\n \nconfig = configparser.ConfigParser()\nconfig.read('/opt/amc/mies/config_files/args_config_dcmhub.ini')\ndatabase = config['DATABASE']['PASSWORD']\nencoding = 
config['DATABASE']['ENCODING']\necho= eval(config['DATABASE']['ECHO'])\n# Create a DataBase (if not exists) and start a new session\nengine = create_engine(database, encoding=encoding, echo=echo)\nmeta = Base.metadata\nBase.metadata.create_all(engine)\nSession = sessionmaker(bind=engine)\n","sub_path":"MIES (Python code)/mies-master/radiodb.py","file_name":"radiodb.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"506971623","text":"from django.test import TestCase, RequestFactory, Client\nfrom django.conf import settings\nfrom testfixtures import LogCapture, Comparison, compare, log_capture\n\nfrom test_settings import *\n\nfrom products.views import ProductListView, ProductDetailView, purchase_view\n\nimport requests\nimport requests_cache\nimport logging\nimport datetime\nimport time\n\nnow = datetime.datetime.today()\n# Get an instance of a logger\ncache_logger = logging.getLogger('cache')\n\n# Syntax for non-class-based views: my_view(request)\n# Syntax for class-based views: my_view.as_view()(request)\n\nclass ProductListViewTest(TestCase):\n def setUp(self):\n self.factory = RequestFactory()\n\n def test_product_list_view(self):\n '''\n Test status code on GET to /products/\n '''\n # Create an instance of a GET request.\n request = self.factory.get('/products/')\n response = ProductListView.as_view()(request)\n self.assertEqual(response.status_code, 200)\n\n def test_bad_product_list_view(self):\n '''\n Test status code on GET to bad url\n '''\n response = self.client.get('/product/')\n self.assertEqual(response.status_code, 404)\n\nclass ProductDetailViewTest(TestCase):\n def setUp(self):\n self.factory = RequestFactory()\n\n def test_product_detail_view(self):\n '''\n Test status code on GET to /products//\n '''\n # Create an instance of a GET request.\n request = self.factory.get('/products/'+str(PRODUCT_ID))\n response = ProductDetailView.as_view()(request, pk=PRODUCT_ID)\n self.assertEqual(response.status_code, 200)\n\n def test_bad_product_detail_view(self):\n '''\n Test status code on GET to bad url\n '''\n response = self.client.get('/product/'+str(PRODUCT_ID)+'/')\n self.assertEqual(response.status_code, 404)\n\nclass PurchaseViewTest(TestCase):\n def setUp(self):\n self.factory = RequestFactory()\n\n def test_purchase_view(self):\n '''\n Test status code on GET and POST to /products//purchase/ with params\n '''\n data = { 'customer_email': 'sally_baker@company.com',\n 'customer_name': 'Sally Baker',\n 'customer_phone': '(512) 555-1234',\n 'quantity': 2 }\n # Create an instance of a POST request.\n request = self.factory.post('/products/'+str(PRODUCT_ID)+'/purchase/', data)\n response = purchase_view(request, pk=PRODUCT_ID)\n self.assertEqual(response.status_code, 200)\n\n response = self.client.post('/products/'+str(PRODUCT_ID)+'/purchase/', data)\n self.assertEqual(response.status_code, 200)\n\n def test_bad_purchase_view(self):\n '''\n Test status code on GET and POST to bad url\n '''\n response = self.client.get('/product/'+str(PRODUCT_ID)+'/purchase/')\n self.assertEqual(response.status_code, 404)\n\nclass CacheTest(TestCase):\n def setUp(self):\n self.factory = RequestFactory()\n\n def test_has_cache_false(self):\n '''\n Test that cache is not accessed on an initial product request\n Install fresh cache\n LogCapture to watch the logs \n GET request to product list \n .from_cache expected to be False \n Logs expected to exist\n '''\n requests_cache.core.clear()\n 
cache_expire = 5\n requests_cache.install_cache('test_product_cache', backend='sqlite', expire_after=cache_expire)\n response = requests.request('GET', settings.PRODUCTS_URL,\n timeout=55,\n verify=False,\n headers=dict({'X-AUTH': settings.AUTH_NAME}))\n cache_logger.info('Time: {0} / Used Cache: {1}'.format(now, response.from_cache))\n self.assertEqual(response.from_cache, False)\n\n def test_cache(self):\n '''\n Test that cache is not accessed on an initial product request\n Install fresh cache\n LogCapture to watch the logs \n GET request to product list \n .from_cache expected to be False \n Logs expected to exist\n\n Test that cache IS accessed on subsequent product requests\n LogCapture to watch logs \n GET request to product list \n .from_cache expected to be True \n logs expected to exist\n '''\n cache_expire = 10\n requests_cache.install_cache('test_product_cache', backend='sqlite', expire_after=cache_expire)\n \n # start capture of 'cache' logger (see settings for config)\n with LogCapture('cache') as log:\n response = requests.request('GET', settings.PRODUCTS_URL,\n headers=dict({'X-AUTH': settings.AUTH_NAME}))\n\n time_of_first = now\n cache_logger.info('First Time: {0} / Used Cache: {1}'.format(time_of_first, response.from_cache))\n self.assertEqual(response.from_cache, False)\n\n # check log \n log.check(\n ('cache', 'INFO', 'First Time: {0} / Used Cache: False'.format(time_of_first)),\n )\n\n response_second = requests.request('GET', settings.PRODUCTS_URL,\n headers=dict({'X-AUTH': settings.AUTH_NAME}))\n\n time_of_second = now\n cache_logger.info('Second Time: {0} / Used Cache: {1}'.format(time_of_second, response_second.from_cache))\n self.assertEqual(response_second.from_cache, True)\n requests_cache.core.get_cache().clear()\n\n # check log_second \n log.check(\n ('cache', 'INFO', 'First Time: {0} / Used Cache: False'.format(time_of_first)),\n ('cache', 'INFO', 'Second Time: {0} / Used Cache: True'.format(time_of_second))\n )\n\n # requests_cache.core.clear()\n\n def test_cache_disabled(self):\n with requests_cache.disabled():\n response = requests.request('GET', settings.PRODUCTS_URL,\n timeout=55,\n verify=False,\n headers=dict({'X-AUTH': settings.AUTH_NAME}))\n self.assertEqual(hasattr(response, 'from_cache'), False)\n\n","sub_path":"products/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"169893987","text":"from rest_framework import serializers\nfrom .models import Event\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import APIException\n\nfrom ..incidents.models import IncidentComment, IncidentStatus, IncidentSeverity\nfrom django.contrib.auth.models import User\n\nfrom ..incidents.serializers import IncidentSerializer, IncidentCommentSerializer\nfrom ..custom_auth.serializers import UserSerializer\n\nclass GenericDataRelatedField(serializers.RelatedField):\n def to_representation(self, value):\n if isinstance(value, IncidentComment):\n return {\n \"comment\": {\n \"body\": value.body,\n \"isOutcome\": value.is_outcome\n }\n }\n elif isinstance(value, User):\n return {\n \"user\": {\n \"isAnonymous\": False,\n \"userId\": value.id,\n \"displayName\": value.username\n }\n }\n elif isinstance(value, IncidentStatus):\n return {\n \"status\": {\n \"from_status_type\": value.previous_status,\n \"to_status_type\": value.current_status\n }\n }\n elif isinstance(value, IncidentSeverity):\n return {\n 
\"status\": {\n \"from_severity_type\": value.previous_severity,\n \"to_severity_type\": value.current_severity\n }\n }\n\n raise APIException('Unexpected type of tagged object')\n\n\nclass EventSerializer(serializers.ModelSerializer):\n affectedAttribute = serializers.CharField(source=\"affected_attribute\")\n createdDate = serializers.DateTimeField(source=\"created_date\")\n data = GenericDataRelatedField(source=\"refered_model\", read_only=True)\n incident = IncidentSerializer()\n initiator = UserSerializer()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"action\",\n \"linked_event\",\n \"description\",\n \"initiator\",\n \"incident\",\n \"affectedAttribute\",\n \"createdDate\",\n \"data\"\n )\n\n","sub_path":"backend/src/events/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"62660621","text":"from twisted.internet.defer import inlineCallbacks, returnValue\n\nfrom vumi.dispatchers.base import (\n BaseDispatchWorker, ToAddrRouter, FromAddrMultiplexRouter)\nfrom vumi.tests.utils import VumiWorkerTestCase, LogCatcher\nfrom vumi.dispatchers.tests.utils import DispatcherTestCase, DummyDispatcher\n\n\nclass TestBaseDispatchWorker(VumiWorkerTestCase):\n\n @inlineCallbacks\n def get_dispatcher(self, **config_extras):\n config = {\n \"transport_names\": [\n \"transport1\",\n \"transport2\",\n \"transport3\",\n ],\n \"exposed_names\": [\n \"app1\",\n \"app2\",\n \"app3\",\n ],\n \"router_class\": \"vumi.dispatchers.base.SimpleDispatchRouter\",\n \"route_mappings\": {\n \"transport1\": [\"app1\"],\n \"transport2\": [\"app2\"],\n \"transport3\": [\"app1\", \"app3\"]\n },\n \"middleware\": [\n {\"mw1\": \"vumi.middleware.tests.utils.RecordingMiddleware\"},\n {\"mw2\": \"vumi.middleware.tests.utils.RecordingMiddleware\"},\n ],\n }\n config.update(config_extras)\n dispatcher = yield self.get_worker(config, BaseDispatchWorker)\n returnValue(dispatcher)\n\n def dispatch(self, message, rkey=None, exchange='vumi'):\n return self._dispatch(message, rkey, exchange)\n\n def mk_middleware_records(self, rkey_in, rkey_out):\n records = []\n for rkey, direction in [(rkey_in, False), (rkey_out, True)]:\n endpoint, method = rkey.split('.', 1)\n mw = [[name, method, endpoint] for name in (\"mw1\", \"mw2\")]\n if direction:\n mw.reverse()\n records.extend(mw)\n return records\n\n def assert_messages(self, rkeys_in, rkey_out, msgs):\n received_msgs = self._amqp.get_messages('vumi', rkey_out)\n for rmsg, rkey_in in zip(received_msgs, rkeys_in):\n middleware_records = self.mk_middleware_records(rkey_in, rkey_out)\n self.assertEqual(rmsg.payload.pop('record'),\n middleware_records)\n self.assertEqual(msgs, received_msgs)\n\n def assert_no_messages(self, *rkeys):\n for rkey in rkeys:\n self.assertEqual([], self._amqp.get_messages('vumi', rkey))\n\n def clear_dispatched(self):\n self._amqp.dispatched = {}\n\n @inlineCallbacks\n def test_inbound_message_routing(self):\n yield self.get_dispatcher()\n msg = self.mkmsg_in(transport_name='transport1')\n yield self.dispatch(msg, 'transport1.inbound')\n self.assert_messages(['transport1.inbound'], 'app1.inbound', [msg])\n self.assert_no_messages('app1.event', 'app2.inbound', 'app2.event',\n 'app3.inbound', 'app3.event')\n\n self.clear_dispatched()\n msg = self.mkmsg_in(transport_name='transport2')\n yield self.dispatch(msg, 'transport2.inbound')\n self.assert_messages(['transport2.inbound'], 'app2.inbound', [msg])\n 
self.assert_no_messages('app1.inbound', 'app1.event', 'app2.event',\n 'app3.inbound', 'app3.event')\n\n self.clear_dispatched()\n msg = self.mkmsg_in(transport_name='transport3')\n yield self.dispatch(msg, 'transport3.inbound')\n self.assert_messages(['transport3.inbound'], 'app1.inbound', [msg])\n self.assert_messages(['transport3.inbound'], 'app3.inbound', [msg])\n self.assert_no_messages('app1.event', 'app2.inbound', 'app2.event',\n 'app3.event')\n\n @inlineCallbacks\n def test_inbound_ack_routing(self):\n yield self.get_dispatcher()\n msg = self.mkmsg_ack(transport_name='transport1')\n yield self.dispatch(msg, 'transport1.event')\n self.assert_messages(['transport1.event'], 'app1.event', [msg])\n self.assert_no_messages('app1.inbound', 'app2.event', 'app2.inbound',\n 'app3.event', 'app3.inbound')\n\n self.clear_dispatched()\n msg = self.mkmsg_ack(transport_name='transport2')\n yield self.dispatch(msg, 'transport2.event')\n self.assert_messages(['transport2.event'], 'app2.event', [msg])\n self.assert_no_messages('app1.event', 'app1.inbound', 'app2.inbound',\n 'app3.event', 'app3.inbound')\n\n self.clear_dispatched()\n msg = self.mkmsg_ack(transport_name='transport3')\n yield self.dispatch(msg, 'transport3.event')\n self.assert_messages(['transport3.event'], 'app1.event', [msg])\n self.assert_messages(['transport3.event'], 'app3.event', [msg])\n self.assert_no_messages('app1.inbound', 'app2.event', 'app2.inbound',\n 'app3.inbound')\n\n @inlineCallbacks\n def test_outbound_message_routing(self):\n yield self.get_dispatcher()\n apps = ['app1.outbound', 'app2.outbound', 'app3.outbound']\n msgs = [self.mkmsg_out(transport_name='transport1') for _ in range(3)]\n for app, msg in zip(apps, msgs):\n yield self.dispatch(msg, app)\n self.assert_messages(apps, 'transport1.outbound', msgs)\n self.assert_no_messages('transport2.outbound', 'transport3.outbound')\n\n self.clear_dispatched()\n msgs = [self.mkmsg_out(transport_name='transport2') for _ in range(3)]\n for app, msg in zip(apps, msgs):\n yield self.dispatch(msg, app)\n self.assert_messages(apps, 'transport2.outbound', msgs)\n self.assert_no_messages('transport1.outbound', 'transport3.outbound')\n\n self.clear_dispatched()\n msgs = [self.mkmsg_out(transport_name='transport3') for _ in range(3)]\n for app, msg in zip(apps, msgs):\n yield self.dispatch(msg, app)\n self.assert_messages(apps, 'transport3.outbound', msgs)\n self.assert_no_messages('transport1.outbound', 'transport2.outbound')\n\n @inlineCallbacks\n def test_unroutable_outbound_error(self):\n dispatcher = yield self.get_dispatcher()\n router = dispatcher._router\n msg = self.mkmsg_out(transport_name='foo')\n with LogCatcher() as log:\n yield router.dispatch_outbound_message(msg)\n [error] = log.errors\n self.assertTrue(('Unknown transport_name: foo' in\n error['message'][0]))\n\n @inlineCallbacks\n def test_outbound_message_routing_transport_mapping(self):\n \"\"\"\n Test that transport mappings are applied for outbound messages.\n \"\"\"\n yield self.get_dispatcher(\n transport_mappings={'upstream1': 'transport1'},\n transport_names=[\n 'transport1',\n 'transport2',\n 'transport3',\n 'upstream1',\n ])\n apps = ['app1.outbound', 'app2.outbound', 'app3.outbound']\n\n msgs = [self.mkmsg_out(transport_name='upstream1') for _ in range(3)]\n for app, msg in zip(apps, msgs):\n yield self.dispatch(msg, app)\n self.assert_messages(apps, 'transport1.outbound', msgs)\n self.assert_no_messages('transport2.outbound', 'transport3.outbound',\n 'upstream1.outbound')\n\n self.clear_dispatched()\n 
msgs = [self.mkmsg_out(transport_name='transport2') for _ in range(3)]\n for app, msg in zip(apps, msgs):\n yield self.dispatch(msg, app)\n self.assert_messages(apps, 'transport2.outbound', msgs)\n self.assert_no_messages('transport1.outbound', 'transport3.outbound')\n\n def get_dispatcher_consumers(self, dispatcher):\n return (dispatcher.transport_consumer.values() +\n dispatcher.transport_event_consumer.values() +\n dispatcher.exposed_consumer.values())\n\n @inlineCallbacks\n def test_consumer_prefetch_count_default(self):\n dp = yield self.get_dispatcher()\n consumers = self.get_dispatcher_consumers(dp)\n for consumer in consumers:\n self.assertEqual(consumer.channel.qos_prefetch_count, 20)\n\n @inlineCallbacks\n def test_consumer_prefetch_count_custom(self):\n dp = yield self.get_dispatcher(amqp_prefetch_count=10)\n consumers = self.get_dispatcher_consumers(dp)\n for consumer in consumers:\n self.assertEqual(consumer.channel.qos_prefetch_count, 10)\n\n @inlineCallbacks\n def test_consumer_prefetch_count_none(self):\n dp = yield self.get_dispatcher(amqp_prefetch_count=None)\n consumers = self.get_dispatcher_consumers(dp)\n for consumer in consumers:\n self.assertFalse(consumer.channel.qos_prefetch_count)\n\n\nclass TestToAddrRouter(VumiWorkerTestCase):\n\n @inlineCallbacks\n def setUp(self):\n yield super(TestToAddrRouter, self).setUp()\n self.config = {\n 'transport_names': ['transport1'],\n 'exposed_names': ['app1', 'app2'],\n 'toaddr_mappings': {\n 'app1': 'to:.*:1',\n 'app2': 'to:app2',\n },\n }\n self.dispatcher = DummyDispatcher(self.config)\n self.router = ToAddrRouter(self.dispatcher, self.config)\n yield self.router.setup_routing()\n\n def test_dispatch_inbound_message(self):\n msg = self.mkmsg_in(to_addr='to:foo:1', transport_name='transport1')\n self.router.dispatch_inbound_message(msg)\n publishers = self.dispatcher.exposed_publisher\n self.assertEqual(publishers['app1'].msgs, [msg])\n self.assertEqual(publishers['app2'].msgs, [])\n\n def test_dispatch_outbound_message(self):\n msg = self.mkmsg_out(transport_name='transport1')\n self.router.dispatch_outbound_message(msg)\n publishers = self.dispatcher.transport_publisher\n self.assertEqual(publishers['transport1'].msgs, [msg])\n\n self.dispatcher.transport_publisher['transport1'].clear()\n self.config['transport_mappings'] = {\n 'upstream1': 'transport1',\n }\n\n msg = self.mkmsg_out(transport_name='upstream1')\n self.router.dispatch_outbound_message(msg)\n publishers = self.dispatcher.transport_publisher\n self.assertEqual(publishers['transport1'].msgs, [msg])\n\n\nclass TestTransportToTransportRouter(VumiWorkerTestCase):\n\n @inlineCallbacks\n def setUp(self):\n yield super(TestTransportToTransportRouter, self).setUp()\n config = {\n \"transport_names\": [\n \"transport1\",\n \"transport2\",\n ],\n \"exposed_names\": [],\n \"router_class\": \"vumi.dispatchers.base.TransportToTransportRouter\",\n \"route_mappings\": {\n \"transport1\": [\"transport2\"],\n },\n }\n self.worker = yield self.get_worker(config, BaseDispatchWorker)\n\n def dispatch(self, message, rkey=None, exchange='vumi'):\n if rkey is None:\n rkey = self.rkey('outbound')\n self._amqp.publish_message(exchange, rkey, message)\n return self._amqp.kick_delivery()\n\n def assert_messages(self, rkey, msgs):\n self.assertEqual(msgs, self._amqp.get_messages('vumi', rkey))\n\n def assert_no_messages(self, *rkeys):\n for rkey in rkeys:\n self.assertEqual([], self._amqp.get_messages('vumi', rkey))\n\n def clear_dispatched(self):\n self._amqp.dispatched = {}\n\n 
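# route_mappings above forwards transport1's inbound traffic straight back\n # out as transport2's outbound traffic, with no application in between\n 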
@inlineCallbacks\n def test_inbound_message_routing(self):\n msg = self.mkmsg_in(transport_name='transport1')\n yield self.dispatch(msg, 'transport1.inbound')\n self.assert_messages('transport2.outbound', [msg])\n self.assert_no_messages('transport2.inbound', 'transport1.outbound')\n\n\nclass TestFromAddrMultiplexRouter(VumiWorkerTestCase):\n\n @inlineCallbacks\n def setUp(self):\n yield super(TestFromAddrMultiplexRouter, self).setUp()\n config = {\n \"transport_names\": [\n \"transport_1\",\n \"transport_2\",\n \"transport_3\",\n ],\n \"exposed_names\": [\"muxed\"],\n \"router_class\": \"vumi.dispatchers.base.FromAddrMultiplexRouter\",\n \"fromaddr_mappings\": {\n \"thing1@muxme\": \"transport_1\",\n \"thing2@muxme\": \"transport_2\",\n \"thing3@muxme\": \"transport_3\",\n },\n }\n self.dispatcher = DummyDispatcher(config)\n self.router = FromAddrMultiplexRouter(self.dispatcher, config)\n yield self.router.setup_routing()\n\n @inlineCallbacks\n def tearDown(self):\n yield super(TestFromAddrMultiplexRouter, self).tearDown()\n yield self.router.teardown_routing()\n\n def mkmsg_in_mux(self, content, from_addr, transport_name):\n return self.mkmsg_in(transport_name=transport_name, content=content,\n from_addr=from_addr)\n\n def mkmsg_ack_mux(self, from_addr, transport_name):\n ack = self.mkmsg_ack(transport_name=transport_name)\n ack['from_addr'] = from_addr\n return ack\n\n def mkmsg_out_mux(self, content, from_addr):\n return self.mkmsg_out(transport_name='muxed', content=content,\n from_addr=from_addr)\n\n def test_inbound_message_routing(self):\n msg1 = self.mkmsg_in_mux('mux 1', 'thing1@muxme', 'transport_1')\n self.router.dispatch_inbound_message(msg1)\n msg2 = self.mkmsg_in_mux('mux 2', 'thing2@muxme', 'transport_2')\n self.router.dispatch_inbound_message(msg2)\n publishers = self.dispatcher.exposed_publisher\n self.assertEqual(publishers['muxed'].msgs, [msg1, msg2])\n\n def test_inbound_event_routing(self):\n msg1 = self.mkmsg_ack_mux('thing1@muxme', 'transport_1')\n self.router.dispatch_inbound_event(msg1)\n msg2 = self.mkmsg_ack_mux('thing2@muxme', 'transport_2')\n self.router.dispatch_inbound_event(msg2)\n publishers = self.dispatcher.exposed_event_publisher\n self.assertEqual(publishers['muxed'].msgs, [msg1, msg2])\n\n def test_outbound_message_routing(self):\n msg1 = self.mkmsg_out_mux('mux 1', 'thing1@muxme')\n self.router.dispatch_outbound_message(msg1)\n msg2 = self.mkmsg_out_mux('mux 2', 'thing2@muxme')\n self.router.dispatch_outbound_message(msg2)\n publishers = self.dispatcher.transport_publisher\n self.assertEqual(publishers['transport_1'].msgs, [msg1])\n self.assertEqual(publishers['transport_2'].msgs, [msg2])\n\n\nclass UserGroupingRouterTestCase(DispatcherTestCase):\n\n dispatcher_class = BaseDispatchWorker\n transport_name = 'test_transport'\n\n @inlineCallbacks\n def setUp(self):\n yield super(UserGroupingRouterTestCase, self).setUp()\n self.config = {\n 'dispatcher_name': 'user_group_dispatcher',\n 'router_class': 'vumi.dispatchers.base.UserGroupingRouter',\n 'transport_names': [\n self.transport_name,\n ],\n 'exposed_names': [\n 'app1',\n 'app2',\n ],\n 'group_mappings': {\n 'group1': 'app1',\n 'group2': 'app2',\n },\n 'transport_mappings': {\n 'upstream1': self.transport_name,\n },\n }\n\n self.dispatcher = yield self.get_dispatcher(self.config)\n self.router = self.dispatcher._router\n yield self.router._redis_d\n self.redis = self.router.redis\n yield self.redis._purge_all() # just in case\n\n @inlineCallbacks\n def tearDown(self):\n yield 
super(UserGroupingRouterTestCase, self).tearDown()\n yield self.redis.close_manager()\n\n @inlineCallbacks\n def test_group_assignment(self):\n msg = self.mkmsg_in(transport_name=self.transport_name)\n selected_group = yield self.router.get_group_for_user(msg.user())\n self.assertTrue(selected_group)\n for i in range(0, 10):\n group = yield self.router.get_group_for_user(msg.user())\n self.assertEqual(group, selected_group)\n\n @inlineCallbacks\n def test_round_robin_group_assignment(self):\n messages = [self.mkmsg_in(\n transport_name=self.transport_name,\n from_addr='from_%s' % (i,)) for i in range(0, 4)]\n groups = [(yield self.router.get_group_for_user(message.user()))\n for message in messages]\n self.assertEqual(groups, [\n 'group1',\n 'group2',\n 'group1',\n 'group2',\n ])\n\n def mkmsg_from(self, from_addr):\n return self.mkmsg_in(\n transport_name=self.transport_name, from_addr=from_addr)\n\n @inlineCallbacks\n def test_routing_to_application(self):\n # generate 4 messages, 2 from each user\n msg1 = self.mkmsg_from('from_1')\n msg2 = self.mkmsg_from('from_2')\n msg3 = self.mkmsg_from('from_3')\n msg4 = self.mkmsg_from('from_4')\n # send them through to the dispatcher\n messages = [msg1, msg2, msg3, msg4]\n for message in messages:\n yield self.dispatch(message, transport_name=self.transport_name)\n\n app1_msgs = self.get_dispatched_messages('app1', direction='inbound')\n app2_msgs = self.get_dispatched_messages('app2', direction='inbound')\n self.assertEqual(app1_msgs, [msg1, msg3])\n self.assertEqual(app2_msgs, [msg2, msg4])\n\n @inlineCallbacks\n def test_routing_to_transport(self):\n app_msg = self.mkmsg_from('from_1')\n yield self.dispatch(app_msg, transport_name='app1',\n direction='outbound')\n [transport_msg] = self.get_dispatched_messages(self.transport_name,\n direction='outbound')\n self.assertEqual(app_msg, transport_msg)\n\n @inlineCallbacks\n def test_routing_to_transport_mapped(self):\n app_msg = self.mkmsg_in(transport_name='upstream1',\n from_addr='from_1')\n yield self.dispatch(app_msg, transport_name='app1',\n direction='outbound')\n [transport_msg] = self.get_dispatched_messages(self.transport_name,\n direction='outbound')\n self.assertEqual(app_msg, transport_msg)\n\n\nclass TestContentKeywordRouter(DispatcherTestCase):\n\n dispatcher_class = BaseDispatchWorker\n transport_name = 'test_transport'\n\n @inlineCallbacks\n def setUp(self):\n yield super(TestContentKeywordRouter, self).setUp()\n self.config = {\n 'dispatcher_name': 'keyword_dispatcher',\n 'router_class': 'vumi.dispatchers.base.ContentKeywordRouter',\n 'transport_names': ['transport1', 'transport2'],\n 'transport_mappings': {\n 'shortcode1': 'transport1',\n 'shortcode2': 'transport2',\n },\n 'exposed_names': ['app1', 'app2', 'app3', 'fallback_app'],\n 'rules': [{'app': 'app1',\n 'keyword': 'KEYWORD1',\n 'to_addr': '8181',\n 'prefix': '+256',\n },\n {'app': 'app2',\n 'keyword': 'KEYWORD2',\n }],\n 'keyword_mappings': {\n 'app2': 'KEYWORD3',\n 'app3': 'KEYWORD1',\n },\n 'fallback_application': 'fallback_app',\n 'expire_routing_memory': '3',\n }\n self.dispatcher = yield self.get_dispatcher(self.config)\n self.router = self.dispatcher._router\n yield self.router._redis_d\n self.redis = self.router.redis\n yield self.redis._purge_all() # just in case\n\n @inlineCallbacks\n def tearDown(self):\n yield self.router.session_manager.stop()\n yield super(TestContentKeywordRouter, self).tearDown()\n\n @inlineCallbacks\n def test_inbound_message_routing(self):\n msg = self.mkmsg_in(content='KEYWORD1 rest of a 
msg',\n to_addr='8181',\n from_addr='+256788601462')\n\n yield self.dispatch(msg,\n transport_name='transport1',\n direction='inbound')\n\n msg2 = self.mkmsg_in(content='KEYWORD2 rest of a msg',\n to_addr='8181',\n from_addr='+256788601462')\n\n yield self.dispatch(msg2,\n transport_name='transport1',\n direction='inbound')\n\n msg3 = self.mkmsg_in(content='KEYWORD3 rest of a msg',\n to_addr='8181',\n from_addr='+256788601462')\n\n yield self.dispatch(msg3,\n transport_name='transport1',\n direction='inbound')\n\n app1_inbound_msg = self.get_dispatched_messages('app1',\n direction='inbound')\n self.assertEqual(app1_inbound_msg, [msg])\n app2_inbound_msg = self.get_dispatched_messages('app2',\n direction='inbound')\n self.assertEqual(app2_inbound_msg, [msg2, msg3])\n app3_inbound_msg = self.get_dispatched_messages('app3',\n direction='inbound')\n self.assertEqual(app3_inbound_msg, [msg])\n\n @inlineCallbacks\n def test_inbound_message_routing_empty_message_content(self):\n msg = self.mkmsg_in(content=None)\n\n yield self.dispatch(msg,\n transport_name='transport1',\n direction='inbound')\n\n app1_inbound_msg = self.get_dispatched_messages('app1',\n direction='inbound')\n self.assertEqual(app1_inbound_msg, [])\n app2_inbound_msg = self.get_dispatched_messages('app2',\n direction='inbound')\n self.assertEqual(app2_inbound_msg, [])\n fallback_msgs = self.get_dispatched_messages('fallback_app',\n direction='inbound')\n self.assertEqual(fallback_msgs, [msg])\n\n @inlineCallbacks\n def test_inbound_message_routing_not_casesensitive(self):\n msg = self.mkmsg_in(content='keyword1 rest of a msg',\n to_addr='8181',\n from_addr='+256788601462')\n\n yield self.dispatch(msg,\n transport_name='transport1',\n direction='inbound')\n\n app1_inbound_msg = self.get_dispatched_messages('app1',\n direction='inbound')\n self.assertEqual(app1_inbound_msg, [msg])\n\n @inlineCallbacks\n def test_inbound_event_routing_ok(self):\n msg = self.mkmsg_ack(user_message_id='1',\n transport_name='transport1')\n yield self.router.session_manager.create_session(\n 'message:1', name='app2')\n\n yield self.dispatch(msg,\n transport_name='transport1',\n direction='event')\n\n app2_event_msg = self.get_dispatched_messages('app2',\n direction='event')\n self.assertEqual(app2_event_msg, [msg])\n app1_event_msg = self.get_dispatched_messages('app1',\n direction='event')\n self.assertEqual(app1_event_msg, [])\n\n @inlineCallbacks\n def test_inbound_event_routing_failing_publisher_not_defined(self):\n msg = self.mkmsg_ack(transport_name='transport1')\n\n yield self.dispatch(msg,\n transport_name='transport1',\n direction='event')\n\n app1_routed_msg = self.get_dispatched_messages('app1',\n direction='event')\n self.assertEqual(app1_routed_msg, [])\n app2_routed_msg = self.get_dispatched_messages('app2',\n direction='event')\n self.assertEqual(app2_routed_msg, [])\n\n @inlineCallbacks\n def test_inbound_event_routing_failing_no_routing_back_in_redis(self):\n msg = self.mkmsg_ack(transport_name='transport1')\n\n yield self.dispatch(msg,\n transport_name='transport1',\n direction='event')\n\n app1_routed_msg = self.get_dispatched_messages('app1',\n direction='event')\n self.assertEqual(app1_routed_msg, [])\n app2_routed_msg = self.get_dispatched_messages('app2',\n direction='event')\n self.assertEqual(app2_routed_msg, [])\n\n @inlineCallbacks\n def test_outbound_message_routing(self):\n msg = self.mkmsg_out(content=\"KEYWORD1 rest of msg\",\n from_addr='shortcode1',\n transport_name='app2')\n\n yield self.dispatch(msg,\n 
transport_name='app2',\n direction='outbound')\n\n transport1_msgs = self.get_dispatched_messages('transport1',\n direction='outbound')\n self.assertEqual(transport1_msgs, [msg])\n transport2_msgs = self.get_dispatched_messages('transport2',\n direction='outbound')\n self.assertEqual(transport2_msgs, [])\n\n session = yield self.router.session_manager.load_session('message:1')\n self.assertEqual(session['name'], 'app2')\n\n\nclass TestRedirectOutboundRouterForSMPP(DispatcherTestCase):\n \"\"\"\n This is a test to cover our use case when using SMPP 3.4 with\n split Tx and Rx binds. The outbound traffic needs to go to the Tx, while\n the Rx just should go through. Upstream everything should be seen\n as arriving from the dispatcher and so the `transport_name` should be\n overwritten.\n \"\"\"\n dispatcher_class = BaseDispatchWorker\n\n @inlineCallbacks\n def setUp(self):\n yield super(TestRedirectOutboundRouterForSMPP, self).setUp()\n self.config = {\n 'dispatcher_name': 'redirect_outbound_dispatcher',\n 'router_class': 'vumi.dispatchers.base.RedirectOutboundRouter',\n 'transport_names': ['smpp_rx_transport', 'smpp_tx_transport'],\n 'exposed_names': ['upstream'],\n 'redirect_outbound': {\n 'upstream': 'smpp_tx_transport',\n },\n 'redirect_inbound': {\n 'smpp_tx_transport': 'upstream',\n 'smpp_rx_transport': 'upstream',\n },\n }\n self.dispatcher = yield self.get_dispatcher(self.config)\n self.router = self.dispatcher._router\n\n @inlineCallbacks\n def test_outbound_message_via_tx(self):\n msg = self.mkmsg_out(transport_name='upstream')\n yield self.dispatch(msg, transport_name='upstream',\n direction='outbound')\n [outbound] = self.get_dispatched_messages('smpp_tx_transport',\n direction='outbound')\n self.assertEqual(outbound['message_id'], msg['message_id'])\n\n @inlineCallbacks\n def test_inbound_event_tx(self):\n ack = self.mkmsg_ack(transport_name='smpp_tx_transport')\n yield self.dispatch(ack, transport_name='smpp_tx_transport',\n direction='event')\n [event] = self.get_dispatched_messages('upstream',\n direction='event')\n self.assertEqual(event['transport_name'], 'upstream')\n self.assertEqual(event['event_id'], ack['event_id'])\n\n @inlineCallbacks\n def test_inbound_event_rx(self):\n ack = self.mkmsg_ack(transport_name='smpp_rx_transport')\n yield self.dispatch(ack, transport_name='smpp_rx_transport',\n direction='event')\n [event] = self.get_dispatched_messages('upstream',\n direction='event')\n self.assertEqual(event['transport_name'], 'upstream')\n self.assertEqual(event['event_id'], ack['event_id'])\n\n @inlineCallbacks\n def test_inbound_message_via_rx(self):\n msg = self.mkmsg_in(transport_name='smpp_rx_transport')\n yield self.dispatch(msg, transport_name='smpp_rx_transport',\n direction='inbound')\n [app_msg] = self.get_dispatched_messages('upstream',\n direction='inbound')\n self.assertEqual(app_msg['transport_name'], 'upstream')\n self.assertEqual(app_msg['message_id'], msg['message_id'])\n\n @inlineCallbacks\n def test_error_logging_for_bad_app(self):\n msgt1 = self.mkmsg_out(transport_name='foo') # Does not exist\n with LogCatcher() as log:\n yield self.dispatch(msgt1, transport_name='upstream',\n direction='outbound')\n [err] = log.errors\n self.assertTrue('No redirect_outbound specified for foo' in\n err['message'][0])\n\n\nclass TestRedirectOutboundRouter(DispatcherTestCase):\n\n dispatcher_class = BaseDispatchWorker\n transport_name = 'test_transport'\n\n @inlineCallbacks\n def setUp(self):\n yield super(TestRedirectOutboundRouter, self).setUp()\n self.config = {\n 
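# redirect_outbound maps each exposed name to the transport that carries\n # its outbound messages; redirect_inbound is the reverse mapping\n 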
'dispatcher_name': 'redirect_outbound_dispatcher',\n 'router_class': 'vumi.dispatchers.base.RedirectOutboundRouter',\n 'transport_names': ['transport1', 'transport2'],\n 'exposed_names': ['app1', 'app2'],\n 'redirect_outbound': {\n 'app1': 'transport1',\n 'app2': 'transport2',\n },\n 'redirect_inbound': {\n 'transport1': 'app1',\n 'transport2': 'app2',\n }\n }\n self.dispatcher = yield self.get_dispatcher(self.config)\n self.router = self.dispatcher._router\n\n @inlineCallbacks\n def test_outbound_redirect(self):\n msgt1 = self.mkmsg_out(transport_name='app1')\n msgt2 = self.mkmsg_out(transport_name='app2')\n yield self.dispatch(msgt1, transport_name='app1',\n direction='outbound')\n yield self.dispatch(msgt2, transport_name='app2',\n direction='outbound')\n [outbound1] = self.get_dispatched_messages('transport1',\n direction='outbound')\n [outbound2] = self.get_dispatched_messages('transport2',\n direction='outbound')\n\n self.assertEqual(outbound1, msgt1)\n self.assertEqual(outbound2, msgt2)\n\n @inlineCallbacks\n def test_inbound_event(self):\n ack = self.mkmsg_ack(transport_name='transport1')\n yield self.dispatch(ack, transport_name='transport1',\n direction='event')\n [event] = self.get_dispatched_messages('app1',\n direction='event')\n self.assertEqual(event['transport_name'], 'app1')\n self.assertEqual(event['event_id'], ack['event_id'])\n\n @inlineCallbacks\n def test_inbound_message(self):\n msg = self.mkmsg_in(transport_name='transport1')\n yield self.dispatch(msg, transport_name='transport1',\n direction='inbound')\n [app_msg] = self.get_dispatched_messages('app1',\n direction='inbound')\n self.assertEqual(app_msg['transport_name'], 'app1')\n self.assertEqual(app_msg['message_id'], msg['message_id'])\n\n @inlineCallbacks\n def test_error_logging_for_bad_app(self):\n msgt1 = self.mkmsg_out(transport_name='app3') # Does not exist\n with LogCatcher() as log:\n yield self.dispatch(msgt1, transport_name='app2',\n direction='outbound')\n [err] = log.errors\n self.assertTrue('No redirect_outbound specified for app3' in\n err['message'][0])\n","sub_path":"vumi/dispatchers/tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":32107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"61952286","text":"import RPi.GPIO as GPIO\nimport time\nfrom .settings import LDR_PIN\n\nGPIO.setmode(GPIO.BCM)\n\n#define the pin that goes to the circuit\npin_to_circuit = LDR_PIN\n\ndef rc_time (pin_to_circuit):\n count = 0\n \n #Output on the pin for \n GPIO.setup(pin_to_circuit, GPIO.OUT)\n GPIO.output(pin_to_circuit, GPIO.LOW)\n time.sleep(0.1)\n\n #Change the pin back to input\n GPIO.setup(pin_to_circuit, GPIO.IN)\n \n #Count until the pin goes high\n while (GPIO.input(pin_to_circuit) == GPIO.LOW):\n count += 1\n\n return count\n\n\ndef ldr_samples(count=10):\n samples=[]\n for i in range(count):\n samples.append(rc_time(pin_to_circuit))\n average = sum(samples) / len(samples)\n\n return average\n\n\ndef get_ldr_value():\n return ldr_samples(20)\n\ndef run(callback):\n try:\n # Main loop\n while True:\n # print rc_time(pin_to_circuit)\n ldr_value = get_ldr_value()\n callback({'type': 'LDR', 'payload': ldr_value})\n time.sleep(5)\n except KeyboardInterrupt:\n pass\n finally:\n GPIO.cleanup()\n\nif __name__ == '__main__':\n #Catch when script is interrupted, cleanup correctly\n try:\n # Main loop\n while True:\n # print rc_time(pin_to_circuit)\n print(get_ldr_value())\n time.sleep(.5)\n except KeyboardInterrupt:\n pass\n 
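# always release the GPIO pins, even after a KeyboardInterrupt\n 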
finally:\n GPIO.cleanup()\n","sub_path":"drivers/ldr_driver.py","file_name":"ldr_driver.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"130664652","text":"import sys\nimport gpaw as gp\nfrom ase.optimize.precon import PreconLBFGS\nfrom ase.optimize.precon.precon import Exp\nfrom ase.io.trajectory import Trajectory\nfrom ase.build import bulk\n\ndef main( argv ):\n n_mg = int(argv[0])\n atoms = bulk(\"Al\")\n atoms = atoms*(4,4,4)\n for i in range(n_mg):\n atoms[i].symbol = \"Mg\"\n\n atoms.rattle( stdev=0.005 )\n\n calc = gp.GPAW( mode=gp.PW(500), xc=\"PBE\", kpts=(4,4,4), nbands=\"120%\" )\n atoms.set_calculator( calc )\n\n logfile = \"preconTest%d.log\"%(n_mg)\n traj = \"preconTest%d.traj\"%(n_mg)\n trajObj = Trajectory(traj, 'w', atoms )\n\n relaxer = PreconLBFGS( atoms, logfile=logfile, use_armijo=True )\n relaxer.attach( trajObj )\n try:\n relaxer.run( fmax=0.05 )\n except:\n pass\n print (\"Mu: %.2E\"%(relaxer.precon.mu))\n\nif __name__ == \"__main__\":\n main( sys.argv[1:] )\n","sub_path":"Scaling/test_precon.py","file_name":"test_precon.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"132301427","text":"# -*- coding: utf-8 -*-\n\"\"\"Main story.\n\"\"\"\n## path setting\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nsys.path.append('storybuilder')\n## public libs\n## local libs\nfrom storybuilder.builder.world import World\nfrom storybuilder.builder.writer import Writer\nfrom config import PERSONS, AREAS, STAGES, DAYS, TIMES, ITEMS, WORDS, RUBIS, LAYERS\n## assets\nfrom storybuilder.assets import basic, accessory\n## local files\nfrom src.chapter.main import ch_main\n\n## define alias\nW = Writer\n_ = Writer.getWho()\n\n################################################################\n#\n# Sample step:\n# 1) Create the world\n# Create the world.\n# 2) Create a new chapter\n# Create the chapter.\n# 3) Create an episode\n# Create an episode.\n# 4) Create a new scene\n# Create a scene: the base of the story. Various Actions are added here.\n# 5) Create a new stage\n# Create a stage, a required element of a scene.\n# 6) Create a new day and time\n# Create a day and time, sub-elements of a scene.\n# 7) Add a scene plot\n# Create the scene plot; as a rule, a scene without an outline cannot be used.\n# 8) Add scene actions\n# Add the scene actions.\n#\n################################################################\n\n\n## main\ndef create_world():\n \"\"\"Create a world.\n \"\"\"\n w = World(\"あるピアノ弾きの懺悔\")\n w.setCommonData()\n w.setAssets(basic.ASSET)\n w.setAssets(accessory.ASSET)\n w.buildDB(PERSONS,\n AREAS, STAGES, DAYS, TIMES, ITEMS, WORDS,\n RUBIS, LAYERS)\n w.setBaseDate(2020)\n w.setBaseArea(\"Tokyo\")\n # set textures\n # w.entryBlock()\n # w.entryHistory()\n # w.entryLifeNote()\n w.setOutline(\"ピアノ弾きの男は、後悔していた\")\n return w\n\n\ndef main(): # pragma: no cover\n w = create_world()\n return w.build(\n ch_main(w),\n )\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main())\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"381608091","text":"import os\n\nos.chdir('/path/to/files/')\n\n# To see if it's the correct directory\n# print(os.getcwd())\n\nfor f in os.listdir():\n\n # split to get the extension\n f_name, f_ext = os.path.splitext(f)\n\n # Split by the dash\n f_title, f_course, f_num = f_name.split('-')\n #print('{}-{}-{}{}'.format(f_num, f_course, f_title, f_ext))\n\n 
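# strip the whitespace left around each piece by the dash split\n 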
f_title = f_title.strip()\n f_course = f_course.strip()\n # f_num = f_num.strip()\n\n # remove the number sign at the beginning\n f_num = f_num.strip()[1:]\n\n # pad single-digit numbers with a leading zero\n f_num = f_num.zfill(2)\n\n\n new_name = '{}-{}{}'.format(f_num, f_title, f_ext)\n\n # rename\n os.rename(f, new_name)","sub_path":"python_learning/python_automation/python_renaming_multiple_files/scipt.py","file_name":"scipt.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"276404775","text":"class Pessoa:\n def __init__(self, *filhos, nome = None, idade = 57):\n self.idade = idade\n self.nome = nome\n self.filhos = list(filhos)\n\n def cumprimentar(self):\n return f'olá {id(self)}'\n\nif __name__ == '__main__':\n jean = Pessoa(nome='Jeam')\n luciana = Pessoa(jean, nome='Luciana')\n print(Pessoa.cumprimentar(luciana))\n print (id(luciana))\n print (luciana.cumprimentar())\n print (luciana.nome)\n print (luciana.idade)\n for filho in luciana.filhos:\n print(filho.nome)\n print(luciana.filhos)\n","sub_path":"oo/pessoa.py","file_name":"pessoa.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"129950715","text":"class Scene(object):\r\n \"\"\"Scene defines a container for a renderable scene\"\"\"\r\n\r\n def __init__(self, camera, light, bgColor, ambientColor):\r\n \"\"\"\r\n Create a Scene object\r\n\r\n Keyword arguments:\r\n camera (Camera) -- the camera that views the scene\r\n light (Light) -- the light that illuminates the scene\r\n bgColor (Point3) -- the color of the background of the scene\r\n ambientColor (Point3) -- the color of the ambient light present in the scene\r\n\r\n Geometric primitives may be added to this Scene by appending to self.objects\r\n\r\n \"\"\"\r\n\r\n self.camera = camera\r\n self.light = light\r\n self.bgColor = bgColor\r\n self.ambientColor = ambientColor\r\n self.objects = []\r\n \r\n def __repr__(self):\r\n \"\"\"Return a string representation of the Scene\"\"\"\r\n \r\n sceneStr = \"Scene------------------------------------------------------------------\\n\"\r\n sceneStr += \"Camera: %s\\n\" % self.camera\r\n sceneStr += \"Light: %s\\n\" % self.light\r\n sceneStr += \"Background Color: %s\\n\" % self.bgColor\r\n sceneStr += \"Ambient Color: %s\\n\" % self.ambientColor\r\n sceneStr += \"Objects:\\n\"\r\n \r\n for object in self.objects:\r\n sceneStr += \"\\t%s\\n\" % object\r\n \r\n sceneStr += \"----------------------------------------------------------------------\"\r\n \r\n return sceneStr","sub_path":"raytracer/Scene.py","file_name":"Scene.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"107258698","text":"import argparse, gzip\nfrom itertools import izip_longest\n\ndef main(args):\n\n\toccured = set() # Keeps track of read 1/read 2 sequence combinations that have already occurred\n\tunique = [] # Keeps a single instance of each read 1/read 2 sequence combination\n\n\twith gzip.open(args.read_1) as i1, gzip.open(args.read_2) as i2:\n\t\tfor line1, line2 in izip_longest(i1, i2):\n\t\t\t\n\t\t\t#Read one entry from read 1\n\t\t\tinfo1 = line1.rstrip().split(' ')[0]\n\t\t\tseq1 = next(i1).rstrip()\n\t\t\textra1 = next(i1).rstrip()\n\t\t\tquality1 = next(i1).rstrip()\n\n\t\t\t#Read one entry from read 2\n\t\t\tinfo2 = line2.rstrip().split(' 
')[0]\n\t\t\tseq2 = next(i2).rstrip()\n\t\t\textra2 = next(i2).rstrip()\n\t\t\tquality2 = next(i2).rstrip()\n\n\t\t\t# If this read 1/read 2 sequence combination hasn't occured yet, add it to unique reads with UMI moved to read name, update occured reads\n\t\t\tif (seq1, seq2) not in occured:\n\t\t\t\tunique.append('%s;%s\\n%s\\n%s\\n%s\\n' % (info1, seq1[:args.umi_length], seq1[args.umi_length:], extra1, quality1[args.umi_length:]))\n\t\t\t\toccured.add((seq1, seq2))\n\n\twith gzip.open(args.output ,'wb') as out:\n\t\tout.write(''.join(unique))\n\ndef parseArguments():\n\tparser = argparse.ArgumentParser(prog=\"compress_UMI\", description='Filters paired fastq file such that only a single instance of each read 1/read 2 sequence combination is kept. Moves UMI to read name.', usage='%(prog)s -n -1 .fastq.gz -2 .fastq.gz -o ')\n\trequired = parser.add_argument_group('required arguments')\n\trequired.add_argument('-n', '--UMI_length', type=int, required=True, help=' Length of the Unique Molecular Index', metavar='', dest='umi_length')\n\trequired.add_argument('-1', '--read_1', required=True, help=' File containing read 1 (fastq.gz)', metavar='', dest='read_1')\n\trequired.add_argument('-2', '--read_2', required=True, help=' File contianing read 2 (fastq.gz)', metavar='', dest='read_2')\n\trequired.add_argument('-o', '--output', required=True, help=' Output file name', metavar='', dest='output')\n\n\treturn parser.parse_args()\n\nargs = parseArguments()\nmain(args)\n","sub_path":"scripts/compress_UMI.py","file_name":"compress_UMI.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"595449875","text":"numberOfCities = int(input(\"Enter number of cities \"))\nloopCount = numberOfCities\nindex = 0\n\ncities = {}\n\nwhile index < loopCount:\n cityName = input(\"Enter city name \")\n cityPopulation = int(input(\"Enter population \"))\n cities[cityName] = cityPopulation\n index += 1\n\npopulationSum = 0\n\nfor city,population in cities.items():\n populationSum += population\n\npopulationAverage = populationSum / numberOfCities\n\nprint(\"Average population: \" + str(populationAverage))\n\nfor city,population in cities.items():\n if population > populationAverage:\n print (city)\n","sub_path":"proficiency_demo_2.py","file_name":"proficiency_demo_2.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"195978235","text":"\"\"\"\nPlaying around with sep and the INT data\n\"\"\"\n\nimport math\nimport sep\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom skimage.transform import hough_line, hough_line_peaks\nfrom skimage import feature\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\nfrom scipy import ndimage\nfrom ccdproc import cosmicray_lacosmic\n\ndef subtractBackground(data, mask=None, box_width=32, box_height=32, \n filter_width=3, filter_height=3):\n \"\"\"\n Determine the spatially varying sky background using SEP\n and subtract from the image\n \n Parameters\n ----------\n data : array-like\n CCD data from which to subtract the background\n mask : array-like\n Bad pixel mask for the CCD frames\n box_width : int, optional\n Width of background boxes in pixels\n Default = 32\n box_height : int, optional\n Height of background boxes in pixels\n Default = 32\n filter_width : int, optional\n Width of filter in boxes\n 
Default = 3\n filter_height : int, optional\n Height of filter in boxes\n Default = 3\n \n Returns\n -------\n data_sub : array-like\n Data array with background signal subtracted\n bkg_rms : float\n Global rms of the spatially varying background, for use as a \n backup threshold in the extraction procedure\n \n Raises\n ------\n None\n \"\"\"\n \n # FITS files can be backwards byte order - SEP needs this fixed\n try:\n bkg = sep.Background(data, \n mask=mask,\n bw=box_width, \n bh=box_height,\n fw=filter_width, \n fh=filter_height)\n except:\n data = data.byteswap(True).newbyteorder()\n bkg = sep.Background(data, \n mask=mask,\n bw=box_width, \n bh=box_height,\n fw=filter_width, \n fh=filter_height)\n \n data_sub = data - bkg\n \n # calculate background rms as a backup for extraction threshold\n bkg_rms = bkg.globalrms\n \n return data_sub, bkg_rms\n\ndef sourceExtract(data, thresh=3, bkg=False, bkg_rms=None, \n err=None, mask=None, min_area=5, \n deblend_cont=0.005, segment=False):\n \"\"\"\n Extract all sources above a certain threshold in the given image\n \n Parameters\n ----------\n data : array-like\n CCD image frame from which to extract sources\n thresh : float, optional\n Number of sigma a detection must be above the background to \n be flagged as a source - if err not given, bkg_rms is needed\n Default = Sky.sigma [4]\n bkg : bool, optional\n Toggle to model spatially varying background and subtract from\n data - by default assumes this has been done separately\n Default = False\n bkg_rms : float, optional\n Estimation of the global background noise - used to determine\n threshold if err is None - can calculate global background\n rms when subtracting background model\n Default = None\n err : array-like, optional\n Error array for the CCD frame - supersedes bkg_rms when\n determining the threshold\n Default = None\n min_area : int, optional\n Minimum number of pixels to be flagged as a source\n Default = Sky.min_area [5]\n deblend_cont : float, optional\n Minimum contrast ratio used by SEP for deblending\n Default = Sky.deblend_cont [0.05]\n segment : bool, optional\n Toggle to generate a segmentation map for the given image\n Default = False\n \n Returns\n -------\n sources : astropy Table object\n Table containing quantities determined by sep for each source\n detected in the given image\n segmentation_map : array-like, optional\n Array of integers with same shape as data - pixels not \n belonging to any object have value 0, whilst all pixels \n belonging to ith object have value (e.g. 
sources[i]) have\n value i+1 - only returned if segment is True\n \n Raises\n ------\n None\n \"\"\"\n \n # subtract spatially varying background model if requested\n if bkg:\n data, bkg_rms = subtractBackground(data)\n \n # determine threshold for extraction\n if err is None:\n thresh *= bkg_rms\n \n # extract sources\n if not segment:\n sources = sep.extract(data, \n thresh, \n err=err,\n mask=mask,\n deblend_cont=deblend_cont)\n else:\n sources, seg_map = sep.extract(data, \n thresh,\n err=err,\n mask=mask,\n deblend_cont=deblend_cont,\n segmentation_map=True)\n \n # convert detections to table format and keep relevant columns\n sources = Table(sources)\n sources = sources['thresh','npix','flag','x','y','a','b','theta','flux']\n \n # calculate ellipticity parameters - useful for sat/star separation\n sources['ellipticity'] = 1.0 - (sources['b'] / sources['a'])\n \n if not segment:\n return sources\n else:\n return sources, seg_map\n\ndef plotSources(data, sources):\n \"\"\"\n Plot the sources detected by SEP on top of the CCD frame\n \n Parameters\n ----------\n data : array-like\n Image data for the CCD frame\n sources : astropy Table object\n Source catalog outputted by SEP for the frame\n \n Returns\n -------\n None\n \n Raises\n ------\n None\n \"\"\"\n \n # plot image\n fig, ax = plt.subplots()\n m, s = np.mean(data), np.std(data)\n im = ax.imshow(data, interpolation='nearest', cmap='gray',\n vmin=m-s, vmax=m+s, origin='lower')\n\n # plot ellipse for each object\n for i in range(len(sources)):\n e = Ellipse(xy=(sources['x'][i], sources['y'][i]),\n width=6*sources['a'][i],\n height=6*sources['b'][i],\n angle=sources['theta'][i] * 180. / np.pi)\n e.set_facecolor('none')\n e.set_edgecolor('red')\n ax.add_artist(e)\n \n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n plt.close(fig)\n\nif __name__ == \"__main__\":\n \n frame_path = '/media/james/Seagate_JAB1/Home/INTdata/20180903/calib/r1408368.fit'\n mask_path = '/media/james/Seagate_JAB1/Home/INTdata/badpixelmasks/bp_master.fits'\n \n # load INT frame\n frame = fits.open(frame_path)[1].data.astype(np.float64)\n \n # load bad pixel mask\n #mask = fits.open(mask_path)[4].data.astype(np.bool)\n mask = fits.open(mask_path)[1].data.astype(np.uint8)\n \n # subtract background\n data, bkg_rms = subtractBackground(frame, \n mask=mask)\n \n \"\"\"\n trimmed = data[150:170,910:930]\n trimmed = trimmed.copy(order='C')\n mask_trimmed = mask[150:170,910:930]\n mask_trimmed = mask_trimmed.copy(order='C')\n \n sources = sourceExtract(trimmed, \n thresh=1.,\n bkg_rms=bkg_rms,\n mask = mask_trimmed,\n deblend_cont=1.0)\n \n print(sources)\n \n plotSources(trimmed, sources)\n \"\"\"\n \n \"\"\"\n # Hough analysis\n h, theta, d = hough_line(trimmed)\n \n fig, axes = plt.subplots(1, 3, figsize=(15, 6))\n ax = axes.ravel()\n \n m, s = np.mean(trimmed), np.std(trimmed)\n ax[0].imshow(trimmed, interpolation='nearest', cmap='gray',\n vmin=m-s, vmax=m+s, origin='lower')\n \n ax[1].imshow(np.log(1 + h),\n extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]), d[-1], d[0]],\n cmap=cm.gray, aspect=1/1.5)\n \n ax[2].imshow(trimmed, interpolation='nearest', cmap='gray',\n vmin=m-s, vmax=m+s, origin='lower')\n \n for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):\n y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)\n y1 = (dist - trimmed.shape[1] * np.cos(angle)) / np.sin(angle)\n ax[2].plot((0, trimmed.shape[1]), (y0, y1), '-r')\n \n ax[2].set_xlim((0, trimmed.shape[1]))\n ax[2].set_ylim((trimmed.shape[0], 0))\n \n plt.show()\n \"\"\"\n \n \"\"\"\n # Canny 
edge detection\n edges1 = feature.canny(trimmed)\n edges2 = feature.canny(trimmed, sigma=3)\n \n fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),\n sharex=True, sharey=True)\n\n ax1.imshow(trimmed, cmap=plt.cm.gray)\n ax1.axis('off')\n ax1.set_title('noisy image', fontsize=20)\n\n ax2.imshow(edges1, cmap=plt.cm.gray)\n ax2.axis('off')\n ax2.set_title('Canny filter, $\\sigma=1$', fontsize=20)\n\n ax3.imshow(edges2, cmap=plt.cm.gray)\n ax3.axis('off')\n ax3.set_title('Canny filter, $\\sigma=3$', fontsize=20)\n\n fig.tight_layout()\n\n plt.show() \n \"\"\"\n \n ## mathematical morphology tests\n \n trail_length = 229\n data = np.ma.masked_where(mask, frame)\n \n fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),\n sharex=True, sharey=True)\n \n # get stars\n bkg = sep.Background(frame, mask=mask)\n subtracted = frame - bkg\n \n thresh = 2 * bkg.globalrms\n print(thresh)\n all_objs = sep.extract(subtracted, thresh, mask=mask, deblend_cont=1.0)\n \n stars = all_objs[6*all_objs['a'] > trail_length] # sub-trails\n stars = stars[6*stars['a'] < trail_length*2.] # big oddities\n \n \n \n m, s = np.mean(data), np.std(data)\n ax[0].imshow(data, interpolation='nearest', cmap='gray',\n vmin=m-s, vmax=m+s, origin='lower')\n \n #frame = subtracted\n \n model = ndimage.grey_opening(ndimage.grey_closing(frame, size=(trail_length // 3, 1)), size=(trail_length // 2, 1))\n \n frame -= model\n frame_mask = np.logical_or(mask, ndimage.binary_dilation(model > 300))\n frame[frame_mask] = np.median(np.ma.masked_array(frame, mask=frame_mask))\n \n m, s = np.mean(model), np.std(model)\n from astropy.stats import sigma_clipped_stats\n print(sigma_clipped_stats(model))\n ax[1].imshow(model, interpolation='nearest', cmap='gray',\n vmin=m-s, vmax=m+s, origin='lower')\n \n # extract sources from MM-corrected frame\n bkg = sep.Background(frame, mask=frame_mask)\n subtracted = frame - bkg\n \n # remove cosmic rays\n subtracted = cosmicray_lacosmic(subtracted)[0]\n \n thresh = 1.5 * bkg.globalrms\n print(thresh)\n sources = sep.extract(subtracted, thresh, mask=frame_mask, deblend_cont=1.0)\n print(len(sources))\n \n # remove crap\n sources = sources[sources['npix'] > 15]\n print(len(sources))\n \n sources = Table(sources)\n idx = []\n for s, source in enumerate(sources):\n for st, star in enumerate(stars):\n if source['y'] < star['y'] + trail_length / 2:\n if source['y'] > star['y'] - trail_length / 2:\n if source['x'] < star['x'] + 10:\n if source['x'] > star['x'] - 10:\n idx.append(s)\n sources.remove_rows(idx)\n print(len(sources))\n \n print(1 - stars['b'] / stars['a'])\n \n print('sources:')\n for s in sources:\n print(s['x'], s['y'], s['flag'])\n print('stars:')\n for st in stars:\n print(st['x'], st['y'], st['flag'], math.degrees(st['theta']))\n \n m, s = np.mean(frame), np.std(frame)\n ax[2].imshow(frame, interpolation='nearest', cmap='gray',\n vmin=m-s, vmax=m+s, origin='lower')\n \n for i in range(len(sources)):\n e = Ellipse(xy=(sources['x'][i], sources['y'][i]),\n width=6*sources['a'][i],\n height=6*sources['b'][i],\n angle=sources['theta'][i] * 180. / np.pi)\n e.set_facecolor('none')\n e.set_edgecolor('red')\n ax[2].add_artist(e)\n \n for i in range(len(stars)):\n f = Ellipse(xy=(stars['x'][i], stars['y'][i]),\n width=6*stars['a'][i],\n height=6*stars['b'][i],\n angle=stars['theta'][i] * 180. 
/ np.pi)\n f.set_facecolor('none')\n f.set_edgecolor('green')\n ax[2].add_artist(f)\n \n plt.show()\n","sub_path":"int/snippets/detect_test.py","file_name":"detect_test.py","file_ext":"py","file_size_in_byte":12627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"415496491","text":"# MLP for Pima Indians Dataset serialize to YAML and HDF5\nfrom keras.models import Sequential\nfrom keras.models import model_from_json\nfrom keras.models import load_model\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nimport numpy as np\nimport os\nimport sys\nfrom itertools import groupby\nimport LD\n\nm_test = LD.load_data(sys.argv[4], 'mfcc', 'test')\nf_test = LD.load_data(sys.argv[4], 'fbank', 'test')\n\nx_test = np.concatenate((m_test, f_test), axis=2)\n# load weights into new model\njson_file = open(sys.argv[1], 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\nloaded_model.load_weights(sys.argv[2])\n\n#print(loaded_model.summary())\n\nresult = loaded_model.predict(x_test, batch_size=1)\n#np.save(sys.argv[3], pred)\n\nfile_phone = open(os.path.join(sys.argv[4], \"48phone_char.map\"), 'r')\nphone = dict()\nlines = file_phone.readlines()\nfor line in lines:\n line = line.split('\\t')\n phone[int(line[1])] = line[2][:-1]\nphone[3] = phone[0]\nphone[5] = phone[2]\nphone[9] = phone[37]\nphone[14] = phone[27]\nphone[15] = phone[29]\nphone[16] = phone[37]\nphone[23] = phone[22]\nphone[43] = phone[37]\nphone[47] = phone[36]\n\nans = []\nfor line in result:\n tmp = []\n for w in line:\n tmp.append(phone[np.argmax(w)])\n ans.append(tmp)\n\n#np.save(sys.argv[2]+'.npy', ans)\n\nfor i, line in enumerate(ans):\n group = ans[i]\n '''\n group = []\n for j in range(2, len(ans[i])-2):\n if ans[i][j-2]==ans[i][j] or ans[i][j-1]==ans[i][j] or ans[i][j+1]==ans[i][j] or ans[i][j+2]==ans[i][j]:\n group += ans[i][j]\n group = ans[i][:2] + group + ans[i][-2:]\n '''\n tmp = []\n for x,y in groupby(group):\n k = len(list(y))\n if k > 1:\n for m in range(k):\n tmp.append(x)\n group = tmp\n \n group = [x for x,y in groupby(group) if len(list(y)) > 2]\n group = [x[0] for x in groupby(group)]\n if group[0] == 'L':\n del group[0]\n if group[-1] == 'L':\n del group[-1]\n ans[i] = group\n\nfp = open('sample.csv', 'r')\nfw = open(sys.argv[3], 'w')\nfw.write(\"id,phone_sequence\\n\")\n\ncontent = fp.readlines()\nfor (c, line) in zip(content[1:], ans):\n fw.write(c[:-1])\n for w in line:\n fw.write(w[0])\n fw.write('\\n')\n\n\n","sub_path":"hw1/load_model.py","file_name":"load_model.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"357814434","text":"\"\"\"\nLoad data from worksheet\n\"\"\"\nimport openpyxl\nfrom src.types.question import Question\nfrom src.cells import get_cells\n\ndef get_worksheets():\n \"\"\"\n Get worksheet\n return\n ------\n workbook: openpyxl.WorkSheet\n Got workbook\n \"\"\"\n workbook = openpyxl.load_workbook(\"questions.xlsx\");\n worksheets = workbook.worksheets\n\n return worksheets\n\ndef get_worksheet(worksheets, index):\n worksheet = worksheets[index]\n for row in range(1, 6):\n char_code = ord(\"A\")\n for _ in range(0, 15):\n judge_value = get_cells(worksheet, f\"{chr(char_code)}{row}\").value\n if \"問題\" in str(judge_value):\n Question.set_column(char_code)\n break\n char_code += 1\n 
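# stop scanning further rows once the question column has been found\n 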
if Question.question_column != \"\":\n break\n if Question.question_column == \"\":\n raise NameError(\"Question Column not found\")\n return worksheet\n","sub_path":"src/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"255799153","text":"from django.shortcuts import render, redirect\n\n# Create your views here.\nfrom home.models import AllTask\nfrom datetime import datetime\nfrom home.mixSlug import getSlug\n\n\ndef index(request):\n tasks = AllTask.objects.all()\n dict = {\n 'tasks': tasks,\n }\n if request.method == \"POST\":\n name = request.POST.get('name')\n about = request.POST.get('about')\n date = request.POST.get('date')\n slug = getSlug(name, about)\n addNew = AllTask(taskName=name, about=about, sColor=\"warning\",\n status=\"Init\", dateTime=datetime.now(), slug=slug, subDateTime=date)\n addNew.save()\n return redirect(\"index\")\n return render(request, 'index.html', dict)\n\n\ndef wip(request, slug):\n update = AllTask.objects.get(slug=slug)\n update.status = \"WIP\"\n update.sColor = \"primary\"\n update.save()\n home = redirect(\"/\")\n return home\n\n\ndef finish(request, slug):\n update = AllTask.objects.get(slug=slug)\n update.status = \"Finished\"\n update.sColor = \"success\"\n update.save()\n home = redirect(\"/\")\n return home\n\n\ndef delete(request, slug):\n update = AllTask.objects.get(slug=slug)\n update.delete()\n home = redirect(\"/\")\n return home\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"360946004","text":"from resources import order_database\nimport requests\nimport json\n\n\nclass Order:\n \"\"\"\n Used to create an order for a customer.\n\n Functions:\n record_order()\n update_order()\n get_vehicle()\n get_address_id()\n \"\"\"\n\n VEHICLE_REQUEST_URL = 'https://supply.team21.sweispring21.tk/api/vehicle-request'\n\n def __init__(self, customer_id, dest_address, service_type):\n # TODO: validate constructor inputs\n # Once inputs are validated, update functions that check these inputs\n self.customer_id = customer_id\n self.address = dest_address\n self.service_type = service_type\n\n # Hardcoded to None when initialized\n self.order_number = None\n self.vehicle = None\n\n def record_order(self):\n address_id = Order.get_address_id(self)\n\n # We require a valid address_ID to record our order\n if address_id is not None:\n # create a database connection\n database = order_database.OrderDatabase()\n\n # attempt to record an order\n order_result = database.record_order(self.customer_id, address_id)\n\n # close the database connection\n database.close()\n\n # if we received an orderNumber, the order was recorded successfully\n # if we received None (null), the order failed to be recorded\n if order_result is not None:\n self.order_number = order_result\n\n def update_order(self):\n success = False\n\n # Ensure that there is a vehicle to now update the specified order with\n if (self.order_number is not None) and (self.vehicle is not None):\n vehicle_number = self.vehicle['vehicle_number']\n\n # create a database connection\n database = order_database.OrderDatabase()\n\n # attempt to update the order\n result = database.update_order(self.order_number, vehicle_number)\n\n # close the database connection\n database.close()\n\n if result:\n success = True\n\n return success\n\n def 
get_vehicle(self):\n # if the order was placed (not None), we can request a vehicle for it\n if self.order_number is not None:\n dest_address = Order.format_address(self)\n\n # setting up the data that our api needs to receive\n data = {\n 'order_number': self.order_number,\n 'service_type': self.service_type,\n 'dest_address': dest_address\n }\n\n # create the request and take in a response\n response = requests.post(Order.VEHICLE_REQUEST_URL, json.dumps(data))\n # TODO: check that we received a 200 status before setting vehicle\n # set vehicle to our response (JSON format)\n vehicle_json = response.json()\n\n self.vehicle = vehicle_json['vehicle']\n\n # TODO: potentially make address its own class to handle things like this and format_address()\n def get_address_id(self):\n # create a database connection\n database = order_database.OrderDatabase()\n\n # separately pull every element of the address\n street_address = self.address['street_address']\n city = self.address['city']\n state = self.address['state']\n zip_code = self.address['zip_code']\n\n # attempt to record/access the address and receive its ID\n address_id = database.record_address(street_address, city, state, zip_code)\n\n # close the database connection\n database.close()\n\n # return the received order ID\n return address_id\n\n # TODO: potentially make address its own class to handle things like this and get_address_id()\n def format_address(self):\n # Formatting the address into one string\n # Example formatting for the following values:\n # streetAddress = '1234 Main St'\n # city = 'Austin'\n # state = 'TX'\n # zip_code = '78704'\n # dest_address = '1234 Main St Austin TX 78704'\n address_string = f\"{self.address['street_address']} {self.address['city']} \"\n address_string += f\"{self.address['state']} {self.address['zip_code']}\"\n\n return address_string\n\n # string representation of an Order\n def __str__(self):\n # Handling when order_number and or vehicle is None\n if self.order_number is None:\n order_number_string = \"Order does not have an associated number\"\n else:\n order_number_string = f\"Order Number: {self.order_number}\"\n\n if self.vehicle is None:\n vehicle_number_string = \"Order does not have an associated vehicle\"\n else:\n vehicle_number_string = f\"Vehicle Number: {self.vehicle['vehicle_number']}\"\n\n # Setting up String filled with all Order information\n order_str = f\"{order_number_string}, Customer ID: {self.customer_id}, Address: {self.format_address()}, \"\n order_str += f\"{vehicle_number_string}, Service Type: {self.service_type}\"\n return order_str\n\n # overwrite the display of internal representation for list use\n __repr__ = __str__\n\n # Orders are equal if their IDs are equal\n # We are ignoring implementing this for now as a result\n\n","sub_path":"swe-spring-2021-team-21-demand-be-94cadc3711be/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"18753115","text":"from __future__ import print_function\n\nimport unittest\nimport warnings\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.nets import vgg, resnet_v2\n\nfrom measureDLS.measurement.accuracy import Accuracy\nfrom measureDLS.measurement.neuron_coverage import NeuronCoverage\nfrom measureDLS.measurement.robustness import Robustness\nfrom measureDLS.models.tensorflow import TensorFlowModel\nfrom measureDLS.utils import utils\n\n\nclass 
TestTensorFlow(unittest.TestCase):\n class ImageNetValData():\n\n class ImageNetValDataX():\n\n def __init__(self, dir, filenames, width, height, fashion, transform):\n self._dir = dir\n self._filenames = filenames\n self._width = width\n self._height = height\n self._fashion = fashion\n self._transform = transform\n\n def __len__(self):\n return len(self._filenames)\n\n def __getitem__(self, index):\n session = tf.compat.v1.Session()\n x = None\n for filename in self._filenames[index]:\n path = self._dir + '/' + filename\n image = tf.image.decode_image(tf.io.read_file(path), channels=3)\n image = session.run(image)\n if self._fashion == 'vgg16' or self._fashion == 'vgg19':\n image = self._aspect_preserving_resize(image, 256)\n image = self._central_crop([image], self._height, self._width)[0]\n image.set_shape([self._height, self._width, 3])\n image = tf.cast(image, dtype=tf.float32)\n image = session.run(image)\n elif self._fashion == 'resnet50_v2' or self._fashion == 'mobilenet_v2':\n image = tf.cast(image, tf.float32)\n image = session.run(image)\n image = tf.image.central_crop(image, central_fraction=0.875)\n image = tf.expand_dims(image, 0)\n image = tf.compat.v1.image.resize_bilinear(image, [self._width, self._height], align_corners=False)\n image = tf.squeeze(image, [0])\n image = session.run(image)\n else:\n raise Exception('Invalid fashion', self._fashion)\n if self._transform is not None:\n image = self._transform(image)\n image = np.expand_dims(image, axis=0)\n if x is None:\n x = image\n else:\n x = np.concatenate((x, image))\n session.close()\n return x\n\n def _smallest_size_at_least(self, height, width, smallest_side):\n smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)\n height = tf.cast(height, dtype=tf.float32)\n width = tf.cast(width, dtype=tf.float32)\n smallest_side = tf.cast(smallest_side, dtype=tf.float32)\n scale = tf.cond(tf.greater(height, width), lambda: smallest_side / width, lambda: smallest_side / height)\n new_height = tf.cast(tf.math.rint(height * scale), dtype=tf.int32)\n new_width = tf.cast(tf.math.rint(width * scale), dtype=tf.int32)\n return new_height, new_width\n\n def _aspect_preserving_resize(self, image, smallest_side):\n smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)\n shape = tf.shape(image)\n height = shape[0]\n width = shape[1]\n new_height, new_width = self._smallest_size_at_least(height, width, smallest_side)\n image = tf.expand_dims(image, 0)\n resized_image = tf.compat.v1.image.resize_bilinear(image, [new_height, new_width], align_corners=False)\n resized_image = tf.squeeze(resized_image)\n resized_image.set_shape([None, None, 3])\n return resized_image\n\n def _central_crop(self, image_list, crop_height, crop_width):\n outputs = []\n for image in image_list:\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n offset_height = (image_height - crop_height) / 2\n offset_width = (image_width - crop_width) / 2\n outputs.append(self._crop(image, offset_height, offset_width, crop_height, crop_width))\n return outputs\n\n def _crop(self, image, offset_height, offset_width, crop_height, crop_width):\n original_shape = tf.shape(image)\n rank_assertion = tf.Assert(tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])\n with tf.control_dependencies([rank_assertion]):\n cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])\n size_assertion = tf.Assert(\n tf.logical_and(tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], 
crop_width)), ['Crop size greater than the image size.'])\n offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), dtype=tf.int32)\n with tf.control_dependencies([size_assertion]):\n image = tf.slice(image, offsets, cropped_shape)\n return tf.reshape(image, cropped_shape)\n\n def __init__(self, width, height, fashion, transform=None, label_offset=0):\n dir = utils.python_file_dir(__file__) + '/data/imagenet_val'\n filenames = []\n self.y = []\n with open(dir + '/' + 'ILSVRC2012_validation_ground_truth.txt', 'r') as f:\n lines = f.readlines()\n for line in lines:\n splits = line.split('---')\n if len(splits) != 5:\n continue\n filenames.append(splits[0])\n self.y.append(int(splits[2]))\n self.x = self.ImageNetValDataX(dir, filenames, width, height, fashion, transform)\n self.y = np.array(self.y, dtype=int) + label_offset\n\n def cifar10_data(self):\n x_test = np.load(utils.python_file_dir(__file__) + '/data/cifar-10-tensorflow/x_test.npy')\n y_test = np.load(utils.python_file_dir(__file__) + '/data/cifar-10-tensorflow/y_test.npy')\n return x_test, y_test\n\n def mnist_data(self):\n x_test = np.load(utils.python_file_dir(__file__) + '/data/MNIST/tensorflow/x_test.npy')\n y_test = np.load(utils.python_file_dir(__file__) + '/data/MNIST/tensorflow/y_test.npy')\n return x_test, y_test\n\n def test_imagenet_vgg16(self):\n tf.get_logger().setLevel('ERROR')\n session = tf.compat.v1.InteractiveSession(graph=tf.Graph())\n input = tf.compat.v1.placeholder(tf.float32, shape=(None, 224, 224, 3))\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=DeprecationWarning)\n logits, _ = vgg.vgg_16(input, is_training=False)\n restorer = tf.compat.v1.train.Saver()\n restorer.restore(session, utils.python_file_dir(__file__) + '/models/tensorflow_vgg_16/vgg_16.ckpt')\n mean = (123.68, 116.78, 103.94)\n std = (1, 1, 1)\n data_preprocess = self.ImageNetValData(224, 224, 'vgg16', transform=lambda x: (x - mean) / std, label_offset=0)\n data_original = self.ImageNetValData(224, 224, 'vgg16', transform=None, label_offset=0)\n bounds = (0, 255)\n\n measure_model = TensorFlowModel(session, logits, input)\n\n accuracy = Accuracy()\n measure_model.predict(data_preprocess.x, data_preprocess.y, [accuracy.update, accuracy.report])\n\n neuron_coverage = NeuronCoverage()\n measure_model.intermediate_layer_outputs(data_preprocess.x, [neuron_coverage.update, neuron_coverage.report])\n\n robustness = Robustness(bounds)\n measure_model.adversarial_samples(data_original.x, data_original.y, 3, bounds, [robustness.update, robustness.report, utils.draw_adversarial_samples], batch_size=1, preprocessing=(mean, std))\n\n session.close()\n\n self.assertAlmostEqual(accuracy.get(1), 0.600000)\n self.assertAlmostEqual(accuracy.get(5), 0.925000)\n self.assertAlmostEqual(neuron_coverage.get(0.3), 0.630143, places=2)\n self.assertAlmostEqual(robustness.success_rate, 1.000000)\n\n def test_imagenet_vgg19(self):\n tf.get_logger().setLevel('ERROR')\n session = tf.compat.v1.InteractiveSession(graph=tf.Graph())\n input = tf.compat.v1.placeholder(tf.float32, shape=(None, 224, 224, 3))\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=DeprecationWarning)\n logits, _ = vgg.vgg_19(input, is_training=False)\n restorer = tf.compat.v1.train.Saver()\n restorer.restore(session, utils.python_file_dir(__file__) + '/models/tensorflow_vgg_19/vgg_19.ckpt')\n mean = (123.68, 116.78, 103.94)\n std = (1, 1, 1)\n data_preprocess = self.ImageNetValData(224, 224, 'vgg19', transform=lambda x: (x - mean) / std, 
label_offset=0)\n data_original = self.ImageNetValData(224, 224, 'vgg19', transform=None, label_offset=0)\n bounds = (0, 255)\n\n measure_model = TensorFlowModel(session, logits, input)\n\n accuracy = Accuracy()\n measure_model.predict(data_preprocess.x, data_preprocess.y, [accuracy.update, accuracy.report])\n\n neuron_coverage = NeuronCoverage()\n measure_model.intermediate_layer_outputs(data_preprocess.x, [neuron_coverage.update, neuron_coverage.report])\n\n robustness = Robustness(bounds)\n measure_model.adversarial_samples(data_original.x, data_original.y, 3, bounds, [robustness.update, robustness.report, utils.draw_adversarial_samples], batch_size=1, preprocessing=(mean, std))\n\n session.close()\n\n self.assertAlmostEqual(accuracy.get(1), 0.625000)\n self.assertAlmostEqual(accuracy.get(5), 0.925000)\n self.assertAlmostEqual(neuron_coverage.get(0.3), 0.576892, places=2)\n self.assertAlmostEqual(robustness.success_rate, 1.000000)\n\n def test_imagenet_resnet50_v2(self):\n tf.get_logger().setLevel('ERROR')\n session = tf.compat.v1.InteractiveSession(graph=tf.Graph())\n input = tf.compat.v1.placeholder(tf.float32, shape=(None, 299, 299, 3))\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=DeprecationWarning)\n with tf.contrib.slim.arg_scope(resnet_v2.resnet_arg_scope()):\n resnet_v2.resnet_v2_50(input, num_classes=1001, is_training=False)\n restorer = tf.compat.v1.train.Saver()\n restorer.restore(session, utils.python_file_dir(__file__) + '/models/tensorflow_resnet_v2_50/resnet_v2_50.ckpt')\n logits = session.graph.get_tensor_by_name('resnet_v2_50/predictions/Reshape:0')\n mean = (127.5, 127.5, 127.5)\n std = (127.5, 127.5, 127.5)\n data_preprocess = self.ImageNetValData(299, 299, 'resnet50_v2', transform=lambda x: (x - mean) / std, label_offset=1)\n data_original = self.ImageNetValData(299, 299, 'resnet50_v2', transform=None, label_offset=1)\n bounds = (0, 255)\n\n measure_model = TensorFlowModel(session, logits, input)\n\n accuracy = Accuracy()\n measure_model.predict(data_preprocess.x, data_preprocess.y, [accuracy.update, accuracy.report])\n\n neuron_coverage = NeuronCoverage()\n measure_model.intermediate_layer_outputs(data_preprocess.x, [neuron_coverage.update, neuron_coverage.report])\n\n robustness = Robustness(bounds)\n measure_model.adversarial_samples(data_original.x, data_original.y, 3, bounds, [robustness.update, robustness.report, utils.draw_adversarial_samples], batch_size=1, preprocessing=(mean, std))\n\n session.close()\n\n self.assertAlmostEqual(accuracy.get(1), 0.750000)\n self.assertAlmostEqual(accuracy.get(5), 0.875000)\n self.assertAlmostEqual(neuron_coverage.get(0.3), 0.600558, places=2)\n self.assertAlmostEqual(robustness.success_rate, 1.000000)\n\n def test_imagenet_mobilenet_v2(self):\n tf.get_logger().setLevel('ERROR')\n graph = tf.Graph()\n with tf.io.gfile.GFile(utils.python_file_dir(__file__) + '/models/tensorflow_mobilenet_v2/mobilenet_v2_1.4_224_frozen.pb', 'rb') as f:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n input = tf.compat.v1.placeholder(np.float32, shape=[None, 224, 224, 3])\n tf.import_graph_def(graph_def, {'input': input})\n session = tf.compat.v1.InteractiveSession(graph=graph)\n logits = graph.get_tensor_by_name('import/MobilenetV2/Predictions/Reshape_1:0')\n mean = (127.5, 127.5, 127.5)\n std = (127.5, 127.5, 127.5)\n data_preprocess = self.ImageNetValData(224, 224, 'mobilenet_v2', transform=lambda x: (x - mean) / std, label_offset=1)\n data_original = 
self.ImageNetValData(224, 224, 'mobilenet_v2', transform=None, label_offset=1)\n bounds = (0, 255)\n\n measure_model = TensorFlowModel(session, logits, input)\n\n accuracy = Accuracy()\n measure_model.predict(data_preprocess.x, data_preprocess.y, [accuracy.update, accuracy.report])\n\n neuron_coverage = NeuronCoverage()\n measure_model.intermediate_layer_outputs(data_preprocess.x, [neuron_coverage.update, neuron_coverage.report])\n\n robustness = Robustness(bounds)\n measure_model.adversarial_samples(data_original.x, data_original.y, 3, bounds, [robustness.update, robustness.report, utils.draw_adversarial_samples], batch_size=1, preprocessing=(mean, std))\n\n session.close()\n\n self.assertAlmostEqual(accuracy.get(1), 0.725000)\n self.assertAlmostEqual(accuracy.get(5), 0.900000)\n self.assertAlmostEqual(neuron_coverage.get(0.3), 0.288900, places=2)\n self.assertAlmostEqual(robustness.success_rate, 1.000000)\n\n def test_cifar10_simple(self):\n tf.get_logger().setLevel('ERROR')\n session = tf.compat.v1.InteractiveSession(graph=tf.Graph())\n restorer = tf.compat.v1.train.import_meta_graph(utils.python_file_dir(__file__) + '/models/tensorflow_cifar10_simple/tensorflow_cifar10_simple.meta')\n restorer.restore(session, tf.train.latest_checkpoint(utils.python_file_dir(__file__) + '/models/tensorflow_cifar10_simple/'))\n input = session.graph.get_tensor_by_name('Placeholder:0')\n logits = session.graph.get_tensor_by_name('fc2/add:0')\n x, y = self.cifar10_data()\n bounds = (0, 1)\n\n measure_model = TensorFlowModel(session, logits, input)\n\n accuracy = Accuracy()\n measure_model.predict(x, y, [accuracy.update, accuracy.report])\n\n neuron_coverage = NeuronCoverage()\n measure_model.intermediate_layer_outputs(x, [neuron_coverage.update, neuron_coverage.report])\n\n robustness = Robustness(bounds)\n measure_model.adversarial_samples(x, y, 3, bounds, [robustness.update, robustness.report, utils.draw_adversarial_samples], batch_size=1)\n\n session.close()\n\n self.assertAlmostEqual(accuracy.get(1), 0.327100)\n self.assertAlmostEqual(accuracy.get(5), 0.820200)\n self.assertAlmostEqual(neuron_coverage.get(0.3), 0.551282, places=2)\n self.assertAlmostEqual(robustness.success_rate, 1.000000)\n\n def test_mnist_simple(self):\n tf.get_logger().setLevel('ERROR')\n session = tf.compat.v1.InteractiveSession(graph=tf.Graph())\n restorer = tf.compat.v1.train.import_meta_graph(utils.python_file_dir(__file__) + '/models/tensorflow_mnist_simple/tensorflow_mnist_simple.meta')\n restorer.restore(session, tf.train.latest_checkpoint(utils.python_file_dir(__file__) + '/models/tensorflow_mnist_simple/'))\n input = session.graph.get_tensor_by_name('Placeholder:0')\n logits = session.graph.get_tensor_by_name('fc2/add:0')\n x, y = self.mnist_data()\n bounds = (0, 1)\n\n measure_model = TensorFlowModel(session, logits, input)\n\n accuracy = Accuracy()\n measure_model.predict(x, y, [accuracy.update, accuracy.report])\n\n neuron_coverage = NeuronCoverage()\n measure_model.intermediate_layer_outputs(x, [neuron_coverage.update, neuron_coverage.report])\n\n robustness = Robustness(bounds)\n measure_model.adversarial_samples(x, y, 3, bounds, [robustness.update, robustness.report, utils.draw_adversarial_samples], batch_size=1)\n\n session.close()\n\n self.assertAlmostEqual(accuracy.get(1), 0.937700)\n self.assertAlmostEqual(accuracy.get(5), 0.997200)\n self.assertAlmostEqual(neuron_coverage.get(0.3), 0.591150, places=2)\n self.assertAlmostEqual(robustness.success_rate, 1.000000)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"measureDLS/tests/test_tensorflow.py","file_name":"test_tensorflow.py","file_ext":"py","file_size_in_byte":17033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"403058334","text":"'''\n以下為本程式回答問題時使用的 Q&A 規則,例如對於以下 Q&A 規則物件\n\n: 'Q':\"想 | 希望\", 'A':\"為何想*呢?|真的想*?|那就去做阿?為何不呢?\",\n\n代表的是,當您輸入的字串中有「想」或「希望」這樣的詞彙時,\n程式就會從 'A': 欄位中的回答裏隨機選出一個來回答。\n\n回答語句中的 * 代表比對詞彙之後的字串,舉例而言、假如您說:\n\n 我想去巴黎\n\n那麼我們的程式從這四個可能的規則中隨機挑出一個來產生答案,產生的答案可能是:\n\n為何想去巴黎呢?\n真的想去巴黎?\n那就去做阿?\n為何不呢?\n\nEliza 就是一個這麼簡單的程式而已。\n'''\n\nimport re\nimport math\nimport random as R\n# Q&A 陣列宣告\nqa_list = [\n{ 'Q':\"謝謝\", 'A':\"不客氣!\" },\n{ 'Q':\"對不起 | 抱歉 | 不好意思\", 'A':\"別說抱歉 !|別客氣,儘管說 !\" },\n{ 'Q':\"可否 | 可不可以\", 'A':\"你確定想*?\" },\n{ 'Q':\"我想\", 'A':\"你為何想*?\" },\n{ 'Q':\"我要\", 'A':\"你為何要*?\" },\n{ 'Q':\"你是\", 'A':\"你認為我是*?\" },\n{ 'Q':\"認為 | 以為\", 'A':\"為何說*?\" },\n{ 'Q':\"感覺\", 'A':\"常有這種感覺嗎?\" },\n{ 'Q':\"為何不\", 'A':\"你希望我*!\" },\n{ 'Q':\"是否\", 'A':\"為何想知道是否*?\" },\n{ 'Q':\"不能\", 'A':\"為何不能*?|你試過了嗎?|或許你現在能*了呢?\" },\n{ 'Q':\"我是\", 'A':\"你好,久仰久仰!\" },\n{ 'Q':\"甚麼 | 什麼 | 何時 | 誰 | 哪裡 | 如何 | 為何 | 因何\", 'A':\"為何這樣問?|為何你對這問題有興趣?|你認為答案是甚麼呢?|你認為如何呢?|你常問這類問題嗎?|這真的是你想知道的嗎?|為何不問問別人?|你曾有過類似的問題嗎?|你問這問題的原因是甚麼呢?\" },\n{ 'Q':\"原因\", 'A':\"這是真正的原因嗎?|還有其他原因嗎?\" }, \n{ 'Q':\"理由\", 'A':\"這說明了甚麼呢?|還有其他理由嗎?\" },\n{ 'Q':\"你好 | 嗨 | 您好\", 'A':\"你好,有甚麼問題嗎?\" },\n{ 'Q':\"或許\", 'A':\"你好像不太確定?\" },\n{ 'Q':\"不曉得 | 不知道\", 'A':\"為何不知道?|在想想看,有沒有甚麼可能性?\" },\n{ 'Q':\"不想 | 不希望\", 'A':\"有沒有甚麼辦法呢?|為何不想*呢?|那你希望怎樣呢?\" }, \n{ 'Q':\"想 | 希望\", 'A':\"為何想*呢?|真的想*?|那就去做阿?為何不呢?\" },\n{ 'Q':\"不\", 'A':\"為何不*?|所以你不*?\" },\n{ 'Q':\"請\", 'A':\"我該如何*呢?|你想要我*嗎?\" },\n{ 'Q':\"你\", 'A':\"你真的是在說我嗎?|別說我了,談談你吧!|為何這麼關心我*?|不要再說我了,談談你吧!|你自己*\" },\n{ 'Q':\"總是 | 常常\", 'A':\"能不能具體說明呢?|何時?\" },\n{ 'Q':\"像\", 'A':\"有多像?|哪裡像?\" },\n{ 'Q':\"對\", 'A':\"你確定嗎?|我了解!\" },\n{ 'Q':\"朋友\", 'A':\"多告訴我一些有關他的事吧!|你認識他多久了呢?\" },\n{ 'Q':\"電腦\", 'A':\"你說的電腦是指我嗎?\" }, \n{ 'Q':\"難過\", 'A':\"別想它了|別難過|別想那麼多了|事情總是會解決的\"},\n{ 'Q':\"高興\", 'A':\"不錯ㄚ|太棒了|這樣很好ㄚ\"},\n{ 'Q':\"是阿|是的\", 'A':\"甚麼事呢?|我可以幫助你嗎?|我希望我能幫得上忙!\" },\n{ 'Q':\"\", 'A':\"我了解|我能理解|還有問題嗎 ?|請繼續說下去|可以說的更詳細一點嗎?|這樣喔! 我知道!|然後呢? 發生甚麼事?|再來呢? 可以多說一些嗎|接下來呢? |可以多告訴我一些嗎?|多談談有關你的事,好嗎?|想多聊一聊嗎|可否多告訴我一些呢?\" }\n]\n\ndef answer(say):\n\tfor qa in qa_list: # 對於每一個 QA\n\t\tqList = qa['Q'].split(\"|\") # 取出 Q 部分,分割成一個一個的問題字串 q\n\t\taList = qa['A'].split(\"|\") # 取出回答 A 部分,分割成一個一個的回答字串 q\n\t\tfor q in qList: # 對於每個問題字串 q\n\t\t\tif q.strip() == \"\": # 如果是最後一個「空字串」的話,那就不用比對,直接任選一個回答。\n\t\t\t\treturn R.choice(aList) # 那就從答案中任選一個回答\n\t\t\tm = re.search(\"(.*)\"+q+\"([^?.;]*)\", say)\n\t\t\tif m: # 比對成功的話\n\t\t\t\ttail = m.group(2) # 就取出句尾\n\t\t\t\t# 將問句句尾的「我」改成「你」,「你」改成「我」。\n\t\t\t\ttail = tail.replace(\"我\", \"#\").replace(\"你\", \"我\").replace(\"#\", \"你\")\n\t\t\t\treturn R.choice(aList).replace('*', tail) # 然後將 * 改為句尾進行回答\n\treturn \"然後呢?\" # 如��發生任何錯誤,就回答「然後呢?」來混過去。\n\n\ndef eliza():\n\tprint('你好,我是 Eliza ! 
')\n\twhile (True):\n\t\tsay = input('> ') # 取得使用者輸入的問句。\n\t\tif say == 'bye':\n\t\t\tbreak\n\t\tans = answer(say)\n\t\tprint(ans)\n\neliza()\n","sub_path":"python/07-nlp/eliza.py","file_name":"eliza.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"121855309","text":"import torch\nfrom torch.nn import functional\nfrom all.nn import ListNetwork\nfrom .stochastic import StochasticPolicy\n\n\nclass SoftmaxPolicy(StochasticPolicy):\n def __init__(\n self,\n model,\n optimizer,\n _, # deprecated\n **kwargs\n ):\n model = ListNetwork(model)\n\n def distribution(outputs):\n probs = functional.softmax(outputs, dim=-1)\n return torch.distributions.Categorical(probs)\n\n super().__init__(model, optimizer, distribution, **kwargs)\n","sub_path":"all/policies/softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"427730178","text":"import logging\nimport sys\nfrom collections import defaultdict\nfrom heapq import *\nimport re\nimport numpy as np\n\nlogging.basicConfig(level=logging.DEBUG,\n filename='../logs/CodeCraft-2019.log',\n format='[%(asctime)s] %(levelname)s [%(funcName)s: %(filename)s, %(lineno)d] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filemode='a')\n\ndef main():\n if len(sys.argv) != 5:\n logging.info('please input args: car_path, road_path, cross_path, answerPath')\n exit(1)\n\n car_path = sys.argv[1]\n road_path = sys.argv[2]\n cross_path = sys.argv[3]\n answer_path = sys.argv[4]\n\n logging.info(\"car_path is %s\" % (car_path))\n logging.info(\"road_path is %s\" % (road_path))\n logging.info(\"cross_path is %s\" % (cross_path))\n logging.info(\"answer_path is %s\" % (answer_path))\n\n np.random.seed(123)\n \n def dataProcess(carPath, crossPath, roadPath):\n carData = []\n crossData = []\n roadData = []\n with open(carPath, 'r') as lines:\n for line in lines:\n line = line.split(',')\n if re.findall(\"\\d+\", line[0]) != []:\n line[0] = re.findall(\"\\d+\", line[0])[0]\n if re.findall(\"\\d+\", line[-1]) != []:\n line[-1] = re.findall(\"\\d+\", line[-1])[0]\n # for i in range(len(line)):\n # line[i] = int(line[i].strip())\n carData.append(line)\n\n carData = np.array(carData)\n index = np.argsort(carData,0,kind = 'stable')[:,-2]\n carData = carData[index, :]\n carData = carData.tolist()\n carData = carData[::-1]\n with open(roadPath, 'r') as lines:\n for line in lines:\n line = line.split(',')\n if re.findall(\"\\d+\", line[0]) != []:\n line[0] = re.findall(\"\\d+\", line[0])[0]\n if re.findall(\"\\d+\", line[-1]) != []:\n line[-1] = re.findall(\"\\d+\", line[-1])[0]\n roadData.append(line)\n with open(crossPath, 'r') as lines:\n for line in lines:\n line = line.split(',')\n if re.findall(\"\\d+\", line[0]) != []:\n line[0] = re.findall(\"\\d+\", line[0])[0]\n if re.findall(\"\\d+\", line[-1]) != []:\n line[-1] = re.findall(\"\\d+\", line[-1])[0]\n crossData.append(line)\n\n carData = carData[1: ]\n for i in range(len(carData)):\n for j in range(len(carData[i])):\n carData[i][j] = int(carData[i][j].strip())\n roadData = roadData[1: ]\n for i in range(len(roadData)):\n for j in range(len(roadData[i])):\n roadData[i][j] = int(roadData[i][j].strip())\n crossData = crossData[1: ]\n for i in range(len(crossData)):\n for j in range(len(crossData[i])):\n crossData[i][j] = int(crossData[i][j].strip())\n if crossData[i][j] == 1:\n crossData[i][j] = -1\n return 
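The answer() routine in the eliza.py record above is the whole trick: scan the rule table, capture the tail after the matched keyword, swap grammatical person in the tail via a temporary marker, then splice the tail over '*'. A runnable English-language miniature of the same flow (the single rule below is a stand-in, not taken from the original Chinese rule table):

import re
import random

rules = [("i want", ["why do you want*?", "do you really want*?"])]

def reply(say):
    for pattern, answers in rules:
        m = re.search("(.*)" + pattern + "([^?.;]*)", say.lower())
        if m:
            tail = m.group(2)
            # swap person in the echoed tail, as the original does with a '#' marker
            tail = tail.replace(" me", " you").replace(" my ", " your ")
            return random.choice(answers).replace("*", tail)
    return "and then?"

print(reply("I want to visit Paris"))   # e.g. "why do you want to visit paris?"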
carData, crossData, roadData\n\n def dijkstra(edges, f, t):\n if f == 19 and t == 52:\n print(\"debug\")\n g = defaultdict(list)\n for l,r,c in edges:\n g[l].append((c,r))\n\n q, seen, mins = [(0,f,())], set(), {f: 0}\n while q:\n (cost,v1,path) = heappop(q)\n if v1 not in seen:\n seen.add(v1)\n path = (v1, path)\n if v1 == t:\n\n return (cost, path)\n\n for c, v2 in g.get(v1, ()):\n if v2 in seen: continue\n prev = mins.get(v2, None)\n next = cost + c\n if prev is None or next < prev:\n mins[v2] = next\n heappush(q, (next, v2, path))\n\n\n\n return float(\"inf\")\n\n\n def Seek(carData, crossData, roadData):\n\n edges = []\n\n # 生成地图(双向图)\n for i in range(len(roadData)):\n if (roadData[i][-1] == 1):\n edges.append((str(roadData[i][-3]), str(roadData[i][-2]), roadData[i][1]/roadData[i][3]))\n edges.append((str(roadData[i][-2]), str(roadData[i][-3]), roadData[i][1]/roadData[i][3]))\n else:\n edges.append((str(roadData[i][-3]), str(roadData[i][-2]), roadData[i][1]/roadData[i][3]))\n\n # 生成地图(单向图)\n # for i in range(len(roadData)):\n # if (roadData[i][-1] == 1):\n # edges.append((str(roadData[i][-3]), str(roadData[i][-2]), roadData[i][1]))\n\n # print(\"ok\")\n\n carRoute = []\n # print(dijkstra(edges, \"22\", \"2\"))\n for carNum in range(len(carData)):\n\n # result = dijkstra(edges, \"2\", \"31\")\n result = dijkstra(edges, str(carData[carNum][1]), str(carData[carNum][2]))\n sumarize = []\n while result[1] != ():\n sumarize.append(int(result[0]))\n if result[1] != ():\n result = result[1]\n sumarize.append(int(result[0]))\n # print(sumarize)\n\n\n lengthSumarize = len(sumarize)\n carRouteTmp = [carData[carNum][0]]\n car_speed = carData[carNum][3]\n low_add = 0 if car_speed == 8 else (90 if car_speed == 6 else (190 if car_speed == 4 else 325))\n high_add = 90 if car_speed == 8 else (190 if car_speed == 6 else (280 if car_speed == 4 else 400)) \n carRouteTmp.append(carData[carNum][-1]+int(np.random.uniform(low_add,high_add)))\n for i in range(1, lengthSumarize - 1):\n if carData[carNum][0] == 10054 and i == 5:\n print(\"debug\")\n for j in range(len(roadData)):\n if ((roadData[j][-3] == sumarize[lengthSumarize - i] and roadData[j][-2] == sumarize[lengthSumarize - i -1]) or (roadData[j][-2] == sumarize[lengthSumarize - i] and roadData[j][-3] == sumarize[lengthSumarize - i -1])):\n carRouteTmp.append(roadData[j][0])\n carRoute.append(tuple(carRouteTmp))\n # print(carRoute)\n\n return carRoute\n\n\n carData, crossData, roadData = dataProcess(car_path, cross_path, road_path)\n carRoute = Seek(carData, crossData, roadData)\n\n with open(answer_path, 'w') as f:\n f.write('#(carId,StartTime,RoadId...)')\n f.write('\\n')\n for i in range(len(carRoute)):\n for j in range(len(carRoute[i])):\n if j == 0:\n f.write('(')\n f.write(str(carRoute[i][j]))\n if j != len(carRoute[i]) - 1:\n f.write(', ')\n else:\n f.write(')')\n if i != len(carRoute) - 1:\n f.write('\\n')\n f.close()\n\n print(\"OK\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"test/CodeCraft-2019.py","file_name":"CodeCraft-2019.py","file_ext":"py","file_size_in_byte":6925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"577174050","text":"from xml.etree import ElementTree\nfrom swedish_geoposition_converter import *\n\nclass Coordinate:\n\t\"\"\" Normalises a Facility Coordinate to vanilla SWEREF99 (~WGS84) from SWEREF99 2015 \"\"\"\n\tdef __init__(self, element):\n\t\tself.lat = None\n\t\tself.lon = None\n\n\t\tfor projection in [ 'LU', 'RT90' ]:\n\t\t\tgrid_x = 
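dijkstra() in the record above is the standard lazy-deletion heap variant: stale heap entries are simply skipped once their node has been settled. The same pattern, self-contained on a made-up three-node graph:

from heapq import heappush, heappop
from collections import defaultdict

def dijkstra(edges, src, dst):
    graph = defaultdict(list)
    for u, v, w in edges:
        graph[u].append((w, v))
    heap, seen = [(0, src, [src])], set()
    while heap:
        cost, node, path = heappop(heap)
        if node in seen:
            continue                      # stale entry: a shorter path already settled this node
        if node == dst:
            return cost, path
        seen.add(node)
        for w, nxt in graph[node]:
            if nxt not in seen:
                heappush(heap, (cost + w, nxt, path + [nxt]))
    return float("inf"), []

print(dijkstra([("a", "b", 1), ("b", "c", 2), ("a", "c", 5)], "a", "c"))  # (3, ['a', 'b', 'c'])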
element.find(projection + '_X').text\n\t\t\tgrid_y = element.find(projection + '_Y').text\n\n\t\t\tif grid_x == None or grid_y == None or grid_x == \"0\" or grid_y == \"0\":\n\t\t\t\tcontinue\t# empty value for this coordinate type\n\n\t\t\tgrid_x = str(grid_x).replace(',', '.')\n\t\t\tgrid_y = str(grid_y).replace(',', '.')\n\t\t\t\n\t\t\tgrid_x, grid_y = list(map(float, [ grid_x, grid_y ]))\n\n\t\t\t# convert non-WGS84 to vanilla SWEREF99 (which is close enough to WGS84)\n\t\t\tif projection == 'LU':\n\t\t\t\t# LU is not used,anymore, nowadays this field has SWEREF99 20 15 (dd mm)\n\t\t\t\tconverter = SwedishGeoPositionConverter('sweref_99_2015')\n\t\t\t\tself.lat, self.lon = converter.gridToGeodetic(grid_x, grid_y)\n\t\t\telif projection == 'RT90':\n\t\t\t\tconverter = SwedishGeoPositionConverter('rt90_2.5_gon_v')\n\t\t\t\tself.lat, self.lon = converter.gridToGeodetic(grid_x, grid_y)\n\t\t\tbreak\n\n\t\tif self.lat == None or self.lon == None:\n\t\t\tself.lat = element.find('WGS84_LAT').text\n\t\t\tself.lon = element.find('WGS84_LONG').text\n\n\t\tif self.lat == None or self.lon == None:\n\t\t\traise ValueError(\"Bad or no coordinates\")\n\n\tdef __str__(self, projection=None):\n\t\tif self.lat == None or self.lon == None:\n\t\t\treturn None\n\n\t\tif projection != None:\n\t\t\tconverter = SwedishGeoPositionConverter(projection)\n\t\t\treturn \"{0},{1}\".format(converter.geodeticToGrid(self.lat, self.lon))\n\t\t\t\n\t\treturn \"{0},{1}\".format(self.lat, self.lon)\nclass Facility:\n\t\"\"\" The Facility object is initialised with an XML Element \"\"\"\n\n\tID = None\n\tCoordinate = None\n\n\tdef __init__(self, element):\n\t\tself.__class__.ID = int(element.attrib['ID'])\n\n\t\tfor tag in [ 'Name', 'Buildingtype' ]:\n\t\t\tsetattr(self.__class__, tag, str(element.find(tag).text))\n\n\t\tself.__class__.Coordinate = Coordinate(element.find('Coordinate'))\n\n\t\tself.element = element\n\t\n\tdef __str__(self, xml=True):\n\t\treturn ElementTree.tostring(self.element, encoding='utf-8').decode('utf-8')\n\t\n\tdef cols(self):\n\t\treturn '{0}\\t{1}\\t{2}\\t{3}\\t{4}'.format(self.ID, self.Coordinate.lat, self.Coordinate.lon, self.Name, self.Buildingtype)\n","sub_path":"OpenUmeaRecreationalFacilities/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"415328620","text":"import MySQLdb\nimport csv\n\n\ndef connectdb():\n # 打开数据库连接\n db = MySQLdb.connect(\"localhost\", \"root\", \"123456pkh\", \"dqd\", charset='utf8')\n # 使用cursor()方法获取操作游标\n return db\n\n\ndef player_match_data_sql(id, match_data):\n sql_list = []\n if len(match_data) == 2:\n return sql_list\n match_data = match_data[3:-3].replace(\"None\", \"\\'NULL\\'\").replace(\"', '\", \"_\").replace(\"'], ['\", \"&\")\n match_records = match_data.split(\"&\")\n del(match_records[0])\n for match_record in match_records:\n info = match_record.replace(\"~\", \"NULL\").split(\"_\")\n sql = \"INSERT INTO PLAYERMATCHDATA VALUES(NULL, %s, '%s', '%s', %s, %s, %s, %s, %s, %s, %s)\" \\\n % (id, info[0], info[1], info[2], info[3], info[4], info[5], info[6], info[7], info[8])\n sql_list.append(sql)\n return sql_list\n\n\ndef team_player_match_data_sql(league, team_id):\n print(league, team_id)\n sql_list = []\n filename = 'person_list_' + str(league) + \"_\" + str(team_id) + '.csv'\n first = True\n with open(filename, encoding='utf-8')as f:\n f_csv = csv.reader(f)\n for row in f_csv:\n if first:\n first = False\n continue\n 
id = row[0]\n match_data = row[3]\n sqls = player_match_data_sql(id, match_data)\n sql_list.append(sqls)\n return sql_list\n\n\ndef main():\n db = connectdb()\n cursor = db.cursor()\n league_list = [1, 2, 3, 4, 10]\n for league in league_list:\n first = True\n filename = 'team_list_' + str(league) + '.csv'\n with open(filename, encoding='utf-8')as f:\n f_csv = csv.reader(f)\n for row in f_csv:\n if first:\n first = False\n continue\n id = row[0]\n sql_list = team_player_match_data_sql(league, id)\n for sqls in sql_list:\n for sql in sqls:\n try:\n # 执行sql语句\n cursor.execute(sql)\n # 提交到数据库执行\n db.commit()\n except:\n print(sql)\n # 发生错误时回滚\n db.rollback()\n db.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"crawler/dqd_data&script/player_match_data.py","file_name":"player_match_data.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"432688177","text":"from django.db import models\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\n\n# Create your models here.\n\nCOUNTRY_LIST = [\n 'Argentina',\n 'Australia',\n 'Austria',\n 'Belgium',\n 'Brazil',\n 'Bulgaria',\n 'Canada',\n 'Chile',\n 'Colombia',\n 'Czech Republic',\n 'Denmark',\n 'Ecuador',\n 'Egypt',\n 'El Salvador',\n 'Estonia',\n 'Faroe Islands',\n 'Finland',\n 'France',\n 'Germany',\n 'Guatemala',\n 'Honduras',\n 'Hong Kong',\n 'Hungary',\n 'Indonesia',\n 'Iran (Islamic Republic of)',\n 'Ireland',\n 'Israel',\n 'Italy',\n 'Jordan',\n 'Korea, Republic of',\n 'Kuwait',\n 'Latvia',\n 'Luxembourg',\n 'Malaysia',\n 'Mexico',\n 'Morocco',\n 'Netherlands',\n 'New Zealand',\n 'Norway',\n 'Paraguay',\n 'Peru',\n 'Philippines',\n 'Poland',\n 'Portugal',\n 'Reunion',\n 'Romania',\n 'Russian Federation',\n 'Saudi Arabia',\n 'Singapore',\n 'Slovakia',\n 'South Africa',\n 'Spain',\n 'Sri Lanka',\n 'Sweden',\n 'Switzerland',\n 'Syrian Arab Republic',\n 'Thailand',\n 'Ukraine',\n 'United Arab Emirates',\n 'United Kingdom',\n 'United States',\n 'Uruguay',\n 'Vietnam',\n]\nclass Account(ndb.Model):\n firstname = ndb.StringProperty()\n lastname = ndb.StringProperty()\n guser = ndb.UserProperty()\n nickname = ndb.StringProperty(indexed = True)\n dob = ndb.DateProperty()\n country = ndb.StringProperty(choices = COUNTRY_LIST)\n date_created = ndb.DateProperty()\n","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"145377091","text":"import requests\nimport datetime\nfrom bs4 import BeautifulSoup as bs\nfrom lxml import html\nurl = 'http://www.realmadrid.com/en/football/schedule'\nresponse = requests.get(url)\nhtml = response.content\nsoup = bs(html)\nloc = soup.find('p', {'class': 'm_highlighted_next_game_location'}).contents\nloc1 = loc[0]\nif \"Santiago\" in loc1:\n opp = soup.find('div',{'class':'m_highlighted_next_game_second_team'}).strong.contents\nelse:\n opp = soup.find('div', {'class': 'm_highlighted_next_game_team m_highlighted_next_game_second_team'}).strong.contents\nopp1=opp[0]\ntime = soup.find('div', {'class': 'm_highlighted_next_game_info_wrapper'}).time.contents\ntime1 = time[0]\ndate = soup.find('header', {'class': 'm_highlighted_next_game_header'}).time.contents\ndate1 = date[0]\ntimes = time1.split(\":\")\ndates = date1.split(\"/\")\n\nhour = times[0]\nmintemp = times[1]\nminutes = mintemp[:-2]\nyear = dates[0]\nmonth = dates[1]\nday = dates[2]\nfrom 
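player_match_data_sql() above interpolates values straight into the INSERT with %, which breaks on quoted strings and invites SQL injection. A hedged sketch of the same insert using MySQLdb's parameter binding instead (credentials and the sample row are placeholders; the column layout is inferred from the statement above):

import MySQLdb

db = MySQLdb.connect("localhost", "user", "password", "dqd", charset="utf8")
cursor = db.cursor()
sql = ("INSERT INTO PLAYERMATCHDATA "
       "VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
row = ["2019-08-01", "vs Foo", 90, 1, 0, 0, 0, 0, 7.5]   # made-up per-match fields
cursor.execute(sql, [42] + row)   # the driver escapes every value
db.commit()
db.close()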
flask import Flask, render_template\napp = Flask(__name__)\n@app.route('/')\ndef index():\n return render_template('index.html',hour=hour,minutes=minutes,year=year,month=month,day=day,loc=loc1,opp=opp1)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',debug=True)\n","sub_path":"Madrid-match-countdown/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"251344971","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\nfrom .bbox_target import bbox_target\nfrom .geometry import bbox_overlaps\nfrom .samplers import (BaseSampler, CombinedSampler,\n InstanceBalancedPosSampler, IoUBalancedNegSampler,\n PseudoSampler, RandomSampler, SamplingResult)\nfrom .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n\nfrom .assign_sampling import ( # isort:skip, avoid recursive imports\n assign_and_sample, build_assigner, build_sampler)\n\n__all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',\n 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',\n 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',\n 'distance2bbox', 'bbox_target'\n]\n","sub_path":"PyTorch/contrib/cv/detection/SOLOv1/mmdet/core/bbox/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"113071820","text":"import math\ndef move(x,y,step,angle=0):\n nx=x+step*math.cos(angle)\n ny=y-step*math.sin(angle)\n return nx,ny\nresult = move(100, 100, 60, math.pi / 6)\nprint(result)\n#python 的函数返回多个值其实就是返回一个tuple\n#位置参数\ndef power(x,n):\n s=1\n while n>0:\n n=n-1\n s=s*x\n return s\n#默认参数\ndef power2(x,n=2):\n s=1\n while n>0:\n n=n-1\n s=s*n\n return s\n#可变参数\ndef calc(number):\n sum=0\n for n in number:\n sum=sum+n\n return sum\nprint(calc((1,2,3,4)))\n#===>\ndef calc1(*number):\n sum =0\n for n in number:\n sum=sum+n\n return sum\nprint(calc1(1,2,3,4))\ntuple=(1,2,3,4,5)\nprint(calc1(*tuple))#list tuple前面加个*变成可变参数传进去\n#关键字参数\ndef person(name,age,**kwargs):\n print(name,age,'other:',kwargs)\nperson('lzp',27)\nperson('lung',26,city='guangzhou',sex='F')\nextra={'city':'bj','sex':'L'}\nperson('lzp',30,**extra)#dict前面加**变成关键字参数传入\n#命名关键字,只接收命名的关键字参数。\ndef person2(name,age,*,city,job):\n print(name,age,city,job)\nperson2('l',23,city='zhaoqing',job='IT')\n# person2('ll',24,'gz','it') 
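The scraper above pulls the kickoff time and date apart by hand (split(':'), split('/'), trimming a suffix). datetime.strptime collapses that into a single parse; the sample strings below mimic the site's apparent format and are made up:

from datetime import datetime

time1, date1 = "20:45h", "2015/04/18"
kickoff = datetime.strptime(date1 + " " + time1[:5], "%Y/%m/%d %H:%M")
print(kickoff.isoformat())   # 2015-04-18T20:45:00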
#这是错误的,命名关键字必须加上参数名\n\n","sub_path":"one/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"84711030","text":"# Given a read only array of n + 1 integers between 1 and n, find one number that repeats in linear time using less than O(n) space \n# and traversing the stream sequentially O(1) times.\n\n# Sample Input:\n\n# [3 4 1 4 1]\n# Sample Output:\n\n# 1\n# If there are multiple possible answers ( like in the sample case above ), output any one.\n\n# If there is no duplicate, output -1\n\n##########################################################################################################################################\n\nclass Solution:\n # @param A : tuple of integers\n # @return an integer\n def repeatedNumber(self, A):\n if len(A) < 1:\n return -1\n lst = [None] * (len(A) - 1)\n for ii in range(len(A)):\n if lst[A[ii]-1] is None:\n lst[A[ii]-1] = 1\n else:\n return A[ii]\n return -1\n\n##########################################################################################################################################\n","sub_path":"Arrays/Find_Duplicate_in_Array.py","file_name":"Find_Duplicate_in_Array.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"129584048","text":"\"\"\"\nFunky extraction using Justus' method\n\"\"\"\n\nimport networkx as nx\nimport random\nimport numpy as np\nfrom copy import deepcopy\nimport heapq\nfrom collections import defaultdict\n\nimport vrgs.globals as globals\nfrom vrgs.Rule import FullRule\nfrom vrgs.Rule import NoRule\nfrom vrgs.Rule import PartRule\nfrom vrgs.globals import find_boundary_edges\nfrom vrgs.part_info import set_boundary_degrees\n\n\n\nfrom vrgs.Tree import create_tree, TreeNode\n\ndef get_buckets(root, k):\n \"\"\"\n\n :return:\n \"\"\"\n bucket = defaultdict(set) # create buckets keyed in by the absolute difference of k and number of leaves and the list of nodes\n node2bucket = {} # keeps track of which bucket every node in the tree is in\n nodes = set()\n stack = [root]\n\n while len(stack) != 0:\n node = stack.pop()\n nodes.add(node)\n val = abs(node.nleaf - k)\n\n if not node.is_leaf: # don't add to the bucket if it's a leaf\n bucket[val].add(node) # bucket is a defaultdict\n node2bucket[node] = val\n\n if node.left is not None:\n stack.append(node.left)\n\n if node.right is not None:\n stack.append(node.right)\n\n return nodes, bucket, node2bucket\n\n\ndef extract_subtree(k, buckets, node2bucket, active_nodes):\n \"\"\"\n :param k:\n :param buckets:\n :param node2bucket:\n :return:\n \"\"\"\n\n # pick something from the smallest non-empty bucket\n\n best_node = None\n for id, bucket in sorted(buckets.items()):\n if len(bucket) != 0:\n best_node = random.sample(bucket, 1)[0]\n break\n\n if best_node is None:\n return None\n\n subtree = best_node.payload.intersection(active_nodes)\n new_node_key = min(subtree)\n\n # print('removing {}, subtree: {}'.format(best_node.key, subtree))\n\n # best_node is a leaf, so don't add it back to the bucket\n node2bucket[best_node] = None\n\n # disconnect the children of that node, remove them from active nodes\n stack = [best_node]\n while len(stack) != 0:\n node = stack.pop()\n\n active_nodes.remove(node.key)\n val = abs(node.nleaf - k)\n\n if not node.is_leaf:\n buckets[val].remove(node)\n node2bucket[node] = val\n\n if node.left is not None:\n 
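The closing comment of args.py above says (in Chinese) that person2('ll', 24, 'gz', 'it') is an error because parameters after a bare * are keyword-only and must be passed by name. A runnable demonstration:

def person2(name, age, *, city, job):
    print(name, age, city, job)

person2('ll', 24, city='gz', job='it')        # OK
try:
    person2('ll', 24, 'gz', 'it')             # keyword-only args passed positionally
except TypeError as e:
    print('TypeError:', e)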
stack.append(node.left)\n\n if node.right is not None:\n stack.append(node.right)\n\n\n best_node.key = new_node_key # the best node's key is now the key of the new_node\n\n active_nodes.add(new_node_key) # add the new node to the set of active nodes\n\n best_node.payload = {new_node_key} # update the payload of the new node\n best_node.left = None\n best_node.right = None\n best_node.is_leaf = True\n\n if best_node.parent is not None:\n best_node.parent.payload.add(new_node_key)\n\n # update the nleafs for its parents\n node = best_node\n\n while node.parent is not None:\n val = node2bucket[node.parent] # old value of parent\n buckets[val].remove(node.parent) # remove the parent from that bucket\n\n node.parent.nleaf -= node.nleaf - 1 # since all the children of the node disappears, but the node remains\n\n node.parent.payload.add(new_node_key) # each of the parents has to contain this value\n val = abs(node.parent.nleaf - k) # new value of parent\n\n buckets[val].add(node.parent) # adding the parent to a new bucket\n node2bucket[node.parent] = val # updating the node2bucket dict\n\n node = node.parent\n\n best_node.nleaf = 1 # we can't set this earlier since we are using the value in the while loop\n\n return subtree\n\n ## NOTE: nothing is removed from the payload after compression.. always take intersection of payload and active nodes\n\n\ndef funky_extract(g, root,k, mode='full'):\n \"\"\"\n Runner function for the funcky extract\n :param g: graph\n :param root: pointer to the root of the tree\n :param k: number of leaves to collapse\n :param mode: full / part / no\n\n :return: list of rules\n \"\"\"\n nodes, buckets, node2bucket = get_buckets(root=root, k=k)\n active_nodes = {node.key for node in nodes}\n\n rule_list = list()\n\n if mode == 'full':\n Rule = FullRule\n elif mode == 'part':\n Rule = PartRule\n else:\n Rule = NoRule\n\n while True:\n subtree = extract_subtree(k=k, buckets=buckets, node2bucket=node2bucket, active_nodes=active_nodes)\n if subtree is None:\n break\n\n sg = g.subgraph(subtree)\n boundary_edges = find_boundary_edges(g, subtree)\n\n rule = Rule()\n rule.lhs = len(boundary_edges)\n rule.internal_nodes = subtree\n # rule.level = lvl\n\n if mode == 'full': # in the full information case, we add the boundary edges to the RHS and contract it\n for u, v in boundary_edges:\n sg.add_edge(u, v, attr_dict={'b': True})\n rule.contract_rhs()\n\n if mode == 'part': # in the partial boundary info, we need to set the boundary degrees\n set_boundary_degrees(g, sg)\n\n rule.graph = sg\n rule.generalize_rhs()\n\n\n # next we contract the original graph\n [g.remove_node(n) for n in subtree]\n\n new_node = min(subtree)\n\n # replace subtree with new_node\n g.add_node(new_node, attr_dict={'label': rule.lhs})\n\n # rewire new_node\n subtree = set(subtree)\n\n for u, v in boundary_edges:\n if u in subtree:\n u = new_node\n if v in subtree:\n v = new_node\n g.add_edge(u, v)\n\n\n rule_list.append(rule)\n\n return rule_list\n\n","sub_path":"vrgs/funky_extract.py","file_name":"funky_extract.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"67897946","text":"MODEL_NUM = 'S12'\nLR = 1e-5\nW_DECAY = 1 # L2 Regularization\nLR_DECAY_STEP = 10 # to disable, use 100\n\nDROPOUT_P = 0.5\nCOS_MARGIN = 0.5\n\nEPOCHS = 50\nBATCH_SIZE = 32\n\nVGG_FREEZE_UNTIL = 181\n\n\"\"\"\nPoints to experiment with:\n- ReLU vs ELU. 
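The bucket machinery in funky_extract.py above keys internal tree nodes by |nleaf - k| so extract_subtree() can grab a subtree of roughly k leaves from the smallest non-empty bucket. The selection step in isolation, with a toy node table (names and leaf counts made up):

import random
from collections import defaultdict

k = 5
buckets = defaultdict(set)
for node, nleaf in [("t1", 3), ("t2", 5), ("t3", 9)]:
    buckets[abs(nleaf - k)].add(node)          # bucket key = |nleaf - k|

for key in sorted(buckets):                    # smallest difference first
    if buckets[key]:
        best = random.choice(list(buckets[key]))
        break
print(best)                                    # 't2': exactly k leaves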
ReLU > ELU for this problem\n- BN vs Dropout\n- BN vs GroupNorm\n\nTo-do:\n[] Add data\n\n\"\"\"\n","sub_path":"hyperparameters.py","file_name":"hyperparameters.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"96495413","text":"\nfrom arnold import *\nfrom common import *\nfrom info import *\n\nclass CommandLineParser:\n def __init__(self, args):\n self.args = args\n self.current = 0\n \n def Finished(self):\n return (self.current == len(self.args))\n \n def CurrentParameter(self):\n return '' if self.Finished() else self.args[self.current]\n\n def CurrentArguments(self):\n arguments = []\n index = 1\n while (self.current + index < len(self.args)) and not self.IsParameter(self.args[self.current + index]):\n arguments.append(self.args[self.current + index])\n index += 1\n return arguments\n\n def CheckArgument(self, num):\n if (len(self.CurrentArguments()) < num):\n Error('Wrong number of arguments in parameter \"%s\"' % self.CurrentParameter());\n else:\n self.current += num\n\n def IsParameter(self, str):\n # NOTE: Command line parameters are assumed to begin with a lower case letter\n return (len(str) >= 2) and (str[0] == '-') and str[1].islower()\n\ndef ParseCommandLine1(argv):\n if len(argv) < 2:\n return False\n\n parser = CommandLineParser(argv[1:])\n\n while not parser.Finished():\n param = parser.CurrentParameter()\n arguments = parser.CurrentArguments()\n \n if param == '-i':\n parser.CheckArgument(1)\n if not arguments[0] in GC.inputFileNames:\n GC.inputFileNames.append(arguments[0])\n elif param == '-o':\n parser.CheckArgument(1)\n GC.outputFileName = arguments[0]\n elif param == '-of':\n if parser.current == len(parser.args) - 1:\n GC.infoMode = InfoMode.K_INFO_LIST_OUTPUT_DRIVERS\n else: \n parser.CheckArgument(1)\n elif param == '-r':\n parser.CheckArgument(2)\n elif param == '-d':\n parser.CheckArgument(1)\n GC.ignoreList.append(arguments[0])\n elif param == '-dw':\n GC.renderWindow = False\n elif param == '-dp':\n GC.progressive = False\n elif param == '-db':\n GC.binary_ass = False\n elif param == '-v':\n if len(arguments) == 0:\n GC.verbosity = 1\n else:\n GC.verbosity = int(arguments[0])\n parser.current += 1\n elif param == '-nw':\n parser.CheckArgument(1)\n GC.maxWarnings = int(arguments[0]) \n elif param == '-log':\n GC.writeLog = True\n elif param == '-logfile':\n parser.CheckArgument(1)\n GC.logFileName = arguments[0]\n GC.writeLog = True\n elif param == '-l':\n parser.CheckArgument(1)\n if not arguments[0] in GC.libPaths:\n GC.libPaths.append(arguments[0])\n elif param == '-repeat':\n parser.CheckArgument(1)\n GC.repetitions = max(1, int(arguments[0])) \n elif param == '-turn':\n parser.CheckArgument(1)\n GC.turns = max(1, int(arguments[0])) \n elif param == '-resave':\n parser.CheckArgument(1)\n GC.resave = True\n GC.resaveFileName = arguments[0]\n elif param == '-nstdin':\n GC.ignoreStdin = True\n elif param == '-set':\n parser.CheckArgument(len(arguments))\n attrib = arguments[0]\n value = \" \".join(arguments[1:])\n GC.attributes.append((attrib, value))\n elif param == '-cm':\n parser.CheckArgument(1)\n attrib = \"ai_default_reflection_shader.color_mode\"\n value = arguments[0]\n GC.attributes.append((attrib, value))\n elif param == '-sm':\n parser.CheckArgument(1)\n attrib = \"ai_default_reflection_shader.shade_mode\"\n value = arguments[0]\n GC.attributes.append((attrib, value))\n elif param == '-om':\n parser.CheckArgument(1)\n attrib = 
\"ai_default_reflection_shader.overlay_mode\"\n value = arguments[0]\n GC.attributes.append((attrib, value))\n elif param == '-nodes':\n GC.infoMode = InfoMode.K_INFO_NODES\n if len(arguments) == 0:\n GC.infoSort = 0\n else:\n parser.CheckArgument(1)\n if arguments[0] == 'n':\n GC.infoSort = 0\n elif arguments[0] == 't':\n GC.infoSort = 1\n else:\n Error('Unknown sort type \"%s\"' % arguments[0])\n elif param == '-info':\n GC.infoMode = InfoMode.K_INFO_NODE\n parser.CheckArgument(1)\n if arguments[0] == 'u':\n parser.CheckArgument(1)\n GC.infoData = arguments[1]\n GC.infoSort = 0\n elif arguments[0] == 'n':\n parser.CheckArgument(1)\n GC.infoData = arguments[1]\n GC.infoSort = 1\n else:\n GC.infoData = arguments[0]\n GC.infoSort = 1\n elif param == '-tree':\n parser.CheckArgument(1)\n elif param == '-utest':\n result = AiTest()\n sys.exit(K_SUCCESS if result else K_ERROR)\n elif param == '-av':\n print(AiGetVersionString())\n sys.exit(K_SUCCESS)\n elif param == '-h' or param == '-help' or param == '--help':\n DisplayHelp()\n sys.exit(K_SUCCESS)\n elif param == '-licensecheck':\n GC.infoMode = InfoMode.K_INFO_LICENSE\n else:\n ext = os.path.splitext(param)[1]\n if ext == '.ass' or ext == '.gz':\n if not param in GC.inputFileNames:\n GC.inputFileNames.append(param)\n\n parser.current += 1 \n \n return True\n\ndef ParseCommandLine2(argv):\n options = AiUniverseGetOptions()\n \n if not options:\n return False\n \n if len(argv) < 2:\n return False\n\n parser = CommandLineParser(argv[1:])\n\n while not parser.Finished():\n param = parser.CurrentParameter()\n arguments = parser.CurrentArguments()\n \n if param == '-i':\n parser.CheckArgument(1)\n if param == '-o':\n parser.CheckArgument(1)\n elif param == '-of':\n parser.CheckArgument(1)\n format = arguments[0]\n GC.driverType = AiFindDriverType(format)\n\n if not GC.driverType:\n Error(\"Output file format not recognized\")\n elif param == '-c':\n parser.CheckArgument(1)\n cam = AiNodeLookUpByName(arguments[0])\n if cam:\n AiNodeSetPtr(options, \"camera\", cam)\n else:\n Error(\"Camera %s does not exist\", arguments[0])\n elif param == '-sh':\n parser.CheckArgument(2)\n GC.shutterStart = float(arguments[0])\n GC.shutterEnd = float(arguments[1])\n elif param == '-fov':\n parser.CheckArgument(1)\n GC.fov = float(arguments[0])\n elif param == '-e':\n parser.CheckArgument(1)\n GC.camera_exposure = float(arguments[0])\n elif param == '-r':\n parser.CheckArgument(2)\n AiNodeSetInt(options, \"xres\", int(arguments[0]))\n AiNodeSetInt(options, \"yres\", int(arguments[1]))\n elif param == '-rg':\n parser.CheckArgument(4)\n AiNodeSetInt(options, \"region_min_x\", int(arguments[0]))\n AiNodeSetInt(options, \"region_min_y\", int(arguments[1]))\n AiNodeSetInt(options, \"region_max_x\", int(arguments[2]))\n AiNodeSetInt(options, \"region_max_y\", int(arguments[3]))\n elif param == '-sr':\n parser.CheckArgument(1);\n s = float(arguments[0])\n # scale width and height\n AiNodeSetInt(options, \"xres\", int(float(AiNodeGetInt(options, \"xres\")) * s)); \n AiNodeSetInt(options, \"yres\", int(float(AiNodeGetInt(options, \"yres\")) * s)); \n # reset render region \n AiNodeSetInt(options, \"region_min_x\", -1); \n AiNodeSetInt(options, \"region_min_y\", -1); \n AiNodeSetInt(options, \"region_max_x\", -1); \n AiNodeSetInt(options, \"region_max_y\", -1); \n elif param == '-t':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"threads\", int(arguments[0]))\n elif param == '-tp' and platform.system().lower() == 'windows':\n parser.CheckArgument(1)\n 
AiNodeSetInt(options, \"thread_priority\", int(arguments[0]))\n elif param == '-bs':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"bucket_size\", int(arguments[0]))\n elif param == '-bc':\n parser.CheckArgument(1)\n pentry = AiNodeEntryLookUpParameter(AiNodeEntryLookUp(\"options\"), \"bucket_scanning\")\n enum = AiParamGetEnum(pentry)\n scanType = AiEnumGetValue(enum, arguments[0])\n if scanType != -1:\n AiNodeSetInt(options, \"bucket_scanning\", scanType)\n else:\n Error('Bucket scanning not recognized')\n elif param == '-as':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"AA_samples\", int(arguments[0]))\n elif param == '-af':\n parser.CheckArgument(2)\n if (arguments[0] == 'box' or\n arguments[0] == 'catrom2D' or\n arguments[0] == 'catrom' or\n arguments[0] == 'cone' or\n arguments[0] == 'cook' or\n arguments[0] == 'cubic' or\n arguments[0] == 'disk' or\n arguments[0] == 'gaussian' or\n arguments[0] == 'mitnet' or\n arguments[0] == 'sinc' or\n arguments[0] == 'triangle' or\n arguments[0] == 'video'):\n GC.filterTypeName = \"%s_filter\" % arguments[0]\n GC.filterWidth = float(arguments[1])\n else:\n Error('Anti-aliasing filter not recognized')\n elif param == '-asc':\n parser.CheckArgument(1)\n AiNodeSetFlt(options, \"AA_sample_clamp\", float(arguments[0]))\n elif param == '-ar':\n parser.CheckArgument(1)\n AiNodeSetFlt(options, \"aspect_ratio\", float(arguments[0]))\n elif param == '-g':\n parser.CheckArgument(1)\n GC.gamma = float(arguments[0])\n elif param == '-td':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"GI_total_depth\", int(arguments[0]))\n elif param == '-rfl':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"GI_reflection_depth\", int(arguments[0]))\n elif param == '-rfr':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"GI_refraction_depth\", int(arguments[0]))\n elif param == '-dif':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"GI_diffuse_depth\", int(arguments[0]))\n elif param == '-glo':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"GI_glossy_depth\", int(arguments[0]))\n elif param == '-ds':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"GI_diffuse_samples\", int(arguments[0]))\n elif param == '-gs':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"GI_glossy_samples\", int(arguments[0]))\n elif param == '-f':\n GC.flatAll = True \n elif param == '-tg':\n parser.CheckArgument(1)\n AiNodeSetFlt(options, \"texture_gamma\", float(arguments[0]))\n elif param == '-lg':\n parser.CheckArgument(1)\n AiNodeSetFlt(options, \"light_gamma\", float(arguments[0]))\n elif param == '-sg':\n parser.CheckArgument(1)\n AiNodeSetFlt(options, \"shader_gamma\", float(arguments[0]))\n elif param == '-d':\n parser.CheckArgument(1)\n elif param == '-it':\n AiNodeSetBool(options, \"ignore_textures\", True)\n elif param == '-is':\n AiNodeSetBool(options, \"ignore_shaders\", True)\n elif param == '-ib':\n AiNodeSetPtr(options, \"background\", POINTER(AtNode)())\n elif param == '-ia':\n AiNodeSetBool(options, \"ignore_atmosphere\", True)\n elif param == '-il':\n AiNodeSetBool(options, \"ignore_lights\", True)\n elif param == '-id':\n AiNodeSetBool(options, \"ignore_shadows\", True)\n elif param == '-isd':\n AiNodeSetBool(options, \"ignore_subdivision\", True)\n elif param == '-idisp':\n AiNodeSetBool(options, \"ignore_displacement\", True)\n elif param == '-ibump':\n AiNodeSetBool(options, \"ignore_bump\", True)\n elif param == '-imb':\n AiNodeSetBool(options, \"ignore_motion_blur\", True)\n elif param == '-idof':\n AiNodeSetBool(options, \"ignore_dof\", 
True)\n elif param == '-isss':\n AiNodeSetBool(options, \"ignore_sss\", True)\n elif param == '-flat':\n AiNodeSetBool(options, \"ignore_smoothing\", True)\n elif param == '-idirect':\n AiNodeSetBool(options, \"ignore_direct_lighting\", True) \n elif param == '-sd':\n parser.CheckArgument(1)\n AiNodeSetInt(options, \"max_subdivisions\", int(arguments[0]))\n elif param == '-l':\n parser.CheckArgument(1)\n elif param == '-repeat':\n parser.CheckArgument(1)\n elif param == '-turn':\n parser.CheckArgument(1)\n elif param == '-resave':\n parser.CheckArgument(1)\n elif param == '-forceexpand':\n GC.openProcedurals = True\n AiNodeSetBool(options, \"procedural_force_expand\", True)\n elif param == '-nstdin':\n pass\n elif param == '-set':\n parser.CheckArgument(len(arguments))\n elif param == '-cm':\n parser.CheckArgument(1)\n elif param == '-sm':\n parser.CheckArgument(1)\n elif param == '-om':\n parser.CheckArgument(1)\n elif param == '-tree':\n parser.CheckArgument(1)\n error = PrintShadingTree(arguments[0])\n sys.exit(K_SUCCESS) \n elif param == '-dw':\n pass\n elif param == '-dp':\n pass\n elif param == '-db':\n pass\n elif param == '-v':\n if len(arguments) == 0:\n GC.verbosity = 1\n else:\n GC.verbosity = int(arguments[0])\n parser.current += 1\n elif param == '-nw':\n parser.CheckArgument(1)\n elif param == '-log':\n pass\n elif param == '-logfile':\n pass\n elif param == '-sl':\n AiNodeSetBool(options, \"skip_license_check\", True)\n else:\n ext = os.path.splitext(param)[1]\n if ext == '.ass' or ext == '.gz':\n pass\n elif (GC.outputFileName == ''):\n GC.outputFileName = param\n else:\n Error('Command line parameter \"%s\" not recognized' % param)\n\n parser.current += 1 \n \n return True\n","sub_path":"maya/plug-ins/mtoa_1.2.7.3_maya2014/scripts/pykick/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":14306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"470158444","text":"import os\r\nimport subprocess\r\nimport requests\r\n\r\n\r\ndef prepare(file_delete=False):\r\n # 실행 후 zip 파일을 삭제할 것인가를 물음.\r\n file_delete = file_delete\r\n\r\n # 파일 리스트를 가져온다.\r\n file_list = os.listdir()\r\n\r\n # 실행에 필요한 파일들은 리스트에서 제외.\r\n if 'sample.wmv' in file_list:\r\n file_list.remove('sample.wmv')\r\n else:\r\n # sample.wmv 파일이 없을 경우엔 깃허브에서 받아올 수 있도록 하였음.\r\n print('sample 파일 받아오는 중')\r\n url = 'https://raw.githubusercontent.com/pertinency/google_drive_uploader/master/sample.wmv'\r\n res = requests.get(url=url, allow_redirects=True)\r\n with open('sample.wmv', 'wb') as f:\r\n f.write(res.content)\r\n if '새 폴더' in file_list:\r\n file_list.remove('새 폴더')\r\n if 'bat.bat' in file_list:\r\n file_list.remove('bat.bat')\r\n if 'drive_uploader.py' in file_list:\r\n file_list.remove('drive_uploader.py')\r\n\r\n print(\"리스트는 : \\n{}\".format(file_list))\r\n\r\n # bat 파일 작성\r\n with open(\"bat.bat\", 'w') as bat_file:\r\n for file_name in file_list:\r\n if \"\\'\" in file_name:\r\n file_name.replace(\"\\'\", \"\\\\\\'\")\r\n my_string = \"copy /b \\\"sample.wmv\\\"+\\\"{0}\\\" \\\"{1}.wmv\\\"\\n\".format(file_name, file_name)\r\n del_string = \"del \\\"{}\\\"\".format(file_name)\r\n bat_file.write(my_string)\r\n if file_delete:\r\n bat_file.write(del_string)\r\n\r\n # bat 파일 실행\r\n subprocess.call('bat.bat')\r\n\r\n # 준비파일 삭제\r\n if os.path.isfile('bat.bat'):\r\n os.remove('bat.bat')\r\n if os.path.isfile('sample.wmv'):\r\n os.remove('sample.wmv')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
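Both parse passes in command_line.py above rely on one lookahead contract: a token is a parameter iff it starts with '-' plus a lowercase letter, and CurrentArguments() gathers everything up to the next such token. The grouping idea in miniature (the argv is made up, and this sketch shows only the classification, not CheckArgument's cursor bookkeeping):

def is_parameter(tok):
    # mirrors CommandLineParser.IsParameter
    return len(tok) >= 2 and tok[0] == '-' and tok[1].islower()

argv = ["-r", "640", "480", "-v", "2"]
groups, current = {}, None
for tok in argv:
    if is_parameter(tok):
        current = tok
        groups[current] = []
    elif current is not None:
        groups[current].append(tok)
print(groups)   # {'-r': ['640', '480'], '-v': ['2']}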
prepare()\r\n","sub_path":"drive_uploader.py","file_name":"drive_uploader.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"234945293","text":"xmin = -10\nxmax = 10\nymin = -10\nymax = 10\nrangex = xmax - xmin\nrangey = ymax - ymin\n\n\ndef setup():\n # Scale factors for scaling down graph\n global xscl, yscl\n size(600, 600)\n xscl = width / rangex\n yscl = -height / rangey # Graph upside down if positive\n \n \ndef draw():\n global xscl, yscl\n background(255) # White\n # Move origin from top-left of screen to center\n translate(width/2, height/2)\n grid(xscl, yscl)\n graphFunction(quadratic)\n \n \ndef parabola(x):\n return x**2\n\n\ndef cubic(x):\n return 6*x**3 + 31*x**2 + 3*x - 10\n\n\ndef quadratic(x):\n return 2*x**2 + 7*x - 15\n \n \ndef graphFunction(f):\n x = xmin\n while x <= xmax:\n stroke(255, 0, 0)\n line(x*xscl, f(x)*yscl, (x+0.1)*xscl, f(x+0.1)*yscl)\n x += 0.1\n\n\ndef grid(xscl, yscl):\n # Grid lines\n strokeWeight(1)\n stroke(0, 255, 255) # Cyan\n for i in range(xmin, xmax + 1): # Vertical lines\n line(i*xscl, ymin*yscl, i*xscl, ymax*yscl)\n for i in range(ymin, ymax + 1): # Horizontal lines\n line(xmin*xscl, i*yscl, xmax*xscl, i*yscl)\n \n # Axes\n stroke(0) # Black\n line(0, ymin*yscl, 0, ymax*yscl)\n line(xmin*xscl, 0, xmax*xscl, 0)\n","sub_path":"grid_pyde/grid_pyde.pyde","file_name":"grid_pyde.pyde","file_ext":"pyde","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"417412732","text":"from scipy.io import loadmat\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport Plotter as plotter\nimport Exporter as exporter\nimport pandas as pd\n\t\nfile = [ \"C_Easy1_noise01\",\n \"C_Easy1_noise02\",\n \"C_Easy1_noise03\",\n \"C_Easy1_noise04\",\n \"C_Easy1_noise005\",\n \"C_Easy2_noise01\",\n \"C_Easy2_noise02\",\n \"C_Difficult1_noise01\",\n \"C_Test_LFPcorr_Easy2_noise015\",\n \"C_Drift_Easy2_noise015\" ]\n\ndef __runTest(file):\n c0, c1, c_ret = [], [], []\n f= loadmat(\"../../Simulator/\"+file+\".mat\")\n\n arrTime = f['spike_times'][0][0][0]\n arrClass = f['spike_class'][0][1][0]\n arrData = f['data'][0]\n\n for i in range(0, len(arrTime)):\n clss = c0 if(arrClass[i] == 0) else c1\n clss.append((round(arrData[arrTime[i]],5), arrTime[i], arrClass[i]))\n\n name = \"../Resources/R2-Real/Test-\"+ file +\".csv\"\n c_ret.append(c0)\n c_ret.append(c1)\n exporter.exportCaseTest(c_ret, name)\n plotter.graphSample(c0, c1)\n plt.show()\n\n\nfor i in file: __runTest(i)\n\n\n\n","sub_path":"TEST/GeneratorCaseTestReal.py","file_name":"GeneratorCaseTestReal.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"250434503","text":"\"\"\"\nНаписать реализацию игры крестики нолики\nДва человека\nОдин компьютер\nЗадание хода – координатами в консоль\nВывод «доски» после каждого хода\nПроверка допустимости хода\nПроверка условий победы\n\"\"\"\n\nrowset = {\"A\":0, \"B\":1, \"C\":2, \"D\":3, \"E\":4, \"F\":5, \"G\":6, \"H\":8}\nrowsetrev = {0:\"A\", 1:\"B\", 2:\"C\", 3:\"D\", 4:\"E\", 5:\"F\", 6:\"G\", 8:\"H\"}\ntable = []\nxsize = 3\nysize = 3\nX = 1\nO = 2\nE = 0\ncurrPlayer = X\n\n\ndef initTable():\n global currPlayer\n table = [[0 for _ in range(xsize)] for _ in range(ysize)]\n\n currPlayer = X\n\n return table\n\n\ndef drawCell(cell):\n if cell == X:\n xo = \"[X]\"\n elif cell == O:\n xo = \"[O]\"\n 
else:\n xo = \"[ ]\"\n print(xo ,end='')\n\ndef drawHdrXY(pos):\n if pos == 0:\n print(\" \", end='')\n\n xo = \"[\"+str(pos)+\"]\"\n print(xo ,end='')\n\ndef changePlayer():\n global currPlayer\n if currPlayer == X:\n currPlayer=O\n else:\n currPlayer=X\n\n\n\ndef drawTable():\n abc = (\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\")\n\n #print(\"\\n\")\n\n for row in range(xsize):\n drawHdrXY(row)\n print('', end='\\n')\n\n i = 0\n for row in table:\n print(\"[\"+abc[i]+\"]\", end='')\n i += 1\n for c in row:\n drawCell(c)\n print('', end='\\n')\n\ndef chkCell(row, col):\n print('rc', row, col)\n if row < 0 or row > ysize:\n return False\n elif col < 0 or col > xsize:\n return False\n elif table[row][col] != E:\n return False\n else:\n return True\n\ndef writeCell(row, col):\n table[row][col] = currPlayer\n changePlayer()\n return table\n\ndef checkOnWin():\n\n def chkCount(winline):\n isWin = False\n if winline == \"@ROW\":\n wininfo = winline + \":\" + rowsetrev[yrow]\n elif winline == \"@COL\":\n wininfo = winline + \":\" + str(xcol)\n elif winline == \"@DIAG-A0C2\":\n wininfo = winline\n elif winline == \"@DIAG-C0A2\":\n wininfo = winline\n\n if playerX == xsize:\n print(\"PLAYER-X WIN!!! \", wininfo)\n isWin = True\n elif playerO == xsize:\n print(\"PLAYER-O WIN!!! \", wininfo)\n isWin = True\n return isWin\n\n playerWin = False\n\n # проверяем rows\n for yrow in range(ysize):\n playerX = 0\n playerO = 0\n for xcol in range(xsize):\n if table[yrow][xcol] == X:\n playerX += 1\n elif table[yrow][xcol] == O:\n playerO += 1\n ret = chkCount(\"@ROW\")\n if not playerWin:\n playerWin = ret\n\n # проверяем cols\n for xcol in range(xsize):\n playerX = 0\n playerO = 0\n for yrow in range(ysize):\n if table[yrow][xcol] == X:\n playerX += 1\n elif table[yrow][xcol] == O:\n playerO += 1\n\n ret = chkCount(\"@COL\")\n if not playerWin:\n playerWin = ret\n\n\n # проверяем diag a0c2\n playerX = 0\n playerO = 0\n for xcol in range(xsize):\n yrow = xcol\n if table[yrow][xcol] == X:\n playerX += 1\n elif table[yrow][xcol] == O:\n playerO += 1\n\n ret = chkCount(\"@DIAG-A0C2\")\n if not playerWin:\n playerWin = ret\n\n # проверяем diag c0a2\n playerX = 0\n playerO = 0\n for xcol in range(xsize):\n yrow = ysize - xcol -1\n if table[yrow][xcol] == X:\n playerX += 1\n elif table[yrow][xcol] == O:\n playerO += 1\n\n ret = chkCount(\"@DIAG-C0A2\")\n if not playerWin:\n playerWin = ret\n\n return playerWin\n\ndef run(rdata):\n #print(rdata)\n cy = rowset[rdata[0]]\n cx = int(rdata[1])\n if chkCell(cy, cx):\n writeCell(cy, cx)\n else:\n print(\"\\nОШИБКА: проверьте свой ход (неверные координаты или ячейка уже была записана ранее)\")\n drawTable()\n ret = checkOnWin()\n return not ret\n\n\nisPlayOn = True\nwhile isPlayOn:\n table = initTable()\n drawTable()\n isRoundOn = True\n while isRoundOn:\n rdata = list(input(\"\\n[Игрок #\"+str(currPlayer)+\"]\\nВаш ход (формат A0..C2) ->\").upper())\n isRoundOn = run(rdata)\n\n isPlayOn = input(\"Начать новую партию? [0 - Exit 1 - Continue]\") != '0'\n\nprint(\"*** ~^ GAME OVER ! 
^~ ***\")","sub_path":"14_XOXOXO.py","file_name":"14_XOXOXO.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"146337699","text":"from tkinter import *\r\nfrom tkinter import scrolledtext\r\ndef clear():\r\n txt.delete(1.0,END)\r\n # txt.insert(INSERT,'You text goes here') # to append to text\r\n\r\nwindow = Tk()\r\nwindow.title(\"Welcome to LikeGeeks app\")\r\nwindow.geometry('350x200')\r\nbtn1 = Button(window,command=clear, text=\"clear\")\r\nbtn1.grid(column =0, row=1)\r\ntxt = scrolledtext.ScrolledText(window,width=40,height=10)\r\ntxt.grid(column=0,row=0)\r\n\r\nwindow.mainloop()","sub_path":"tkinter/tkinter09[scrolledtext].py","file_name":"tkinter09[scrolledtext].py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"33335343","text":"### Importando as bibliotecas.\r\n\r\nimport pandas as pd ## manipulação de dataframes em python\r\nimport time ## manipulação de tempo\r\nimport numpy as np ## manipulação de arrays e vetores\r\nimport talib as ta ## criação de indicadores técnicos\r\nfrom datetime import datetime ## manipulação de datas em pyton\r\nimport datetime as dt ## manipulação de datas em python\r\nfrom buy_function import buy ## função de compra de ativos\r\nfrom sell_function import sell ## função de vendas de ativos\r\nfrom download_data import download_data ## função para download dos dados em tempo real\r\nfrom config_param import config_param ## função de configuração da estratégia de compra/venda\r\nimport warnings ## filtros para avisos\r\nwarnings.filterwarnings('ignore') ## ignorar aviso\r\nimport MetaTrader5 as mt5 ## biblioteca do MT5 para Python\r\nimport pytz ## manipulação de time zones em python\r\n\r\n\r\n##A) Iniciando uma sessão do MT5 com um looping\r\n\r\nRUN=1\r\nwhile RUN==1:\r\n\r\n # Estabelecendo uma conexão com o Terminal do MetaTrader5\r\n if not mt5.initialize():\r\n print(\"initialize() failed, error code =\",mt5.last_error())\r\n quit()\r\n \r\n ## Definindo o ativo usado no robô\r\n Ativo='CCMH21'\r\n\r\n ## Ajustando a quantidade de lotes que serão comprados/vendidos\r\n lot= 1\r\n\r\n ## Ajustando o timeframe (M1 para 1 minuto / M5 para 5 minutos / D1 para diário\r\n timeframe = mt5.TIMEFRAME_D1\r\n\r\n\r\n ## Carregando as cotações em tempo real através da função download_data\r\n xfh = download_data(Ativo,timeframe) ## Parâmetros: Ativo e o timeframe configurado\r\n\r\n #Criando um novo objeto que recebe o dataframe com os dados em tempo real\r\n stocks = xfh.copy()\r\n\r\n # PARTE IV - CRIANDO A ESTRATÉGIA\r\n\r\n #ETAPA II) Suavização da Série\r\n\r\n # a) Suavização da série\r\n\r\n suavização = 5\r\n\r\n # b) Gerando as features OHLC suavizadas\r\n\r\n stocks['EMAC'] = ta.EMA(stocks['Adj Close'], timeperiod=suavização) # Suavização da série de fechamento\r\n stocks['EMAO'] = ta.EMA(stocks['Open'], timeperiod=suavização) # Suavização da série de abertura\r\n stocks['EMAH'] = ta.EMA(stocks['High'], timeperiod=suavização) # Suavização da série de Altas\r\n stocks['EMAL'] = ta.EMA(stocks['Low'], timeperiod=suavização) # Suavização da série de Baixas\r\n stocks['EMAV'] = ta.EMA(stocks['Volume'], timeperiod=suavização) # Suavização da série de Volume\r\n\r\n #ETAPA III)\r\n\r\n ##-- Gerando os Osciladores e Indicadores de Tendência\r\n\r\n # 1) RSI - Relative Strength Index\r\n stocks['RSI'] = ta.RSI(stocks['EMAC'], timeperiod=14)\r\n # 2) MACD - Moving 
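checkOnWin() in the tic-tac-toe record above walks rows, columns and both diagonals with explicit counters; for a 3x3 board the same test compresses to a few comprehensions. (Separately, rowset at the top of that file maps "H" to 8 rather than 7, which looks like a typo.) A compact equivalent:

def winner(t):
    lines = [t[r] for r in range(3)]                                         # rows
    lines += [[t[r][c] for r in range(3)] for c in range(3)]                 # columns
    lines += [[t[i][i] for i in range(3)], [t[i][2 - i] for i in range(3)]]  # diagonals
    for line in lines:
        if line[0] != 0 and line.count(line[0]) == 3:
            return line[0]          # 1 = X, 2 = O
    return None

print(winner([[1, 1, 1], [0, 2, 0], [2, 0, 2]]))   # 1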
Average Convergence/Divergence\r\n    stocks['macd'], stocks['macdsignal'], stocks['macdhist'] = ta.MACD(stocks['EMAC'], fastperiod=12, slowperiod=26, signalperiod=9)\r\n    # 3) Parabolic SAR\r\n    stocks['SAR'] = ta.SAR(stocks['EMAH'], stocks['EMAL'], 0.02, 0.3)\r\n    stocks['SAREXT'] = ta.SAREXT(stocks['EMAH'], stocks['EMAL'], 0.02, 0.3)\r\n    # 4) CCI - Commodity Channel Index\r\n    stocks['CCI'] = ta.CCI(stocks['EMAH'], stocks['EMAL'], stocks['EMAC'], timeperiod=14)\r\n    # 5) SMA - Simple Moving Average\r\n    sht = 5\r\n    lng = 22\r\n    stocks['SHT'] = stocks['Adj Close'].rolling(window=sht).mean()\r\n    stocks['LNG'] = stocks['Adj Close'].rolling(window=lng).mean()\r\n    # 6) Bollinger Bands\r\n    stocks['UPP'], stocks['MIDD'], stocks['LOW'] = ta.BBANDS(stocks['EMAC'], timeperiod=6, nbdevup=4, nbdevdn=4, matype=0)\r\n    # 7) Top & Bottom\r\n    stocks['Close20d'] = stocks['Adj Close'].shift(20)\r\n    stocks['Close30d'] = stocks['Adj Close'].shift(30)\r\n    stocks['Close40d'] = stocks['Adj Close'].shift(40)\r\n    stocks['Close50d'] = stocks['Adj Close'].shift(50)\r\n    stocks['Close60d'] = stocks['Adj Close'].shift(60)\r\n    # 8) TOP & BOTTOM\r\n    Length = 60\r\n    stocks['MIN_' + str(Length)] = list(np.zeros(len(stocks)))\r\n    stocks['MAX_' + str(Length)] = list(np.zeros(len(stocks)))\r\n    for i in range(len(stocks) - Length):\r\n        stocks['MIN_' + str(Length)][i + Length] = stocks['Adj Close'][i:i + Length].min()\r\n        stocks['MAX_' + str(Length)][i + Length] = stocks['Adj Close'][i:i + Length].max()\r\n    stocks.dropna(axis=0, inplace=True)\r\n\r\n    #===================================================================================================================\r\n    # THE BUY/SELL STRATEGY MUST BE INSERTED IN THE BLOCK BELOW\r\n    #===================================================================================================================\r\n\r\n    stocks['Status'] = stocks['SHT'] > stocks['LNG']\r\n\r\n    # ==================================================================================================================\r\n    # ==================================================================================================================\r\n\r\n\r\n    # Running the 'config_param' function and passing the series\r\n\r\n    stocks=config_param(stocks, sht, lng, Length)\r\n\r\n    ## Creating variables with the latest values of the strategy indicators\r\n\r\n    Var= stocks['action'].tail(1).values\r\n    Var=Var[0]\r\n    #Var7 = stocks['has_action'].tail(1).values\r\n    #Var7 = Var7[0]\r\n    Var1 = stocks['Adj Close'].tail(1).values\r\n    Var1 = Var1[0]\r\n    Var2 = stocks['MIN_' + str(Length)].tail(1).values\r\n    Var2 = Var2[0]\r\n    Var3 = stocks['MAX_' + str(Length)].tail(1).values\r\n    Var3 = Var3[0]\r\n    #Var4 = stocks['UPP'].tail(1).values\r\n    #Var4 = Var4[0]\r\n    #Var5 = stocks['LOW'].tail(1).values\r\n    #Var5 = Var5[0]\r\n\r\n\r\n    #Buy orders\r\n    if Var=='buy':\r\n        \r\n        result,price= buy(Ativo,lot)\r\n        #print(result)\r\n        import time\r\n        time.sleep(60)\r\n\r\n    #Sell orders\r\n    if Var=='sell':\r\n        \r\n        result,price= sell(Ativo,lot)\r\n        #print(result)\r\n        import time\r\n        time.sleep(60)\r\n\r\n    #Printing the values of each indicator\r\n\r\n\r\n    if Var1 < Var2:\r\n        print(f'Entry Point - Buy //// Price {Var1:.0f}')\r\n        print(f'Price {Var1:.0f} < {Var2:.0f} ({Length}-day minimum)')\r\n    elif Var1 > Var3:\r\n        print(f'Entry Point - Sell //// Price {Var1:.2f}')\r\n        print(f'Price {Var1:.0f} > {Var3:.0f} ({Length}-day maximum)')\r\n    else:\r\n        print('No Entry Point --- :( ')\r\n        print(f'Minimum - {Var2:.0f} < Price {Var1:.0f} < Maximum 
{Var3:.0f}')\r\n\r\n\r\n '''\r\n if Var1 > Var4:\r\n print('Price: ', Var1, '>', 'UPP: ', Var4, ' & ', 'CCI > 100 ', Var5)\r\n elif Var1 < Var5:\r\n print('Price: ', Var1, '<', 'LOW: ', Var4, ' & ', 'CCI < -100 ', Var5)\r\n else:\r\n print('No entry point {::} :(')\r\n '''\r\n #import time\r\n\r\n #time.sleep(60) # Sleep for 1 seconds\r\n\r\n\r\n\r\n","sub_path":"turtle_v1/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"12362242","text":"import subprocess\nimport sys, os\nimport gc\nimport json\nimport tensorflow as tf\nimport numpy as np\nfrom absl import flags\nfrom absl.flags import FLAGS\nfrom enum import Enum\nfrom queue import Queue\nimport math\n# from ctypes import CDLL\n# from ctypes import c_void_p, byref, cast, POINTER, c_char, c_size_t, c_int\n# from ctypes.util import find_library\nfrom imghdr import what\nimport tensorflow_text\n\nfrom time import sleep, time\nfrom sysv_ipc import MessageQueue, IPC_CREX, BusyError\nfrom threading import Thread, Lock, Event\nfrom threading import Semaphore as pySem\n# from concurrent.futures import ThreadPoolExecutor\n\nfrom pocket_tf_if import PocketControl, TFFunctions, ReturnValue, TFDataType, CLIENT_TO_SERVER, SERVER_TO_CLIENT, SharedMemoryChannel\n# os.chdir('/root/yolov3-tf2')\n# LIBC = CDLL(find_library('c'))\n\nGLOBAL_SLEEP = 0.01\nLOCAL_SLEEP = 0.0001\nPOCKETD_SOCKET_PATH = '/tmp/pocketd.sock'\nDEVICE_LIST_AVAILABLE = False\nDEVICE_LIST = []\nADD_INTERVAL = 0.01\nDEDUCT_INTERVAL = 0.01\ndef debug(*args):\n import inspect\n filename = inspect.stack()[1].filename\n lineno = inspect.stack()[1].lineno\n caller = inspect.stack()[1].function\n print(f'debug>> [{filename}:{lineno}, {caller}]', *args)\n\nMEM_SEM = pySem()\nCPU_SEM = pySem()\n\nclass IsolationControl:\n PRIVATEQUEUE = True if os.environ.get('PRIVATEQUEUE', 'on') == 'on' else False\n CAPABILITIESLIST = True if os.environ.get('ACL', 'on') == 'on' else False\n\nclass Utils:\n @staticmethod\n def get_container_id():\n cg = open('/proc/self/cgroup')\n content = cg.readlines()\n for line in content:\n if 'docker' in line:\n cid = line.strip().split('/')[-1]\n return cid\n \n @staticmethod\n def round_up_to_even(f):\n return int(math.ceil(f / 2.) 
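# --- Illustrative sketch (editor addition, not part of the records above) ---
# The robot.py record above computes stocks['Status'] = stocks['SHT'] > stocks['LNG']
# and relies on the external config_param() to derive the 'action' column; that
# helper is not included in this file, so the following is a hypothetical stand-in
# showing how an SMA-crossover signal is typically turned into buy/sell actions.
import pandas as pd

def crossover_actions(stocks: pd.DataFrame) -> pd.DataFrame:
    # 'buy' on the bar where the short SMA crosses above the long SMA,
    # 'sell' where it crosses back below, 'hold' everywhere else.
    status = stocks['SHT'] > stocks['LNG']
    crossed_up = status & ~status.shift(1, fill_value=False)
    crossed_down = ~status & status.shift(1, fill_value=False)
    stocks['action'] = 'hold'
    stocks.loc[crossed_up, 'action'] = 'buy'
    stocks.loc[crossed_down, 'action'] = 'sell'
    return stocks
# --- end sketch ---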
* 2)\n\n @staticmethod\n def measure_resource_usage():\n stat_dict = {}\n with open('/sys/fs/cgroup/cpuacct/cpuacct.usage') as f:\n stat_dict['cputime.total'] = f.read()\n with open('/sys/fs/cgroup/cpuacct/cpuacct.usage_sys') as f:\n stat_dict['cputime.sys'] = f.read()\n with open('/sys/fs/cgroup/cpuacct/cpuacct.usage_user') as f:\n stat_dict['cputime.user'] = str(int(stat_dict['cputime.total']) - int(stat_dict['cputime.sys']))\n with open('/sys/fs/cgroup/memory/memory.max_usage_in_bytes') as f:\n stat_dict['memory.max_usage'] = f.read()\n with open('/sys/fs/cgroup/memory/memory.memsw.max_usage_in_bytes') as f:\n stat_dict['memory.memsw.max_usage'] = f.read()\n with open('/sys/fs/cgroup/memory/memory.failcnt') as f:\n stat_dict['memory.failcnt'] = f.read()\n with open('/sys/fs/cgroup/memory/memory.stat') as f:\n for line in f:\n if 'total_pgfault' in line:\n value = line.split()[-1]\n stat_dict['memory.stat.pgfault'] = value\n elif 'total_pgmajfault' in line:\n value = line.split()[-1]\n stat_dict['memory.stat.pgmajfault'] = value\n return stat_dict\n\n @staticmethod\n def get_memory_limit(client_id = None):\n if client_id != None:\n with open(f'/cg/memory/docker/{client_id}/memory.limit_in_bytes', 'r') as limit_in_bytes:\n memory_limit = int(limit_in_bytes.read().strip())\n return memory_limit\n else:\n with open('/sys/fs/cgroup/memory/memory.limit_in_bytes', 'r') as limit_in_bytes:\n memory_limit = int(limit_in_bytes.read().strip())\n return memory_limit\n\n @staticmethod\n def get_memory_usage(client_id = None):\n if client_id != None:\n with open(f'/cg/memory/docker/{client_id}/memory.usage_in_bytes', 'r') as usage_in_bytes:\n memory_usage = int(usage_in_bytes.read().strip())\n else:\n with open('/sys/fs/cgroup/memory/memory.usage_in_bytes', 'r') as usage_in_bytes:\n memory_usage = int(usage_in_bytes.read().strip())\n return memory_usage\n\n @staticmethod\n def get_cpu_limit(client_id = None):\n if client_id != None:\n with open(f'/cg/cpu/docker/{client_id}/cpu.cfs_period_us', 'r') as cfs_period_us:\n cfs_period_us = int(cfs_period_us.read().strip())\n with open(f'/cg/cpu/docker/{client_id}/cpu.cfs_quota_us', 'r') as cfs_quota_us:\n cfs_quota_us = int(cfs_quota_us.read().strip())\n return cfs_quota_us, cfs_period_us\n else:\n with open(f'/sys/fs/cgroup/cpu/cpu.cfs_period_us', 'r') as cfs_period_us:\n cfs_period_us = int(cfs_period_us.read().strip())\n with open(f'/sys/fs/cgroup/cpu/cpu.cfs_quota_us', 'r') as cfs_quota_us:\n cfs_quota_us = int(cfs_quota_us.read().strip())\n return cfs_quota_us, cfs_period_us\n\n ### remove\n @staticmethod\n def request_memory_move():\n with open('/sys/fs/cgroup/memory/memory.limit_in_bytes', 'r') as limit_in_bytes:\n memory_limit = float(limit_in_bytes.read().strip()) * RSRC_REALLOC_RATIO\n return memory_limit\n\n ### remove \n @staticmethod\n def request_cpu_move():\n with open(f'/sys/fs/cgroup/cpu/cpu.cfs_period_us', 'r') as cfs_period_us:\n cpu_denominator = float(cfs_period_us.read().strip())\n with open(f'/sys/fs/cgroup/cpu/cpu.cfs_quota_us', 'r') as cfs_quota_us:\n cpu_numerator = float(cfs_quota_us.read().strip())\n return (cpu_numerator/cpu_denominator) * RSRC_REALLOC_RATIO, cpu_numerator, cpu_denominator\n\n @staticmethod\n def deduct_resource(client_id, mem, cfs_quota_us, cfs_period_us):\n global DEDUCT_INTERVAL\n if cfs_period_us != 100000:\n raise Exception(\"cfs_period_us should be 100000\")\n \n CPU_SEM.acquire()\n MEM_SEM.acquire()\n\n fe_mem_int = Utils.get_memory_limit(client_id) + mem\n fe_cfs_quota, fe_cfs_period = 
Utils.get_cpu_limit(client_id)\n fe_cpu_int = fe_cfs_quota + cfs_quota_us\n\n be_mem_int = Utils.get_memory_limit() - mem\n be_cfs_quota, be_cfs_period = Utils.get_cpu_limit()\n be_cpu_int = be_cfs_quota - cfs_quota_us\n\n # debug(f'old-->cpu={Utils.get_cpu_limit()}) - {cfs_quota_us}, mem={Utils.get_memory_limit()} - {mem}')\n\n if mem != 0:\n try:\n # Checks if memory limit to be < current usage.\n # current_usage = Utils.get_memory_usage()\n # if current_usage >= be_mem_int:\n # difference = current_usage - be_mem_int\n # page_size = LIBC.getpagesize()\n # how_many_pages = ceil(difference/page_size)\n # num_bytes_to_evict = page_size * how_many_pages\n # tmp_ptr = c_void_p()\n # ret = LIBC.posix_memalign(byref(tmp_ptr), page_size, num_bytes_to_evict)\n # if ret != 0:\n # raise Exception('ENOMEM')\n # c_char_ptr = cast(tmp_ptr, POINTER(c_char * num_bytes_to_evict))\n # for i in range(0, how_many_pages):\n # c_char_ptr.contents[i*page_size] = c_char(0xff)\n # LIBC.free(tmp_ptr)\n \n with open('/sys/fs/cgroup/memory/memory.limit_in_bytes', 'w') as be_limit:\n be_limit.write(str(be_mem_int).strip())\n with open('/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes', 'w') as be_swap_limit:\n be_swap_limit.write(str(be_mem_int*4).strip())\n\n # with open(f'/cg/memory/docker/{client_id}/memory.memsw.limit_in_bytes', 'w') as fe_swap_limit:\n # fe_swap_limit.write(str(fe_mem_int*4).strip())\n with open(f'/cg/memory/docker/{client_id}/memory.limit_in_bytes', 'w') as fe_limit:\n fe_limit.write(str(fe_mem_int).strip())\n\n except Exception as e:\n mem_fail = True\n debug(repr(e), e)\n\n if cfs_quota_us != 0:\n try:\n with open(f'/cg/cpu/docker/{client_id}/cpu.cfs_quota_us', 'w') as cfs_quota_us:\n cfs_quota_us.write(str(fe_cpu_int).strip())\n with open('/sys/fs/cgroup/cpu/cpu.cfs_quota_us', 'w') as cfs_quota_us:\n cfs_quota_us.write(str(be_cpu_int).strip())\n except Exception as e:\n cpu_fail = True\n debug(repr(e), e)\n debug(f'client_id={client_id}, fe_cpu_int={fe_cpu_int}, be_cpu_int={be_cpu_int}')\n\n MEM_SEM.release()\n CPU_SEM.release()\n\n\n @staticmethod\n def add_resource(client_id, mem, cfs_quota_us, cfs_period_us):\n global ADD_INTERVAL\n if cfs_period_us != 100000:\n raise Exception(\"cfs_period_us should be 100000\")\n\n CPU_SEM.acquire()\n MEM_SEM.acquire()\n\n fe_mem_current_limit = Utils.get_memory_limit(client_id)\n fe_mem_int = fe_mem_current_limit - mem\n fe_cfs_quota, fe_cfs_period = Utils.get_cpu_limit(client_id)\n fe_cpu_int = fe_cfs_quota - cfs_quota_us\n\n be_mem_current_limit = Utils.get_memory_limit()\n be_mem_int = be_mem_current_limit + mem\n be_cfs_quota, be_cfs_period = Utils.get_cpu_limit()\n be_cpu_int = be_cfs_quota + cfs_quota_us\n\n memory_transferred, cpu_transferred = 0, 0\n\n # debug(f'old-->cpu={Utils.get_cpu_limit()}) + {cfs_quota_us}, mem={Utils.get_memory_limit()} + {mem}')\n\n if mem != 0:\n # Checks if memory limit to be < current usage.\n fe_mem_current_usage = Utils.get_memory_usage(client_id)\n how_much_reduce_available = fe_mem_current_limit - fe_mem_current_usage\n how_much_reduce_required = mem\n # debug(how_much_reduce_available, how_much_reduce_required, fe_mem_current_limit, mem)\n # if fe_mem_int < fe_mem_current_usage:\n if how_much_reduce_available < how_much_reduce_required:\n mem = how_much_reduce_available * 0.5\n fe_mem_int = fe_mem_current_limit - mem\n be_mem_int = be_mem_current_limit + mem\n # debug(fe_mem_int)\n # debug(Utils.get_memory_usage(client_id))\n else:\n try:\n with open(f'/cg/memory/docker/{client_id}/memory.limit_in_bytes', 
'w') as fe_limit:\n                        fe_limit.write(str(fe_mem_int).strip())\n                    # FE swap space does not need to be adjusted.. but leave below.\n                    # with open(f'/cg/memory/docker/{client_id}/memory.memsw.limit_in_bytes', 'w') as fe_swap_limit:\n                    #     try:\n                    #         fe_swap_limit.write(str(fe_mem_int*4).strip())\n                    #     except:\n                    #         raise Exception('OutOfMemory')\n\n                    with open('/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes', 'w') as be_swap_limit:\n                        be_swap_limit.write(str(be_mem_int*4).strip())\n                    with open('/sys/fs/cgroup/memory/memory.limit_in_bytes', 'w') as be_limit:\n                        be_limit.write(str(be_mem_int).strip())\n                    memory_transferred = mem\n                except Exception as e:\n                    memory_transferred = False\n                    debug(repr(e), e)\n\n        if cfs_quota_us != 0:\n            try:\n                with open(f'/cg/cpu/docker/{client_id}/cpu.cfs_quota_us', 'w') as cfs_quota_us_f:\n                    cfs_quota_us_f.write(str(fe_cpu_int).strip())\n                with open('/sys/fs/cgroup/cpu/cpu.cfs_quota_us', 'w') as cfs_quota_us_f:\n                    cfs_quota_us_f.write(str(be_cpu_int).strip())\n                cpu_transferred = cfs_quota_us\n            except Exception as e:\n                cpu_transferred = 0\n                debug(repr(e), e)\n\n        MEM_SEM.release()\n        CPU_SEM.release()\n\n        return memory_transferred, cpu_transferred\n\n    @staticmethod\n    def deduct_resource_daemon(client_id, mem, cfs_quota_us, cfs_period_us):\n        global DEDUCT_INTERVAL\n        import socket\n        if cfs_period_us != 100000:\n            raise Exception(\"cfs_period_us should be 100000\")\n        \n        CPU_SEM.acquire()\n        MEM_SEM.acquire()\n\n        # fe_mem_int = Utils.get_memory_limit(client_id) + mem\n        # fe_cfs_quota, fe_cfs_period = Utils.get_cpu_limit(client_id)\n        # fe_cpu_int = fe_cfs_quota + cfs_quota_us\n\n        # be_mem_int = Utils.get_memory_limit() - mem\n        # be_cfs_quota, be_cfs_period = Utils.get_cpu_limit()\n        # be_cpu_int = be_cfs_quota - cfs_quota_us\n\n        # debug(f'old-->cpu={Utils.get_cpu_limit()}) - {cfs_quota_us}, mem={Utils.get_memory_limit()} - {mem}')\n\n\n        if mem != 0 or cfs_quota_us != 0:\n            my_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n            my_socket.connect(POCKETD_SOCKET_PATH)\n            args_dict = {'sender' : 'FE',\n                         'command' : 'migrate_resource',\n                         'client' : Utils.get_container_id(), \n                         'be' : client_id,\n                         'mem' : mem,\n                         'cpu' : cfs_quota_us,\n                         'cpudenom' : cfs_period_us}\n            json_data_to_send = json.dumps(args_dict)\n            my_socket.send(json_data_to_send.encode('utf-8'))\n            data_received = my_socket.recv(1024)\n            my_socket.close()\n\n        MEM_SEM.release()\n        CPU_SEM.release()\n\n\n    @staticmethod\n    def add_resource_daemon(client_id, mem, cfs_quota_us, cfs_period_us):\n        global ADD_INTERVAL\n        import socket\n        if cfs_period_us != 100000:\n            raise Exception(\"cfs_period_us should be 100000\")\n\n        CPU_SEM.acquire()\n        MEM_SEM.acquire()\n        \n        # fe_mem_int = Utils.get_memory_limit(client_id) - mem\n        # fe_cfs_quota, fe_cfs_period = Utils.get_cpu_limit(client_id)\n        # fe_cpu_int = fe_cfs_quota - cfs_quota_us\n\n        # be_mem_int = Utils.get_memory_limit() + mem\n        # be_cfs_quota, be_cfs_period = Utils.get_cpu_limit()\n        # be_cpu_int = be_cfs_quota + cfs_quota_us\n\n        # debug(f'old-->cpu={Utils.get_cpu_limit()}) + {cfs_quota_us}, mem={Utils.get_memory_limit()} + {mem}')\n\n        if mem != 0 or cfs_quota_us != 0:\n            my_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n            my_socket.connect(POCKETD_SOCKET_PATH)\n            args_dict = {'sender' : 'FE',\n                         'command' : 'migrate_resource',\n                         'client' : client_id, \n                         'be' : Utils.get_container_id(),\n                         'mem' : mem,\n                         'cpu' : cfs_quota_us,\n                         'cpudenom' : cfs_period_us}\n            json_data_to_send = json.dumps(args_dict)\n            my_socket.send(json_data_to_send.encode('utf-8'))\n            data_received = 
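# --- Illustrative sketch (editor addition, not part of the record above) ---
# Utils.add_resource/deduct_resource above move memory and CPU quota between the
# backend's own cgroup and a client container's cgroup by rewriting cgroup-v1
# limit files. The core pattern, condensed into one hypothetical helper (the
# paths are assumptions about a cgroup-v1 mount layout):
def transfer_cpu_quota(src_quota_path: str, dst_quota_path: str, delta_us: int) -> None:
    # Shrink the source quota and grow the destination quota by delta_us.
    with open(src_quota_path) as f:
        src_quota = int(f.read().strip())
    with open(dst_quota_path) as f:
        dst_quota = int(f.read().strip())
    with open(src_quota_path, 'w') as f:
        f.write(str(src_quota - delta_us))
    with open(dst_quota_path, 'w') as f:
        f.write(str(dst_quota + delta_us))
# --- end sketch ---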
my_socket.recv(1024)\n my_socket.close()\n\n MEM_SEM.release()\n CPU_SEM.release()\n\n### moved from apps\nclass BatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"\n Make trainable=False freeze BN for real (the og version is sad)\n \"\"\"\n\n def call(self, x, training=False):\n if training is None:\n training = tf.constant(False)\n training = tf.logical_and(training, self.trainable)\n return super().call(x, training)\n\ndef yolo_boxes(pred, anchors, classes):\n # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))\n grid_size = tf.shape(pred)[1]\n box_xy, box_wh, objectness, class_probs = tf.split(\n pred, (2, 2, 1, classes), axis=-1)\n\n box_xy = tf.sigmoid(box_xy)\n objectness = tf.sigmoid(objectness)\n class_probs = tf.sigmoid(class_probs)\n pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss\n\n # !!! grid[x][y] == (y, x)\n grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))\n grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]\n\n box_xy = (box_xy + tf.cast(grid, tf.float32)) / \\\n tf.cast(grid_size, tf.float32)\n box_wh = tf.exp(box_wh) * anchors\n\n box_x1y1 = box_xy - box_wh / 2\n box_x2y2 = box_xy + box_wh / 2\n bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)\n\n return bbox, objectness, class_probs, pred_box\n\ndef yolo_nms(outputs, anchors, masks, classes):\n # boxes, conf, type\n b, c, t = [], [], []\n\n for o in outputs:\n b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))\n c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))\n t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))\n\n bbox = tf.concat(b, axis=1)\n confidence = tf.concat(c, axis=1)\n class_probs = tf.concat(t, axis=1)\n\n scores = confidence * class_probs\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),\n scores=tf.reshape(\n scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),\n max_output_size_per_class=FLAGS.yolo_max_boxes,\n max_total_size=FLAGS.yolo_max_boxes,\n iou_threshold=FLAGS.yolo_iou_threshold,\n score_threshold=FLAGS.yolo_score_threshold\n )\n\n return boxes, scores, classes, valid_detections\n\nyolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),\n (59, 119), (116, 90), (156, 198), (373, 326)],\n np.float32) / 416\nyolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])\n\n# but don't delete\nflags.DEFINE_integer('yolo_max_boxes', 100,\n 'maximum number of boxes per image')\nflags.DEFINE_float('yolo_iou_threshold', 0.5, 'iou threshold')\nflags.DEFINE_float('yolo_score_threshold', 0.5, 'score threshold')\n\n\ndef stack_trace():\n import traceback\n traceback.print_tb()\n traceback.print_exception()\n traceback.print_stack()\n\n# def str_replacer(old, new, start):\n# if start not in range(len(old)):\n# raise ValueError(\"invalid start index\")\n\n# # if start < 0:\n# # return new + old\n# # if start > len(old):\n# # return old + new\n\n# return old[:start] + new + old[start + 1:]\n\n### test_code\n# from multiprocessing import Process, Manager ## test_code\n# manager = Manager() ## test_code\n# _matmultest_dict = manager.dict() ## test_code\n_matmultest_dict = {} ## test_code\nfrom math import sqrt\n# class ThreadWithReturnValue(Thread):\n# def __init__(self, group=None, target=None, name=None,\n# args=(), kwargs=None, *, daemon=None):\n# # Call the Thread class's init function\n# Thread.__init__(self)\n# self._return = None\n\n# def 
run(self):\n# if self._Thread__target is not None:\n# self._return = self._Thread__target(*self._Thread__args,\n# **self._Thread__kwargs)\n# def join(self):\n# Thread.join(self)\n# return self._return\n\nclass TensorFlowServer:\n @staticmethod\n def hello(client_id, message):\n return_dict = {'message': message}\n return ReturnValue.OK.value, return_dict\n\n @staticmethod\n def _noptest(client_id):\n try:\n # do nothing\n pass\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n return ReturnValue.OK.value, None\n finally:\n pass\n\n @staticmethod\n def _matmultest(client_id, N): ## test_code\n try:\n mat_a = np.random.randint(0, sqrt(sys.maxsize), size=(N, N))\n mat_b = np.random.randint(0, sqrt(sys.maxsize), size=(N, N))\n # for r in range(N):\n # for c in range(N):\n # for k in range(N):\n # mat_c[r, c] = mat_a[r, k] * mat_b[k, c]\n # mat_c = np.matmul(mat_a, mat_b) \n if client_id not in _matmultest_dict:\n _matmultest_dict[client_id] = []\n _matmultest_dict[client_id].append(mat_a)\n _matmultest_dict[client_id].append(mat_b)\n\n # import ctypes\n # filepath = '/root/tfrpc/server/test/libmatmul.so'\n # if not os.path.exists(filepath):\n # print(os.getcwd())\n # print(subprocess.check_output('ls -alh /root/tfrpc/server/test/', shell=True, encoding='utf8'))\n # raise Exception('Library file does not exist, consider build it first.')\n\n # matmullib = ctypes.CDLL(filepath)\n # matmullib.matmul.argtypes = [ctypes.c_int]\n # matmullib.matmul.restype = ctypes.c_void_p\n # result_mat = matmullib.matmul(N)\n # # t = ThreadWithReturnValue(target=matmullib.matmul, args=(N,))\n # # t = Thread(target=matmullib.matmul, args=(N,))\n # # t.start()\n # # # result_mat = t.join()\n # # t.join()\n # if client_id not in _matmultest_dict:\n # _matmultest_dict[client_id] = []\n # _matmultest_dict[client_id].append(result_mat)\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n print(e)\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n return ReturnValue.OK.value, None\n finally:\n pass\n\n @staticmethod\n def check_if_model_exist(client_id, model_name):\n keras_model = None\n if model_name in PocketManager.get_instance().model_dict:\n exist_value = True\n model = PocketManager.get_instance().model_dict[model_name]\n keras_model = TFDataType.Model(model_name, id(model), already_built=True).to_dict()\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n else:\n PocketManager.get_instance().dict_modelname_to_session[model_name] = tf.Graph()\n # PocketManager.get_instance().dict_modelname_to_session[model_name] = tf.compat.v1.Session(graph=tf.Graph())\n PocketManager.get_instance().dict_clientid_to_modelname[client_id] = model_name\n exist_value = False\n\n return ReturnValue.OK.value, (exist_value, keras_model)\n\n @staticmethod\n def tf_callable(client_id, typename, callable, args, _shmem=None):\n try:\n callable_instance = PocketManager.get_instance().get_real_object_with_mock(client_id, callable)\n real_args = []\n 
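# --- Illustrative sketch (editor addition, not part of the record above) ---
# check_if_model_exist above lets the first client build a model and every later
# client reuse it from model_dict, skipping the cold start. The caching pattern
# in miniature (hypothetical helper, not the server's actual API):
_model_cache = {}

def get_or_build(name, builder):
    # builder is a zero-argument callable that constructs the model once.
    if name not in _model_cache:
        _model_cache[name] = builder()
    return _model_cache[name]
# --- end sketch ---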
PocketManager.get_instance().disassemble_args(client_id, args, real_args)\n # debug(real_args)\n ret = callable_instance(*real_args)\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n if type(ret) in (list, tuple):\n ret_list = []\n for index, elem in enumerate(ret):\n PocketManager.get_instance().add_object_to_per_client_store(client_id, elem)\n try:\n ret_list.append(TFDataType.Tensor(elem.name, id(elem), elem.shape.as_list()).to_dict())\n except AttributeError as e:\n ret_list.append(TFDataType.Tensor(None, id(elem), elem.shape.as_list()).to_dict())\n\n return ReturnValue.OK.value, ret_list\n elif type(ret) is dict:\n ret_dict = {} # optim 2 # todo: pseudo dict implementation needed. for object det\n # s = time()\n # for key, value in ret.items():\n # ret[key] = value.numpy().tolist()\n # t1 = time()\n # json_dumps = json.dumps(ret)\n # t11 = time()\n # json_converted = bytes(json_dumps, encoding='utf8')\n # t2 = time()\n # length = len(json_converted)\n # t3 = time()\n # # PocketManager.get_instance().shmem_dict[client_id].write(contents=json_converted) # optim 1\n # ret_dict={'shmem': {'length':length}}\n # e = time()\n # print(f'\\ttime={e-s}, {t1-s}, {t11-t1},{t2-t11}, {t3-t2}, {e-t3}')\n\n return ReturnValue.OK.value, ret_dict\n\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, ret)\n try:\n name = ret.name\n except AttributeError as e:\n name=None\n try:\n shape = ret.shape.as_list()\n except AttributeError as e:\n shape = None\n return ReturnValue.OK.value, TFDataType.Tensor(name, id(ret), shape).to_dict()\n finally:\n pass\n\n @staticmethod\n def object_slicer(client_id, mock_dict, key):\n try:\n object = PocketManager.get_instance().get_real_object_with_mock(client_id, mock_dict)\n # debug(f'object={object}')\n tensor = object[key]\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n # debug(key)\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n try:\n mock_tensor = TFDataType.Tensor(tensor.name, id(tensor), tensor.shape.as_list(), tensor)\n ret = mock_tensor.to_dict()\n except AttributeError as e:\n mock_tensor = TFDataType.Tensor(None, id(tensor), tensor.shape.as_list(), tensor)\n ret = mock_tensor.to_dict()\n finally:\n return ReturnValue.OK.value, ret\n finally:\n pass\n\n @staticmethod\n def tensor_division(client_id, mock_dict, other):\n try:\n # debug(f'mock_dict={mock_dict} other={other}')\n object = PocketManager.get_instance().get_real_object_with_mock(client_id, mock_dict)\n tensor = object / other\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, 
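# --- Illustrative sketch (editor addition, not part of the record above) ---
# The handlers above never ship TF objects to the client; they stash the real
# object server-side (add_object_to_per_client_store) and hand back a mock dict
# keyed by id(obj), which get_real_object_with_mock later resolves. A minimal
# version of that handle round-trip (names are hypothetical):
_object_store = {}

def to_handle(obj):
    _object_store[id(obj)] = obj       # keep the real object alive server-side
    return {'obj_id': id(obj)}         # opaque handle sent to the client

def from_handle(mock):
    return _object_store[mock['obj_id']]
# --- end sketch ---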
tensor)\n            return ReturnValue.OK.value, TFDataType.Tensor(None, id(tensor), tensor.shape.as_list()).to_dict()\n        finally:\n            pass\n\n    # @staticmethod\n    # def tensor_shape(client_id, mock_dict):\n    #     try:\n    #         # debug(f'mock_dict={mock_dict} other={other}')\n    #         object = PocketManager.get_instance().get_real_object_with_mock(client_id, mock_dict)\n    #         shape = object.shape.as_list()\n    #     except Exception as e:\n    #         import inspect\n    #         from inspect import currentframe, getframeinfo\n    #         frameinfo = getframeinfo(currentframe())\n    #         return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n    #     else:\n    #         return ReturnValue.OK.value, shape\n    #     finally:\n    #         pass\n\n    # @staticmethod\n    # def __substitute_closure_vars_with_context(function, context):\n    #     new_string = function\n    #     debug(context)\n    #     for key, value in context.copy().items():\n    #         index = 0\n    #         while index < len(function):\n    #             if function[index:].startswith(key) and \\\n    #                 not function[index-1].isalnum() and \\\n    #                 not function[index+len(key)].isalnum():\n    #                 substitute = str(value)\n    #                 new_string = function[:index] + function[index:].replace(key, substitute, 1)\n    #                 function = new_string\n    #             index += 1\n    #         function = new_string\n    #     return function\n\n\n    @staticmethod\n    def tf_shape(client_id, input, out_type, name=None):\n        try:\n            out_type = eval(out_type)\n            input = PocketManager.get_instance().get_real_object_with_mock(client_id, input)\n            tensor = tf.shape(input=input, out_type=out_type, name=name)\n        except Exception as e:\n            import traceback\n            tb = traceback.format_exc()\n            debug(tb)\n            from inspect import currentframe, getframeinfo, stack\n            frameinfo = getframeinfo(currentframe())\n            return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n        else:\n            PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n            return ReturnValue.OK.value, TFDataType.Tensor(tensor.name,\n                                                           id(tensor),\n                                                           tensor.shape.as_list()).to_dict()\n        finally:\n            pass\n\n    @staticmethod\n    def 
tf_reshape(client_id, tensor, shape, name=None):\n try:\n tensor = PocketManager.get_instance().get_real_object_with_mock(client_id, tensor)\n # debug(tensor)\n # debug(shape)\n returned_tensor = tf.reshape(tensor=tensor, shape=shape, name=name)\n # debug(returned_tensor)\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n # debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, returned_tensor)\n try:\n name = returned_tensor.name\n except AttributeError as e:\n name=None\n try:\n shape = returned_tensor.shape.as_list()\n except AttributeError as e:\n shape = None\n return ReturnValue.OK.value, TFDataType.Tensor(name, \n id(returned_tensor), \n shape).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_constant(client_id, value, dtype=None, shape=None, name='Const'):\n try:\n length = value\n value = str(PocketManager.get_instance().shmem_dict[client_id].read(length), 'utf-8').split(';')\n tensor = tf.constant(value=value, dtype=dtype, shape=shape, name=name)\n # debug(returned_tensor)\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n # debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n try:\n name = tensor.name\n except AttributeError as e:\n name=None\n try:\n shape = tensor.shape.as_list()\n except AttributeError as e:\n shape = None\n return ReturnValue.OK.value, TFDataType.Tensor(name,\n id(tensor),\n shape).to_dict()\n finally:\n pass\n\n\n @staticmethod\n def tf_sigmoid(client_id, x, name=None):\n try:\n if type(x) == dict and 'obj_id' in x:\n x = PocketManager.get_instance().get_real_object_with_mock(client_id, x)\n\n tensor = tf.sigmoid(x=x, name=name)\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n # debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n print(e)\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n return ReturnValue.OK.value, tensor.numpy().tolist()\n finally:\n pass\n\n\n @staticmethod\n def tf_config_experimental_list__physical__devices(client_id, device_type):\n global DEVICE_LIST, DEVICE_LIST_AVAILABLE\n if DEVICE_LIST_AVAILABLE:\n return_list = DEVICE_LIST\n else:\n device_list = tf.config.experimental.list_physical_devices(device_type)\n return_list = []\n DEVICE_LIST_AVAILABLE = True\n for elem in device_list:\n return_list.append(TFDataType.PhysicalDevice(dict=elem.__dict__))\n DEVICE_LIST = return_list\n # return_list.append(TFDataType.PhysicalDevice(elem.name, elem.device_type).to_dict())\n return ReturnValue.OK.value, return_list\n\n @staticmethod\n def tf_config_experimental_set__memory__growth(client_id, device, enable):\n try:\n tf.config.experimental.set_memory_growth(device, enable)\n except Exception as e:\n import inspect\n from 
inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n return ReturnValue.OK.value, []\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_Input(client_id, shape=None, batch_size=None, name=None, dtype=None, sparse=False, tensor=None, ragged=False, **kwargs):\n try:\n tensor = tf.keras.layers.Input(shape=shape, batch_size=batch_size, name=name, dtype=dtype, sparse=sparse, tensor=tensor, ragged=ragged, **kwargs)\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.Tensor(tensor.name, \n id(tensor), \n tensor.shape.as_list()).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_Conv2D(client_id, filters, kernel_size, strides=(1, 1),\n padding='valid', data_format=None,\n dilation_rate=(1, 1), activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, **kwargs):\n # debug('\\ntf_keras_layers_Conv2D')\n\n kernel_regularizer = PocketManager.get_instance().get_real_object_with_mock(client_id, kernel_regularizer)\n\n try:\n\n tensor = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs)\n # debug(f'tensor_name={tensor.name}')\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.Conv2D(tensor.name, \n id(tensor)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_ZeroPadding2D(client_id, padding=(1, 1), data_format=None, **kwargs):\n try:\n tensor = tf.keras.layers.ZeroPadding2D(padding=padding, data_format=data_format, **kwargs)\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.ZeroPadding2D(tensor.name, \n id(tensor)).to_dict()\n finally:\n pass\n\n @staticmethod\n def 
tf_keras_regularizers_l2(client_id, l=0.01):\n try:\n l2 = tf.keras.regularizers.l2(l=l)\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, l2)\n # return ReturnValue.OK.value, TFDataType.L2(id(l2)).to_dict()\n return ReturnValue.OK.value, TFDataType.L2(id(l2)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_BatchNormalization(client_id, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None, renorm=False, renorm_clipping=None, renorm_momentum=0.99,\n fused=None, trainable=True, virtual_batch_size=None, adjustment=None, name=None,\n **kwargs):\n try:\n tensor = BatchNormalization(axis=axis, momentum=momentum, epsilon=epsilon, center=center, scale=scale,\n beta_initializer=beta_initializer, gamma_initializer=gamma_initializer,\n moving_mean_initializer=moving_mean_initializer, moving_variance_initializer=moving_variance_initializer,\n beta_regularizer=beta_regularizer, gamma_regularizer=gamma_regularizer, beta_constraint=beta_constraint,\n gamma_constraint=gamma_constraint, renorm=renorm, renorm_clipping=renorm_clipping, renorm_momentum=renorm_momentum,\n fused=fused, trainable=trainable, virtual_batch_size=virtual_batch_size, adjustment=adjustment, name=name,\n **kwargs)\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.BatchNormalization(tensor.name, \n id(tensor)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_LeakyReLU(client_id, alpha=0.3, **kwargs):\n try:\n tensor = tf.keras.layers.LeakyReLU(alpha=alpha, **kwargs)\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.LeakyReLU(tensor.name, id(tensor)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_Add(client_id, **kwargs):\n try:\n tensor = tf.keras.layers.Add(**kwargs) ###\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return 
ReturnValue.OK.value, TFDataType.Add(tensor.name, id(tensor)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_Model(client_id, args, **kwargs):\n try:\n real_args = []\n PocketManager.get_instance().disassemble_args(client_id, args, real_args)\n\n real_kwargs = {}\n PocketManager.get_instance().disassemble_kwargs(client_id, kwargs, real_kwargs)\n model = tf.keras.Model(*real_args, **real_kwargs) ###\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n PocketManager.get_instance().add_built_model(name=model.name, model=model)\n return ReturnValue.OK.value, TFDataType.Model(name=model.name,\n obj_id=id(model)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_Lambda(client_id, function, output_shape=None, mask=None, arguments=None, **kwargs):\n try:\n function = eval(function)\n tensor = tf.keras.layers.Lambda(function=function, output_shape=output_shape, mask=mask, arguments=arguments, **kwargs)\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.Model(name=tensor.name,\n obj_id=id(tensor)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_UpSampling2D(client_id, size=(2, 2), data_format=None, interpolation='nearest', **kwargs):\n try:\n tensor = tf.keras.layers.UpSampling2D(size=size, data_format=data_format, interpolation=interpolation, **kwargs)\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.Model(name=tensor.name,\n obj_id=id(tensor)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_layers_Concatenate(client_id, axis=-1, **kwargs):\n try:\n tensor = tf.keras.layers.Concatenate(axis=axis, **kwargs)\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.Model(name=tensor.name,\n obj_id=id(tensor)).to_dict()\n finally:\n pass\n\n # @tf.function\n @staticmethod\n def tf_image_decode__image(client_id, 
contents, channels=None, dtype='tf.dtypes.uint8', name=None, expand_animations=True):\n        try:\n            dtype = eval(dtype)\n            contents = bytes(PocketManager.get_instance().shmem_dict[client_id].read(contents))\n            format = what(None, h=contents)\n            if format == 'png':\n                tensor = tf.image.decode_png(contents=contents, channels=channels, dtype=dtype, name=name)\n            elif format == 'jpeg':\n                tensor = tf.image.decode_jpeg(contents=contents, channels=channels, name=name)\n            else:\n                tensor = tf.image.decode_image(contents=contents, channels=channels, dtype=dtype, name=name, expand_animations=expand_animations)\n        except Exception as e:\n            import traceback\n            tb = traceback.format_exc()\n            debug(tb)\n            from inspect import currentframe, getframeinfo, stack\n            frameinfo = getframeinfo(currentframe())\n            return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n        else:\n            PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n            return ReturnValue.OK.value, TFDataType.Tensor(name=None,\n                                                           obj_id=id(tensor), \n                                                           shape=tensor.shape.as_list()).to_dict()\n        finally:\n            pass\n\n    @staticmethod\n    def model_load_weights(client_id, model, filepath, by_name=False, skip_mismatch=False):\n        try:\n            # debug(client_id, model)\n            model = PocketManager.get_instance().get_real_object_with_mock(client_id, model)\n            model.load_weights(filepath=filepath, by_name=by_name, skip_mismatch=skip_mismatch)\n        except Exception as e:\n            import traceback\n            tb = traceback.format_exc()\n            debug(tb)\n            from inspect import currentframe, getframeinfo, stack\n            frameinfo = getframeinfo(currentframe())\n            return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n        else:\n            # PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n            return ReturnValue.OK.value, None\n        finally:\n            pass\n\n    @staticmethod\n    def tf_expand__dims(client_id, input, axis, name=None):\n        try:\n            input = PocketManager.get_instance().get_real_object_with_mock(client_id, input)\n            tensor = tf.expand_dims(input=input, axis=axis, name=name)\n        except Exception as e:\n            import traceback\n            tb = traceback.format_exc()\n            debug(tb)\n            from inspect import currentframe, getframeinfo, stack\n            frameinfo = getframeinfo(currentframe())\n            return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n        else:\n            PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n            return ReturnValue.OK.value, TFDataType.Tensor(name=None,\n                                                           obj_id=id(tensor)).to_dict()\n        finally:\n            pass\n\n    @staticmethod\n    def tf_image_resize(client_id, images, size, method=tf.image.ResizeMethod.BILINEAR, preserve_aspect_ratio=False,\n                        antialias=False, name=None):\n        try:\n            images = PocketManager.get_instance().get_real_object_with_mock(client_id, images)\n            tensor = tf.image.resize(images=images, size=size, method=method, preserve_aspect_ratio=preserve_aspect_ratio, antialias=antialias, name=name)\n        except Exception as e:\n            import traceback\n            tb = traceback.format_exc()\n            debug(tb)\n            from inspect import currentframe, getframeinfo, stack\n            frameinfo = getframeinfo(currentframe())\n            return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 
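# --- Illustrative sketch (editor addition, not part of the record above) ---
# tf_image_decode__image above sniffs the payload with imghdr.what() and picks a
# format-specific TF decoder, falling back to the generic decode_image. The
# dispatch step in isolation (a sketch, assuming a raw image byte string):
import tensorflow as tf
from imghdr import what

def decode_by_format(raw_bytes: bytes, channels: int = 3):
    fmt = what(None, h=raw_bytes)  # 'png', 'jpeg', or None if unknown
    if fmt == 'png':
        return tf.image.decode_png(raw_bytes, channels=channels)
    if fmt == 'jpeg':
        return tf.image.decode_jpeg(raw_bytes, channels=channels)
    return tf.image.decode_image(raw_bytes, channels=channels)
# --- end sketch ---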
'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.Tensor(name=None,\n obj_id=id(tensor),\n shape=tensor.shape.as_list()).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_applications_MobileNetV2(client_id, args, **kwargs):\n try:\n if 'mobilenetv2' in PocketManager.get_instance().model_dict:\n model = PocketManager.get_instance().model_dict['mobilenetv2']\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n return ReturnValue.OK.value, TFDataType.Model(name='mobilenetv2',\n obj_id=id(model),\n already_built=True).to_dict()\n PocketManager.get_instance().dict_clientid_to_modelname[client_id]='mobilenetv2'\n \n real_args = []\n PocketManager.get_instance().disassemble_args(client_id, args, real_args)\n\n real_kwargs = {}\n PocketManager.get_instance().disassemble_kwargs(client_id, kwargs, real_kwargs)\n\n real_kwargs['input_shape'] = tuple(real_kwargs['input_shape'])\n\n model = tf.keras.applications.MobileNetV2(*real_args, **real_kwargs) ###\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n PocketManager.get_instance().add_built_model(name='mobilenetv2', model=model)\n return ReturnValue.OK.value, TFDataType.Model(name='mobilenetv2',\n obj_id=id(model)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_applications_ResNet50(client_id, args, **kwargs):\n try:\n PocketManager.get_instance().dict_clientid_to_modelname[client_id]='resnet50'\n if 'resnet50' in PocketManager.get_instance().model_dict:\n model = PocketManager.get_instance().model_dict['resnet50']\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n return ReturnValue.OK.value, TFDataType.Model(name='resnet50',\n obj_id=id(model),\n already_built=True).to_dict()\n\n real_args = []\n PocketManager.get_instance().disassemble_args(client_id, args, real_args)\n\n real_kwargs = {}\n PocketManager.get_instance().disassemble_kwargs(client_id, kwargs, real_kwargs)\n\n real_kwargs['input_shape'] = tuple(real_kwargs['input_shape'])\n\n model = tf.keras.applications.ResNet50(*real_args, **real_kwargs) ###\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n PocketManager.get_instance().add_built_model(name='resnet50', model=model)\n return ReturnValue.OK.value, TFDataType.Model(name='resnet50',\n obj_id=id(model)).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_preprocessing_image_img__to__array(client_id, img, data_format=None, dtype=None):\n try:\n img = PocketManager.get_instance().get_real_object_with_mock(client_id, img)\n array = tf.keras.preprocessing.image.img_to_array(img=img, data_format=data_format, dtype=dtype) ###\n except Exception as e:\n import inspect\n from inspect import 
currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, array)\n return ReturnValue.OK.value, TFDataType.Tensor(name=None,\n obj_id=id(array),\n shape=array.shape).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_keras_applications_resnet50_preprocess__input(client_id, args, **kwargs):\n try:\n real_args = []\n PocketManager.get_instance().disassemble_args(client_id, args, real_args)\n\n real_kwargs = {}\n PocketManager.get_instance().disassemble_kwargs(client_id, kwargs, real_kwargs)\n\n tensor = tf.keras.applications.resnet50.preprocess_input(*real_args, **real_kwargs) ###\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, tensor)\n return ReturnValue.OK.value, TFDataType.Tensor(name=None,\n obj_id=id(tensor),\n shape=tensor.shape.as_list()).to_dict()\n finally:\n pass\n\n @staticmethod\n def tf_saved__model_load(client_id, export_dir, tags=None):\n try:\n dir = f'/models/imdb_prediction/{export_dir}_bert'\n PocketManager.get_instance().dict_clientid_to_modelname[client_id]=export_dir\n if export_dir in PocketManager.get_instance().model_dict:\n model = PocketManager.get_instance().model_dict[export_dir]\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n return ReturnValue.OK.value, TFDataType.Model(name=export_dir,\n obj_id=id(model),\n already_built=True).to_dict()\n\n model = tf.saved_model.load(dir, tags=tags) ###\n except Exception as e:\n import inspect\n from inspect import currentframe, getframeinfo\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': inspect.stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n PocketManager.get_instance().add_built_model(name=export_dir, model=model)\n return ReturnValue.OK.value, TFDataType.Model(name=export_dir,\n obj_id=id(model)).to_dict()\n finally:\n pass\n\nclass NumpyServer:\n @staticmethod\n def np_argmax(client_id, a, axis=None, out=None):\n try:\n tensor = PocketManager.get_instance().get_real_object_with_mock(client_id, a)\n argmax = np.argmax(tensor, axis, out).item()\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n return ReturnValue.OK.value, argmax\n finally:\n pass\n\nimport tensorflow_hub as hub\nclass HubServer:\n @staticmethod\n def hub_load(client_id, handle, tags=None, options=None):\n try:\n model_name = handle.split('/')[4]\n if model_name in PocketManager.get_instance().model_dict:\n model = 
PocketManager.get_instance().model_dict[model_name]\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n return ReturnValue.OK.value, TFDataType.Model(name=model_name,\n obj_id=id(model),\n already_built=True).to_dict()\n model = hub.load(handle=handle, tags=tags, options=options)\n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n debug(tb)\n from inspect import currentframe, getframeinfo, stack\n frameinfo = getframeinfo(currentframe())\n return ReturnValue.EXCEPTIONRAISED.value, {'exception': e.__class__.__name__, 'message': str(e), 'filename':frameinfo.filename, 'lineno': frameinfo.lineno, 'function': stack()[0][3]}\n else:\n PocketManager.get_instance().add_object_to_per_client_store(client_id, model)\n PocketManager.get_instance().add_built_model(name=model_name, model=model)\n return ReturnValue.OK.value, TFDataType.Model(name=model_name,\n obj_id=id(model)).to_dict()\n finally:\n pass\n\ntf_function_dict = {\n TFFunctions.LOCALQ_DEBUG: \n TensorFlowServer.hello,\n TFFunctions.MODEL_EXIST:\n TensorFlowServer.check_if_model_exist,\n TFFunctions.TF_CALLABLE:\n TensorFlowServer.tf_callable,\n TFFunctions.OBJECT_SLICER:\n TensorFlowServer.object_slicer,\n TFFunctions.TF_SHAPE:\n TensorFlowServer.tf_shape,\n TFFunctions.TF_RESHAPE:\n TensorFlowServer.tf_reshape,\n TFFunctions.TENSOR_DIVISION:\n TensorFlowServer.tensor_division,\n # TFFunctions.TENSOR_SHAPE:\n # TensorFlowServer.tensor_shape,\n TFFunctions.TF_CONSTANT:\n TensorFlowServer.tf_constant,\n TFFunctions.TF_SIGMOID:\n TensorFlowServer.tf_sigmoid,\n\n TFFunctions._NOPTEST:\n TensorFlowServer._noptest,\n TFFunctions._MATMULTEST:\n TensorFlowServer._matmultest,\n\n TFFunctions.TF_CONFIG_EXPERIMENTAL_LIST__PHYSICAL__DEVICES: \n TensorFlowServer.tf_config_experimental_list__physical__devices,\n TFFunctions.TF_CONFIG_EXPERIMENTAL_SET__MEMORY__GROWTH: \n TensorFlowServer.tf_config_experimental_set__memory__growth,\n # TFFunctions.TF_GRAPH_GET__TENSOR__BY__NAME: \n # TensorFlowServer.tf_Graph_get__tensor__by__name,\n TFFunctions.TF_KERAS_LAYERS_INPUT: \n TensorFlowServer.tf_keras_layers_Input,\n TFFunctions.TF_KERAS_LAYERS_ZEROPADDING2D: \n TensorFlowServer.tf_keras_layers_ZeroPadding2D,\n TFFunctions.TF_KERAS_REGULARIZERS_L2: \n TensorFlowServer.tf_keras_regularizers_l2,\n TFFunctions.TF_KERAS_LAYERS_CONV2D: \n TensorFlowServer.tf_keras_layers_Conv2D,\n TFFunctions.TF_KERAS_LAYERS_BATCHNORMALIZATION: \n TensorFlowServer.tf_keras_layers_BatchNormalization,\n TFFunctions.TF_KERAS_LAYERS_LEAKYRELU: \n TensorFlowServer.tf_keras_layers_LeakyReLU,\n TFFunctions.TF_KERAS_LAYERS_ADD: \n TensorFlowServer.tf_keras_layers_Add,\n TFFunctions.TF_KERAS_MODEL: \n TensorFlowServer.tf_keras_Model,\n TFFunctions.TF_KERAS_LAYERS_LAMBDA: \n TensorFlowServer.tf_keras_layers_Lambda,\n TFFunctions.TF_KERAS_LAYERS_UPSAMPLING2D: \n TensorFlowServer.tf_keras_layers_UpSampling2D,\n TFFunctions.TF_KERAS_LAYERS_CONCATENATE: \n TensorFlowServer.tf_keras_layers_Concatenate,\n TFFunctions.TF_IMAGE_DECODE__IMAGE:\n TensorFlowServer.tf_image_decode__image,\n TFFunctions.TF_EXPAND__DIMS:\n TensorFlowServer.tf_expand__dims,\n TFFunctions.TF_IMAGE_RESIZE:\n TensorFlowServer.tf_image_resize,\n TFFunctions.TF_KERAS_APPLICATIONS_MOBILENETV2:\n TensorFlowServer.tf_keras_applications_MobileNetV2,\n TFFunctions.TF_KERAS_APPLICATIONS_RESNET50:\n TensorFlowServer.tf_keras_applications_ResNet50,\n TFFunctions.TF_KERAS_PREPROCESSING_IMAGE_IMG__TO__ARRAY:\n TensorFlowServer.tf_keras_preprocessing_image_img__to__array,\n 
TFFunctions.TF_KERAS_APPLICATIONS_RESNET50_PREPROCESS__INPUT:\n TensorFlowServer.tf_keras_applications_resnet50_preprocess__input,\n TFFunctions.TF_SAVED__MODEL_LOAD:\n TensorFlowServer.tf_saved__model_load,\n\n TFFunctions.TF_MODEL_LOAD_WEIGHTS:\n TensorFlowServer.model_load_weights,\n\n TFFunctions.NP_ARGMAX:\n NumpyServer.np_argmax,\n\n TFFunctions.HUB_LOAD:\n HubServer.hub_load,\n}\n\nIN_GRAPH = { \n TFFunctions.LOCALQ_DEBUG,\n TFFunctions.MODEL_EXIST,\n TFFunctions.TF_CALLABLE,\n TFFunctions.OBJECT_SLICER,\n TFFunctions.TF_SHAPE,\n TFFunctions.TF_RESHAPE,\n TFFunctions.TENSOR_DIVISION,\n\n TFFunctions.TF_CONFIG_EXPERIMENTAL_LIST__PHYSICAL__DEVICES, \n TFFunctions.TF_CONFIG_EXPERIMENTAL_SET__MEMORY__GROWTH, \n TFFunctions.TF_KERAS_LAYERS_INPUT, \n TFFunctions.TF_KERAS_LAYERS_ZEROPADDING2D, \n TFFunctions.TF_KERAS_REGULARIZERS_L2, \n TFFunctions.TF_KERAS_LAYERS_CONV2D, \n TFFunctions.TF_KERAS_LAYERS_BATCHNORMALIZATION, \n TFFunctions.TF_KERAS_LAYERS_LEAKYRELU, \n TFFunctions.TF_KERAS_LAYERS_ADD, \n TFFunctions.TF_KERAS_MODEL, \n TFFunctions.TF_KERAS_LAYERS_LAMBDA, \n TFFunctions.TF_KERAS_LAYERS_UPSAMPLING2D, \n TFFunctions.TF_KERAS_LAYERS_CONCATENATE, \n TFFunctions.TF_IMAGE_DECODE__IMAGE,\n TFFunctions.TF_EXPAND__DIMS,\n TFFunctions.TF_IMAGE_RESIZE,\n TFFunctions.TF_KERAS_APPLICATIONS_MOBILENETV2,\n\n TFFunctions.TF_MODEL_LOAD_WEIGHTS,\n\n TFFunctions.NP_ARGMAX\n}\n\nclass ResourceMoveRequest:\n class Command(Enum):\n ADD = 1\n GIVEBACK = 2\n\n def __init__(self, command, client, mem, cfs_quota_us, cfs_period_us, sync=False):\n self.command = command\n self.client_id = client\n self.memory = int(mem)\n self.cfs_quota_us = int(cfs_quota_us)\n self.cfs_period_us = int(cfs_period_us)\n self.sync = sync\n if sync:\n debug(f'{command} request_synced')\n self.done = Event()\n\nclass PocketManager:\n universal_key = 0x1001 # key for message queue\n aux_key = 0x1002\n __instance = None\n\n @staticmethod\n def get_instance():\n if PocketManager.__instance == None:\n PocketManager()\n\n return PocketManager.__instance\n\n\n def __init__(self):\n if PocketManager.__instance != None:\n raise Exception('Singleton instance exists already!')\n\n self.gq = MessageQueue(PocketManager.universal_key, IPC_CREX)\n # self.aux_gq = MessageQueue(PocketManager.aux_key, IPC_CREX)\n if not IsolationControl.PRIVATEQUEUE:\n print('privatequeue on'); sys.stdout.flush()\n self.aux_gq = MessageQueue(PocketManager.aux_key, IPC_CREX)\n self.gq_thread = Thread(target=self.pocket_new_connection)\n self.handle_clients_thread = Thread(target=self.pocket_serving_client)\n self.queues_dict = {}\n self.per_client_object_store = {}\n self.model_dict = {}\n self.shmem_dict = {}\n self.dict_modelname_to_session = {} \n self.dict_clientid_to_modelname = {} # todo - clean up @ detach\n\n self.resource_move_queue = Queue()\n PocketManager.__instance = self\n # self.executor = ThreadPoolExecutor(10)\n self.futures = {} # todo - clean up @ detach\n\n # self.default_session = tf.compat.v1.Session(graph=tf.Graph())\n\n self.graph_build_in_progress = False\n\n def start(self):\n self.gq_thread.daemon = True\n self.gq_thread.start()\n\n # # self resource moving\n # self.rsrc_mgr_thread = Thread(target=self.handle_resource_move_request) # todo: remove\n # self.rsrc_mgr_thread.daemon=True\n # self.rsrc_mgr_thread.start()\n\n self.handle_clients_thread.daemon = True\n self.handle_clients_thread.start()\n self.handle_clients_thread.join()\n self.gq_thread.join()\n\n def handle_resource_move_request(self): #@@@\n while True:\n request = 
self.resource_move_queue.get()\n client_id = request.client_id\n mem = request.memory\n cfs_quota_us = request.cfs_quota_us\n cfs_period_us = request.cfs_period_us\n # debug(request.__dict__)\n try: \n if request.command == ResourceMoveRequest.Command.ADD:\n # sleep(ADD_INTERVAL)\n Utils.add_resource(client_id, mem, cfs_quota_us, cfs_period_us)\n elif request.command == ResourceMoveRequest.Command.GIVEBACK:\n # sleep(DEDUCT_INTERVAL)\n Utils.deduct_resource(client_id, mem, cfs_quota_us, cfs_period_us)\n\n if request.sync:\n # debug('sync request!!!!!!!')\n request.done.set()\n else:\n # debug('request not synced!!!!!!!')\n pass\n except OSError as e:\n print(repr(e))\n print(e)\n\n def pocket_new_connection(self):\n from time import time\n while True:\n raw_msg, raw_type = self.gq.receive(block=True, type=CLIENT_TO_SERVER)\n args_dict = json.loads(raw_msg)\n raw_type = args_dict['raw_type']\n \n # debug('misun>>', args_dict)\n # debug(hex(raw_type))\n\n type = PocketControl(raw_type)\n reply_type = raw_type | 0x40000000\n if type == PocketControl.CONNECT:\n # debug('>>>conn')\n\n client_id = args_dict.get('client_id')\n self.add_client_queue(client_id, args_dict['key'])\n if IsolationControl.CAPABILITIESLIST:\n self.per_client_object_store[client_id] = {}\n else:\n self.per_client_object_store_indirect_handle = {}\n self.per_client_object_store_indirect_handle[client_id] = []\n\n mem = args_dict.get('mem')\n cfs_quota_us = args_dict.get('cfs_quota_us')\n cfs_period_us = args_dict.get('cfs_period_us')\n Utils.add_resource(client_id, mem, cfs_quota_us, cfs_period_us)\n self.send_ack_to_client(client_id)\n\n self.shmem_dict[client_id] = SharedMemoryChannel(client_id)\n elif type == PocketControl.DISCONNECT:\n # debug('>>>detach')\n client_id = args_dict.get('client_id')\n its_lq = self.queues_dict.pop(client_id)\n if IsolationControl.CAPABILITIESLIST:\n self.per_client_object_store.pop(client_id, None)\n else:\n for obj_key in self.per_client_object_store_indirect_handle[client_id]:\n self.per_client_object_store.pop(obj_key, None)\n self.per_client_object_store_indirect_handle.pop(client_id)\n self.shmem_dict.pop(client_id, None)\n\n self.dict_clientid_to_modelname.pop(client_id, None)\n self.futures.pop(client_id, None)\n\n # if args_dict['client_id'] in _matmultest_dict: ## test_code\n # matrices = _matmultest_dict.pop(args_dict['client_id'])\n # del matrices\n # import ctypes\n # libmatmul = ctypes.CDLL('/root/tfrpc/server/test/libmatmul.so')\n # libmatmul.free_mem.argtypes = [ctypes.c_void_p]\n # libmatmul.free_mem.restype = None\n # for matrix in matrices:\n # libmatmul.free_mem(matrix)\n\n\n gc.collect()\n\n mem = args_dict.get('mem')\n cfs_quota_us = args_dict.get('cfs_quota_us')\n cfs_period_us = args_dict.get('cfs_period_us')\n # print('>>>>>>', mem, cfs_quota_us)\n Utils.deduct_resource(client_id, mem, cfs_quota_us, cfs_period_us)\n \n return_dict = {'result': ReturnValue.OK.value}\n\n return_byte_obj = json.dumps(return_dict)\n its_lq.send(return_byte_obj, type = reply_type)\n elif type == PocketControl.START_BUILD_GRAPH:\n debug('START BUILD')\n client_id = args_dict['client_id']\n if self.graph_build_in_progress == True:\n return_dict = {'result': ReturnValue.ERROR.value, 'message': 'graph_build already in progress'}\n else:\n return_dict = {'result': ReturnValue.OK.value, 'message': 'build start!'}\n self.graph_build_in_progress = True\n self.graph_build_owner = client_id\n\n return_byte_obj = json.dumps(return_dict)\n self.queues_dict[client_id].send(return_byte_obj, 
block=True, type=reply_type)\n elif type == PocketControl.END_BUILD_GRAPH:\n debug('END BUILD')\n client_id = args_dict['client_id']\n if self.graph_build_in_progress == True:\n return_dict = {'result': ReturnValue.OK.value, 'message': 'build end!'}\n self.graph_build_in_progress = False\n self.graph_build_owner = None\n else:\n return_dict = {'result': ReturnValue.ERROR.value, 'message': 'graph_build not in progress'}\n\n return_byte_obj = json.dumps(return_dict)\n self.queues_dict[client_id].send(return_byte_obj, block=True, type=reply_type)\n\n elif type == PocketControl.HELLO:\n return_dict = {'result': ReturnValue.OK.value, 'message': args_dict['message']}\n return_byte_obj = json.dumps(return_dict)\n self.gq.send(return_byte_obj, type=reply_type)\n \n sleep(GLOBAL_SLEEP)\n\n def pocket_serving_client(self):\n import sysv_ipc\n while True:\n if IsolationControl.PRIVATEQUEUE:\n for client_id, queue in self.queues_dict.copy().items():\n try:\n # if client_id in self.futures and not self.futures[client_id].done():\n # continue\n # try:\n raw_msg, _ = queue.receive(block=False, type=CLIENT_TO_SERVER)\n # except Exception as e:\n # print(e)\n # print(f'client_id={client_id[:16]}, queue={queue}')\n # sys.stdout.flush()\n\n args_dict = json.loads(raw_msg)\n\n # if self.graph_build_in_progress and client_id == self.graph_build_owner:\n # self.worker_naive(client_id, queue, args_dict)\n # else:\n self.worker_name(client_id, queue, args_dict)\n # self.futures[client_id] = self.executor.submit(self.worker_naive, client_id, queue, args_dict)\n # self.futures[client_id] = self.executor.submit(self.worker_name, client_id, queue, args_dict)\n # self.worker_naive(client_id, queue, args_dict)\n \n except BusyError as err1:\n pass\n except sysv_ipc.ExistentialError as err2:\n print(f'client_id={client_id}, queue={queue}')\n print('queue does not exist anymore..')\n print(self.queues_dict)\n else:\n try:\n raw_msg, _ = self.aux_gq.receive(block=False, type=CLIENT_TO_SERVER)\n args_dict = json.loads(raw_msg)\n client_id = args_dict['client_id']\n queue = self.queues_dict[client_id]\n self.worker_name(client_id, queue, args_dict)\n except BusyError as err1:\n pass\n except sysv_ipc.ExistentialError as err2:\n print(f'client_id={client_id}')\n print('queue does not exist anymore..')\n print(self.queues_dict)\n\n\n # sleep(LOCAL_SLEEP)\n\n def worker_name(self, client_id, queue, args_dict):\n raw_type = args_dict.pop('raw_type')\n\n function_type = TFFunctions(raw_type)\n reply_type = raw_type | 0x40000000\n\n # debug(function_type, client_id, args_dict)\n client_id = args_dict.pop('client_id')\n mem = args_dict.pop('mem')\n cfs_quota_us = args_dict.pop('cfs_quota_us')\n cfs_period_us = args_dict.pop('cfs_period_us')\n\n # from time import time ## test_code\n # t1 = time()\n # Utils.add_resource_daemon(client_id, mem, cfs_quota_us, cfs_period_us)\n # t2 = time()\n\n mem_transfer, cpu_transfer = Utils.add_resource(client_id, mem, cfs_quota_us, cfs_period_us)\n\n result, ret = tf_function_dict[function_type](client_id, **args_dict)\n\n Utils.deduct_resource(client_id, mem_transfer, cpu_transfer, cfs_period_us)\n\n # ## test_code\n # t3 = time()\n # Utils.deduct_resource_daemon(client_id, mem, cfs_quota_us, cfs_period_us)\n # t4 = time()\n # debug(f'resource_reallocation={(t2-t1) + (t4-t3)}')\n\n return_dict = {'result': result}\n if result == ReturnValue.OK.value:\n return_dict.update({'actual_return_val': ret})\n else:\n return_dict.update(ret)\n return_byte_obj = json.dumps(return_dict)\n\n 
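# reply on this client's private queue; reply_type is the request type with the reply bit 0x40000000 set\n 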
queue.send(return_byte_obj, type = reply_type)\n\n def add_client_queue(self, client_id, key):\n client_queue = MessageQueue(key)\n self.queues_dict[client_id] = client_queue\n\n def send_ack_to_client(self, client_id):\n return_dict = {'result': ReturnValue.OK.value, 'message': 'you\\'re acked!'}\n return_byte_obj = json.dumps(return_dict)\n reply_type = PocketControl.CONNECT.value | 0x40000000\n\n self.queues_dict[client_id].send(return_byte_obj, block=True, type=reply_type)\n\n def add_object_to_per_client_store(self, client_id, object):\n if IsolationControl.CAPABILITIESLIST:\n self.per_client_object_store[client_id][id(object)] = object\n else:\n self.per_client_object_store[id(object)] = object\n self.per_client_object_store_indirect_handle[client_id].append(id(object))\n\n def get_object_to_per_client_store(self, client_id, obj_id):\n if IsolationControl.CAPABILITIESLIST:\n return self.per_client_object_store[client_id][obj_id]\n else:\n return self.per_client_object_store[obj_id]\n\n def get_real_object_with_mock(self, client_id, mock):\n if IsolationControl.CAPABILITIESLIST:\n return self.per_client_object_store[client_id][mock['obj_id']]\n else:\n return self.per_client_object_store[mock['obj_id']]\n\n def add_built_model(self, name, model):\n self.model_dict[name] = model\n\n def disassemble_args(self, client_id, args, real_args):\n for index, elem in enumerate(args):\n real_args.append(None)\n if type(elem) in [list, tuple]:\n real_args[index] = []\n self.disassemble_args(client_id, elem, real_args[index])\n elif type(elem) is dict and 'obj_id' not in elem: # nested dictionary?\n if '_typename' in elem and elem['_typename'] == 'NPArray': #ndarray\n length = elem['contents_length']\n dtype = elem['dtype']\n shape = elem['shape']\n real_args[index] = np.frombuffer(self.shmem_dict[client_id].read(length), dtype).reshape(shape)\n else:\n real_args[index] = {}\n self.disassemble_kwargs(client_id, elem, real_args[index])\n elif type(elem) in (int, float, bool, str, bytes, bytearray):\n real_args[index] = elem\n else:\n real_args[index] = self.get_real_object_with_mock(client_id, elem)\n\n def disassemble_kwargs(self, client_id, kwargs, real_kwargs):\n for key, value in kwargs.items():\n real_kwargs[key] = None\n if type(value) in [list, tuple]:\n real_kwargs[key] = []\n self.disassemble_args(client_id, value, real_kwargs[key])\n elif type(value) is dict and 'obj_id' not in value: # # nested dictionary?\n if '_typename' in value and value['_typename'] == 'NPArray': #ndarray\n length = value['contents_length']\n dtype = value['dtype']\n shape = value['shape']\n real_kwargs[key] = np.frombuffer(self.shmem_dict[client_id].read(length), dtype).reshape(shape)\n else:\n real_kwargs[key] = {}\n self.disassemble_kwargs(client_id, value, real_kwargs[key])\n elif type(value) in (int, float, bool, str, bytes, bytearray):\n real_kwargs[key] = value\n else:\n real_kwargs[key] = self.get_real_object_with_mock(client_id, value)\n","sub_path":"tfrpc/server/pocketmgr_isolation.py","file_name":"pocketmgr_isolation.py","file_ext":"py","file_size_in_byte":83854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"235463832","text":"'''\nCreated on April 26,2016\n@author: Prajit Kumar Das\n\nUsage: python processTermDocMatrixTFIDF.py\\n\n\nProcess files for feature generation in syscall analysis.\n'''\nimport time\nimport sys\nimport json\nimport logging\n\ndef getAggregateInfo(app,key,aggregateDict,masterDict,syscallList):\n\tannotated_category = 
masterDict[key][1]\n\t# google_play_category = masterDict[key][0]\n\tcallFrequencies = masterDict[key][2]\n\n\tcount = 0\n\tcallDict = {}\n\tif annotated_category in aggregateDict:\n\t\tcallDict = aggregateDict[annotated_category]\n\t\tfor call in syscallList:\n\t\t\tcallDict[call] += callFrequencies[count]\n\t\t\tcount += 1\n\telse:\n\t\tfor call in syscallList:\n\t\t\tcallDict[call] = callFrequencies[count]\n\t\t\tcount += 1\n\n\taggregateDict[annotated_category] = callDict\n\treturn aggregateDict\n\ndef main(argv):\n\tif len(sys.argv) != 1:\n\t\tsys.stderr.write('Usage: python processTermDocMatrixTFIDF.py\\n')\n\t\tsys.exit(1)\n\n\tstartTime = time.time()\n\n\tmasterDict = json.loads(open(\"termDocMatrix.json\",'r').read())\n\tsyscallList = masterDict[\"allSystemCalls\"]\n\taggregateDict = {}\n\n\tfor info in masterDict:\n\t\tif info == \"allSystemCalls\":\n\t\t\tcontinue\n\t\telse:\n\t\t\tapp = info.split('.SatJan7')[0]\n\t\t\taggregateDict = getAggregateInfo(app,info,aggregateDict,masterDict,syscallList)\n\n\topen(\"tfidfFromTermDocMatirx.json\",\"w\").write(json.dumps(aggregateDict,indent=4,sort_keys=True))\n\n\texecutionTime = str((time.time()-startTime)*1000)\n\tlogging.debug('Execution time was: '+executionTime+' ms')\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)","sub_path":"code/malwareSystemCallAnalysis/processTermDocMatrixTFIDF.py","file_name":"processTermDocMatrixTFIDF.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"526550737","text":"from tkinter import *\nfrom tkinter import ttk\n\ndef main():\n\twindow = Window()\n\twindow.mainloop()\n\nclass Window(Tk):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\t# object attributes\n\t\tself.width = 350\n\t\tself.height = 160\n\t\t# configure\n\t\tself.title(\"Expand Children as Window Resizes - Row 1 Heavy\")\n\t\tself.config(width = self.width, height = self.height)\n\t\tself.rowconfigure(0, weight = 1)\n\t\tself.columnconfigure(0, weight = 1)\n\t\t#populate\n\t\tmainframe = MainFrame(self)\n\nclass MainFrame(ttk.Frame):\n\tdef __init__(self, window):\n\t\tsuper().__init__(window)\n\t\t# configure\n\t\tself.grid(sticky = (N, W, E, S))\n\t\tself.columnconfigure(0, minsize = 60, weight = 1)\n\t\tself.columnconfigure(1, minsize = 220, weight = 3)\n\t\tself.columnconfigure(2, minsize = 60, weight = 1)\n\t\tself.rowconfigure(0, minsize = 40, weight = 1)\n\t\tself.rowconfigure(1, minsize = 40, weight = 3)\n\t\tself.rowconfigure(2, minsize = 50, weight = 1)\n\t\t# populate\n\t\tplace_holder_01 = PlaceHolderFrame(self)\n\t\tplace_holder_02 = PlaceHolderFrame(self)\n\t\tplace_holder_03 = PlaceHolderFrame(self)\n\t\tplace_holder_04 = PlaceHolderFrame(self)\n\t\tquit_button = QuitButton(self, window)\n\t\tplace_holder_05 = PlaceHolderFrame(self)\n\t\tplace_holder_06 = PlaceHolderFrame(self)\n\t\tplace_holder_07 = PlaceHolderFrame(self)\n\t\tplace_holder_08 = PlaceHolderFrame(self)\n\t\t# layout\n\t\tplace_holder_01.grid(column = 0, row = 0, sticky = (N, W, E, S))\n\t\tplace_holder_02.grid(column = 1, row = 0, sticky = (N, W, E, S))\n\t\tplace_holder_03.grid(column = 2, row = 0, sticky = (N, W, E, S))\n\t\tplace_holder_04.grid(column = 0, row = 1, sticky = (N, W, E, S))\n\t\tquit_button.grid(column = 1, row = 1, sticky = (N, W, E, S))\n\t\tplace_holder_05.grid(column = 2, row = 1, sticky = (N, W, E, S))\n\t\tplace_holder_06.grid(column = 0, row = 2, sticky = (N, W, E, S))\n\t\tplace_holder_07.grid(column = 1, row = 2, sticky = (N, W, 
E, S))\n\t\tplace_holder_08.grid(column = 2, row = 2, sticky = (N, W, E, S))\n\nclass PlaceHolderFrame(ttk.Frame):\n\tdef __init__(self, parent):\n\t\tsuper().__init__(parent)\n\t\t# configure\n\t\tself.configure(borderwidth = 5, relief = \"ridge\")\n\t\tself.rowconfigure(0, weight = 1)\n\t\tself.columnconfigure(0, weight = 1)\n\t\nclass QuitButton(ttk.Button):\n\tdef __init__(self, parent, window):\n\t\tsuper().__init__(parent)\n\t\t# object attributes\n\t\tself.text = 'Quit'\n\t\tself.top_window = window\n\t\t# configure\n\t\tself.config(text = self.text, command = self.quit)\n\n\tdef quit(self):\n\t\t# do something\n\t\tself.top_window.quit()\n\t\t\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"005_grid/grid_008_expand_with_resize_weighty.py","file_name":"grid_008_expand_with_resize_weighty.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"146949531","text":"from utils import data_utils, nnet_utils, env_utils\nfrom typing import Dict, List, Tuple, Any\n\nfrom environments.environment_abstract import Environment\nfrom updaters.updater import Updater\nfrom search_methods.gbfs import gbfs_test\nimport torch\nimport wandb\nimport torch.nn as nn\nimport os\nimport pickle\n\nfrom argparse import ArgumentParser\nimport numpy as np\nimport time\n\nimport sys\nimport shutil\n\n\ndef parse_arguments(parser: ArgumentParser) -> Dict[str, Any]:\n # Environment\n parser.add_argument('--env', type=str, required=True, help=\"Environment\")\n\n # Debug\n parser.add_argument('--debug', action='store_true', default=False, help=\"\")\n\n # Gradient Descent\n parser.add_argument('--lr', type=float, default=0.001, help=\"Initial learning rate\")\n parser.add_argument('--lr_d', type=float, default=0.9999993, help=\"Learning rate decay for every iteration. \"\n \"Learning rate is decayed according to: \"\n \"lr * (lr_d ^ itr)\")\n\n # Training\n parser.add_argument('--max_itrs', type=int, default=1000000, help=\"Maximum number of iterations\")\n parser.add_argument('--batch_size', type=int, default=1000, help=\"Batch size\")\n parser.add_argument('--single_gpu_training', action='store_true',\n default=True, help=\"If set, train only on one GPU. Update step will still use \"\n \"all GPUs given by CUDA_VISIBLE_DEVICES\")\n\n # Update\n parser.add_argument('--loss_thresh', type=float, default=0.05, help=\"When the loss falls below this value, \"\n \"the target network is updated to the current \"\n \"network.\")\n parser.add_argument('--states_per_update', type=int, default=1000, help=\"How many states to train on before \"\n \"checking if target network should be \"\n \"updated\")\n parser.add_argument('--epochs_per_update', type=int, default=1, help=\"How many epochs to train for. \"\n \"Making this greater than 1 could increase \"\n \"risk of overfitting, however, one can train \"\n \"for more iterations without having to \"\n \"generate more data.\")\n parser.add_argument('--num_update_procs', type=int, default=1, help=\"Number of parallel workers used to \"\n \"compute updated cost-to-go function\")\n parser.add_argument('--update_nnet_batch_size', type=int, default=10000, help=\"Batch size of each nnet used for \"\n \"each process update. \"\n \"Make smaller if running out of \"\n \"memory.\")\n parser.add_argument('--max_update_steps', type=int, default=1, help=\"Number of steps to take when trying to \"\n \"solve training states with \"\n \"greedy best-first search (GBFS) or A* search. \"\n \"Each state \"\n \"encountered when solving is added to the \"\n \"training set. Number of steps starts at \"\n \"1 and is increased every update until \"\n \"the maximum number is reached. \"\n \"Value of 1 is the same as doing \"\n \"value iteration on only given training \"\n \"states. Increasing this number \"\n \"can make the cost-to-go function more \"\n \"robust by exploring more of the \"\n \"state space.\")\n\n parser.add_argument('--update_method', type=str, default=\"GBFS\", help=\"GBFS or ASTAR. If max_update_steps is 1 \"\n \"then either one is the same as doing value \"\n \"iteration\")\n\n parser.add_argument('--eps_max', type=float, default=0, help=\"When adding training states with GBFS, each \"\n \"instance will have an eps that is distributed \"\n \"randomly between 0 and eps_max.\")\n # Testing\n parser.add_argument('--num_test', type=int, default=10000, help=\"Number of test states.\")\n\n # data\n parser.add_argument('--back_max', type=int, required=True, help=\"Maximum number of backwards steps from goal\")\n parser.add_argument('--dynamic_back_max', action='store_true', default=False, help=\"Whether to dynamically increase the difficulty of the training exercises\")\n parser.add_argument('--dynamic_back_max_per', type = float, default=25, help=\"Minimum required solve-percentage to level up difficulty of the training exercises.\")\n parser.add_argument(\"--fixed_difficulty\", action='store_true', default=False, help = \"fix difficulty of generated training examples during each lesson, to be used in combination with dynamic_back_max=True\")\n parser.add_argument(\"--uniform_data_gen\", action='store_true', default=False, help = \"toggle the random flag in generate_state method in layer 2. Right now only to be used for layer 2. If turned on, backwards steps from goal disabled. Data generated randomly\")\n parser.add_argument(\"--normal_dist\", action='store_true', default=False, help = \"Use a normal distribution with mean = back_max and std = 3 to generate examples\")\n\n # model\n parser.add_argument('--nnet_name', type=str, required=True, help=\"Name of neural network\")\n parser.add_argument('--update_num', type=int, default=0, help=\"Update number\")\n parser.add_argument('--save_dir', type=str, default=\"saved_models\", help=\"Directory to which to save model\")\n parser.add_argument('--model_name', type=str, required=True, help=\"Which model to use: options listed in pytorch_models.py\")\n\n # parse arguments\n args = parser.parse_args()\n\n args_dict: Dict[str, Any] = vars(args)\n\n # make save directory\n model_dir: str = \"%s/%s/\" % (args_dict['save_dir'], args_dict['nnet_name'])\n args_dict['targ_dir'] = \"%s/%s/\" % (model_dir, 'target')\n args_dict['curr_dir'] = \"%s/%s/\" % (model_dir, 'current')\n\n if not os.path.exists(args_dict['targ_dir']):\n os.makedirs(args_dict['targ_dir'])\n\n if not os.path.exists(args_dict['curr_dir']):\n os.makedirs(args_dict['curr_dir'])\n\n args_dict[\"output_save_loc\"] = \"%s/output.txt\" % model_dir\n\n # save args\n args_save_loc = \"%s/args.pkl\" % model_dir\n print(\"Saving arguments to %s\" % args_save_loc)\n with open(args_save_loc, \"wb\") as f:\n pickle.dump(args, f, protocol=-1)\n\n print(\"Batch size: %i\" % args_dict['batch_size'])\n\n return args_dict\n\n\ndef copy_files(src_dir: str, dest_dir: str):\n src_files: List[str] = os.listdir(src_dir)\n for file_name in src_files:\n full_file_name: str = os.path.join(src_dir, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest_dir)\n\n\ndef do_update(back_max: int, update_num: int, env: Environment, max_update_steps: int, update_method: str,\n num_states: int, eps_max: float, heur_fn_i_q, heur_fn_o_qs, fixed_difficulty = False, random=False, normal_dist = False) -> Tuple[List[np.ndarray], np.ndarray]:\n '''Generate randomly scrambled states as training examples, do one step look ahead to get training labels '''\n '''Generate num_states training examples'''\n update_steps: int = min(update_num + 1, max_update_steps) #1 in our case\n num_states: int = int(np.ceil(num_states / update_steps))\n\n # Do updates\n output_time_start = time.time()\n\n print(\"Updating cost-to-go with value iteration\")\n if max_update_steps > 1:\n print(\"Using %s with %i step(s) to add extra states to training set\" % (update_method.upper(), update_steps))\n updater: Updater = Updater(env, num_states, back_max, heur_fn_i_q, heur_fn_o_qs, update_steps, update_method,\n update_batch_size=10000, eps_max=eps_max, fixed_difficulty = fixed_difficulty, random=random, normal_dist= normal_dist)\n\n states_update_nnet: List[np.ndarray]\n output_update: np.ndarray\n states_update_nnet, output_update, is_solved = updater.update()\n print(\"states_update_nnet\", states_update_nnet[0])\n # Print stats\n if max_update_steps > 1:\n print(\"%s produced %s states, %.2f%% solved (%.2f seconds)\" % (update_method.upper(),\n format(output_update.shape[0], \",\"),\n 100.0 * np.mean(is_solved),\n time.time() - output_time_start))\n\n mean_ctg = output_update[:, 0].mean()\n min_ctg = output_update[:, 0].min()\n max_ctg = output_update[:, 0].max()\n print(\"Cost-to-go (mean/min/max): %.2f/%.2f/%.2f\" % (mean_ctg, min_ctg, max_ctg))\n wandb.log({\"mean_ctg\": mean_ctg, \"is_solved\": is_solved})\n\n return states_update_nnet, output_update\n\n\ndef load_nnet(nnet_dir: str, env: Environment, model_name: str) -> Tuple[nn.Module, int, int]:\n nnet_file: str = \"%s/model_state_dict.pt\" % nnet_dir\n if os.path.isfile(nnet_file):\n nnet = nnet_utils.load_nnet(nnet_file, env.get_nnet_model(model_name))\n itr: int = pickle.load(open(\"%s/train_itr.pkl\" % nnet_dir, \"rb\"))\n update_num: int = pickle.load(open(\"%s/update_num.pkl\" % nnet_dir, \"rb\"))\n else:\n nnet: nn.Module = env.get_nnet_model(model_name)\n itr: int = 0\n update_num: int = 0\n\n return nnet, itr, update_num\n\n\ndef main():\n\n # arguments\n parser: ArgumentParser = ArgumentParser()\n args_dict: Dict[str, Any] = parse_arguments(parser)\n\n if not args_dict[\"debug\"]:\n sys.stdout = data_utils.Logger(args_dict[\"output_save_loc\"], \"a\")\n\n # environment\n env: Environment = env_utils.get_environment(args_dict['env'])\n\n # get device\n on_gpu: bool\n device: torch.device\n device, devices, on_gpu = nnet_utils.get_device()\n\n print(\"device: %s, devices: %s, on_gpu: %s\" % (device, devices, on_gpu))\n\n # load nnet\n nnet: nn.Module\n itr: int\n update_num: int\n nnet, itr, update_num = load_nnet(args_dict['curr_dir'], env, args_dict[\"model_name\"])\n\n nnet.to(device)\n if on_gpu and (not args_dict['single_gpu_training']):\n nnet = nn.DataParallel(nnet)\n\n #initialize data visualizer\n run_id = \"{}-{}\".format(args_dict[\"env\"], args_dict[\"nnet_name\"])\n wandb.init(project='deepcubaa',entity = \"cs229deepcubeteam\", id = run_id, name = run_id, config = args_dict)\n\n\n dynamic_back_max = 0\n can_increase_dynamic_back_max = False\n # training\n '''In every itr:\n 1. We generate args_dict['states_per_update'] random cubes (training examples), and corresponding labels using\n value iteration.\n 2. Train DNN on these examples (for 1 epoch), using a batch size of args_dict['batch_size']\n 3. Check if DNN's loss is below threshold, if yes, update target network with current network.\n 4. 
Test the DNN on random cubes (different from training cubes)\n '''\n\n '''Target vs Current model'''\n '''Target model is the \"teacher\", it's responsible for generating training examples and labels\n Current model is the \"learner\", it updates it's parameters by learning from the examples\n Once the current model becomes good enough, it will become the new teacher\n '''\n while itr < args_dict['max_itrs']:\n network_updated = False\n # update\n targ_file: str = \"%s/model_state_dict.pt\" % args_dict['targ_dir']\n all_zeros: bool = not os.path.isfile(targ_file)\n heur_fn_i_q, heur_fn_o_qs, heur_procs = nnet_utils.start_heur_fn_runners(args_dict['num_update_procs'],\n args_dict['targ_dir'],\n device, on_gpu, env,\n all_zeros=all_zeros,\n clip_zero=True,\n batch_size=args_dict[\n \"update_nnet_batch_size\"],\n model_name = args_dict['model_name'])\n\n states_nnet: List[np.ndarray]\n outputs: np.ndarray\n if args_dict[\"dynamic_back_max\"]:\n states_nnet, outputs = do_update(dynamic_back_max, update_num, env,\n args_dict['max_update_steps'], args_dict['update_method'],\n args_dict['states_per_update'], args_dict['eps_max'],\n heur_fn_i_q, heur_fn_o_qs, fixed_difficulty=args_dict[\"fixed_difficulty\"], random=False, normal_dist = args_dict[\"normal_dist\"])\n elif args_dict[\"uniform_data_gen\"]:\n states_nnet, outputs = do_update(dynamic_back_max, update_num, env,\n args_dict['max_update_steps'], args_dict['update_method'],\n args_dict['states_per_update'], args_dict['eps_max'],\n heur_fn_i_q, heur_fn_o_qs, random=args_dict[\"uniform_data_gen\"], fixed_difficulty=False)\n else:\n states_nnet, outputs = do_update(args_dict[\"back_max\"], update_num, env,\n args_dict['max_update_steps'], args_dict['update_method'],\n args_dict['states_per_update'], args_dict['eps_max'],\n heur_fn_i_q, heur_fn_o_qs, fixed_difficulty = False, random=False)\n\n nnet_utils.stop_heuristic_fn_runners(heur_procs, heur_fn_i_q)\n\n # train nnet\n num_train_itrs: int = args_dict['epochs_per_update'] * np.ceil(outputs.shape[0] / args_dict['batch_size'])\n print(\"Training model for update number %i for %i iterations\" % (update_num, num_train_itrs))\n last_loss = nnet_utils.train_nnet(nnet, states_nnet, outputs, device, args_dict['batch_size'], num_train_itrs,\n itr, args_dict['lr'], args_dict['lr_d'])\n itr += num_train_itrs\n wandb.log({\"loss\": last_loss})\n # save nnet\n torch.save(nnet.state_dict(), \"%s/model_state_dict.pt\" % args_dict['curr_dir'])\n pickle.dump(itr, open(\"%s/train_itr.pkl\" % args_dict['curr_dir'], \"wb\"), protocol=-1)\n pickle.dump(update_num, open(\"%s/update_num.pkl\" % args_dict['curr_dir'], \"wb\"), protocol=-1)\n\n # test\n start_time = time.time()\n heuristic_fn = nnet_utils.get_heuristic_fn(nnet, device, env, batch_size=args_dict['update_nnet_batch_size'])\n max_solve_steps: int = min(update_num + 1, args_dict['back_max'])\n if args_dict[\"dynamic_back_max\"]:\n per_solved = gbfs_test(args_dict['num_test'], args_dict['back_max'], env, heuristic_fn, max_solve_steps=max_solve_steps, dynamic_back_max = dynamic_back_max)\n #if agents does decently well on problems generated dynamic_back_max steps, then increase dynamic_back_max\n if (per_solved>args_dict[\"dynamic_back_max_per\"]): #if percentage solved pass this number we increase the difficulty of the generated problems\n # can_increase_dynamic_back_max = True\n dynamic_back_max = min(args_dict[\"back_max\"], dynamic_back_max+1)\n #update network\n wandb.log({\"dynamic_back_max\": dynamic_back_max})\n\n else:\n 
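# static curriculum: evaluate with the configured back_max (or uniform random states when uniform_data_gen is set)\n 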
gbfs_test(args_dict['num_test'], args_dict['back_max'], env, heuristic_fn, max_solve_steps=max_solve_steps, random=args_dict[\"uniform_data_gen\"])\n\n wandb.log({\"max_solve_steps\": max_solve_steps})\n print(\"Test time: %.2f\" % (time.time() - start_time))\n\n # clear cuda memory\n torch.cuda.empty_cache()\n\n print(\"Last loss was %f\" % last_loss)\n if last_loss < args_dict['loss_thresh']:\n # Update nnet\n print(\"Updating target network\")\n copy_files(args_dict['curr_dir'], args_dict['targ_dir'])\n update_num = update_num + 1\n pickle.dump(update_num, open(\"%s/update_num.pkl\" % args_dict['curr_dir'], \"wb\"), protocol=-1)\n wandb.log({\"update_num\": update_num})\n print(\"Done\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ctg_approx/avi.py","file_name":"avi.py","file_ext":"py","file_size_in_byte":18001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"180808841","text":"from flask import Flask\nfrom flask import render_template\nimport socket\nimport os\n\napp = Flask(__name__)\n\ncount = 0\n\n@app.route('/', methods=[\"GET\"])\n@app.route('/hello/', methods=[\"GET\"])\n@app.route('/hello/<name>', methods=[\"GET\"])\ndef hello(name=None):\n global count\n count += 1\n\n return render_template('hello.html', host=socket.gethostname(), name=name, count=count)\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"426002001","text":"import tflite_runtime.interpreter as tflite\nimport cv2\nimport numpy as np\n\ninterpreter = tflite.Interpreter(model_path=\"thermal_face_automl_edge_fast.tflite\")\ninterpreter.allocate_tensors()\n\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\nprint(input_details)\nprint(output_details)\n\noriginal = cv2.imread(\"data/pic04.jpg\")\nimage = cv2.resize(original, (192, 192))\n\nvalue = np.expand_dims(image, axis=0)\n\ninterpreter.set_tensor(input_details[0]['index'], value)\n\ninterpreter.invoke()\n\n#output_data = interpreter.get_tensor(output_details[0]['index'])\n\n# Retrieve detection results\nboxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects\nclasses = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects\nscores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects\n# num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)\n\nmin_conf_threshold = .5\nimH = original.shape[0]\nimW = original.shape[1]\nlabels = [\"label\"]\n\n# Loop over all detections and draw detection box if confidence is above minimum threshold\nfor i in range(len(scores)):\n if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):\n # Get bounding box coordinates and draw box\n # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()\n ymin = int(max(1, (boxes[i][0] * imH)))\n xmin = int(max(1, (boxes[i][1] * imW)))\n ymax = int(min(imH, (boxes[i][2] * imH)))\n xmax = int(min(imW, (boxes[i][3] * imW)))\n\n cv2.rectangle(original, (xmin, ymin), (xmax, ymax), (10, 255, 0), 2)\n cv2.imwrite(\"results_05/pic04.jpg\", original)\n\n# All the results 
have been drawn on the image, now display the image\ncv2.imshow('Object detector', original)\n\n# Press any key to continue to next image, or press 'q' to quit\nif cv2.waitKey(0) == ord('q'):\n cv2.destroyAllWindows()\n","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"510635930","text":"def find_missing(lista,listb):\n\n\tseta = set(lista) ^ set(listb)\n\n\tif len(lista) !=0 and len(listb) != 0:\n\n\t\tif lista == listb:\n\n\t\t\treturn 0\n\t\telse:\n\n\t\t\tlistc = list(seta)[0]\n\n\t\t\treturn listc\n \n\t\t\t#return [x for x in lista+listb if (x not in lista) or (x not in listb)]\t\n\telse:\n\n\t\treturn 0","sub_path":"missing_num_def.py","file_name":"missing_num_def.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"53576650","text":"\r\nfrom smac.env import StarCraft2Env\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n\r\nnp.set_printoptions(threshold=np.inf)\r\n\r\n\r\n\r\nclass AQ_network(nn.Module):\r\n \r\n def __init__(self, obs_size, n_actions):\r\n super(AQ_network, self).__init__()\r\n self.AQ_network = nn.Sequential(\r\n \r\n nn.Linear(obs_size, 70),\r\n nn.ReLU(),\r\n nn.Linear(70, 70),\r\n nn.ReLU(),\r\n \r\n nn.Linear(70, n_actions) \r\n )\r\n \r\n self.sm_layer = nn.Softmax(dim=1)\r\n \r\n def forward(self, x):\r\n aq_network_out = self.AQ_network(x)\r\n sm_layer_out = self.sm_layer(aq_network_out)\r\n \r\n return sm_layer_out\r\n \r\n\r\ndef select_actionFox(action_probabilities, avail_actions_ind, epsilon):\r\n \r\n for ia in action_probabilities:\r\n action = np.argmax(action_probabilities)\r\n if action in avail_actions_ind:\r\n return action\r\n else:\r\n action_probabilities[action] = 0\r\n \r\ndef main():\r\n env = StarCraft2Env(map_name=\"75z1сFOX\", difficulty=\"1\")\r\n env_info = env.get_env_info()\r\n obs_size = env_info.get('obs_shape')\r\n print (\"obs_size=\",obs_size)\r\n n_actions = env_info[\"n_actions\"]\r\n n_agents = env_info[\"n_agents\"]\r\n \r\n n_episodes = 50 \r\n epsilon = 0\r\n \r\n obs_sizeXY = 4\r\n \r\n \r\n q_network = AQ_network(obs_sizeXY, n_actions)\r\n \r\n q_network_list = []\r\n \r\n for agent_id in range(n_agents):\r\n q_network_list.append(q_network)\r\n \r\n state = torch.load(\"aqnet_%.0f.dat\"%agent_id, map_location=lambda stg, _: stg)\r\n q_network_list[agent_id].load_state_dict(state)\r\n \r\n print(q_network_list[0])\r\n \r\n \r\n ###########################################################################\r\n for e in range(n_episodes):\r\n env.reset()\r\n terminated = False\r\n episode_reward = 0\r\n \r\n \r\n #######################################################################\r\n while not terminated:\r\n state = env.get_state()\r\n \r\n actions = []\r\n action = 0\r\n \r\n actionsFox = np.zeros([n_agents]) \r\n \r\n \r\n obs_agentXY = np.zeros([n_agents, obs_sizeXY]) \r\n \r\n \r\n for agent_id in range(n_agents):\r\n ##############################################################\r\n \r\n \r\n \r\n unit = env.get_unit_by_id(agent_id)\r\n obs_agentXY[agent_id][0] = unit.pos.x\r\n obs_agentXY[agent_id][1] = unit.pos.y\r\n \r\n \r\n for e_id, e_unit in env.enemies.items():\r\n obs_agentXY[agent_id][2] = e_unit.pos.x\r\n obs_agentXY[agent_id][3] = e_unit.pos.y\r\n \r\n obs_agentT = torch.FloatTensor([obs_agentXY[agent_id]])\r\n \r\n 
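# run the 4-feature observation (own x,y and enemy x,y) through this agent's softmax Q-network\r\n 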
action_probabilitiesT = q_network_list[agent_id](obs_agentT)\r\n action_probabilities = action_probabilitiesT.data.numpy()[0]\r\n \r\n avail_actions = env.get_avail_agent_actions(agent_id)\r\n avail_actions_ind = np.nonzero(avail_actions)[0]\r\n \r\n action = select_actionFox(action_probabilities, avail_actions_ind, epsilon)\r\n if action is None: action = np.random.choice (avail_actions_ind)\r\n \r\n actions.append(action)\r\n actionsFox[agent_id] = action\r\n ##############################################################\r\n\r\n\r\n \r\n reward, terminated, _ = env.step(actions)\r\n \r\n episode_reward += reward\r\n \r\n ######################################################################\r\n print(\"Total reward in episode {} = {}\".format(e, episode_reward))\r\n \r\n\r\n \r\n ##########################################################################\r\n \r\n \r\n env.close()\r\n \r\n \r\n\r\n\r\n \r\nif __name__ == \"__main__\":\r\n main() ","sub_path":"DRACO/Algorithm_DRACO_test.py","file_name":"Algorithm_DRACO_test.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"474226863","text":"import os, glob, fiona, logging\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nfrom fiona.crs import from_epsg\nfrom tqdm import tqdm\nfrom osgeo import ogr\n\nimport raster.pykic_gdal as rpg\n\n\"\"\"\nOGR utilities\nAuthor: Nicolas EKICIER\nRelease: V1.56 02/2020\n\"\"\"\n\ndef add_field_id(input, field='nerid'):\n \"\"\"\n Add an ID field in the attribute table of shapefile\n :param input: Path of shapefile\n :param field: Name of field (default = \"nerid\")\n :return: Path of new file\n \"\"\"\n shp = gpd.read_file(input)\n shp[field] = np.arange(1, shp.shape[0]+1)\n name = input.replace('.shp', '_nerid.shp')\n shp.to_file(name)\n return name\n\n\ndef getbbox(input):\n \"\"\"\n Get bounding box of geometry\n :param input: Path of ogr file or ogr geometry (cf below)\n ogrl = ogr.Open(shp)\n input = ogrl.GetLayer()\n :return: Tuple : (xmin, xmax, ymin, ymax)\n \"\"\"\n if type(input) is str:\n ogrl = ogr.Open(input)\n xmin, xmax, ymin, ymax = ogrl.GetLayer().GetExtent()\n else:\n try:\n xmin, xmax, ymin, ymax= input.GetGeometryRef().GetEnvelope()\n except:\n xmin, xmax, ymin, ymax = input.GetExtent()\n return (xmin, xmax, ymin, ymax)\n\n\ndef zonstat(inshp, inimg, attribut='id'):\n \"\"\"\n Compute zonal statistics (count) on each polygon of shapefile from image\n Output will be in image folder with \"_statz.csv\" extent\n :param inshp: path of shapefile\n :param inimg: path of image\n :param attribut: attribute to use in shapefile table (default = 'id')\n :return:\n \"\"\"\n # Check proj\n epsg_shp = int(rpg.geoinfo(inshp, onlyepsg=True))\n epsg_img = int(rpg.geoinfo(inimg, onlyepsg=True))\n if epsg_shp != epsg_img:\n layertmp = ogreproj(inshp, epsg_img, write=True)\n inshp = inshp.replace('.shp', '_{0:d}.shp'.format(epsg_img))\n\n # Read\n img, _, _, _ = rpg.gdal2array(inimg)\n uidi = np.unique(img)\n\n mask = rpg.makemask(inshp, inimg, attribute=attribut)\n uid = np.unique(mask)\n\n # Stats\n output = pd.DataFrame(index=uid, columns=uidi)\n for idmask in tqdm(uid, desc='Zonal Statistics', total=len(uid)):\n index = np.flatnonzero(mask == idmask)\n values = img[np.unravel_index(index, img.shape)]\n\n uidval, uidvalc = np.unique(values, return_counts=True)\n for idval, idvalc in zip(uidval, uidvalc):\n output.at[idmask, idval] = idvalc\n\n output = output.fillna(value=0)\n 
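# rows are polygon ids, columns are raster values, cells hold pixel counts\n 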
output.to_csv(inimg.replace('.tif', '_statz.csv'))\n\n # Clean\n if epsg_shp != epsg_img:\n shptmp = glob.glob(inshp.replace('.shp', '*'))\n for r in shptmp:\n os.remove(r)\n\n return None\n\n\ndef checkproj(layer0, layer1):\n \"\"\"\n Check if projections are same OR same as layer1 (=EPSG)\n :param layer0: path of shapefile 1\n :param layer1: path of shapefile 2 or EPSG (ex : '4326', str)\n :return: booleen and EPSG of each layer\n \"\"\"\n # with fiona.open(layer0, 'r') as src0:\n # proj0 = src0.crs['init']\n proj0 = rpg.geoinfo(layer0, onlyepsg=True)\n if os.path.isfile(layer1):\n proj1 = rpg.geoinfo(layer1, onlyepsg=True)\n else:\n proj1 = layer1\n\n if proj0 != proj1:\n check = False\n else:\n check = True\n proj0 = 'epsg:{0:s}'.format(proj0)\n proj1 = 'epsg:{0:s}'.format(proj1)\n return check, proj0, proj1\n\n\ndef ogreproj(player, oEPSG, write=False):\n \"\"\"\n Reprojection of an OGR layer\n :param layer: Path of shapefile\n :param oEPSG: EPSG value for destination (int)\n :param write: if write, output is written on disk (same path of player with suffix)\n :return: Reprojected layer & path of file if write=True\n \"\"\"\n layer = gpd.read_file(player)\n\n iEPSG = layer.crs # Get projection from input and print in console\n print('Reprojection from {0:s} to epsg:{1:d}'.format(iEPSG['init'], oEPSG))\n\n data_proj = layer.copy()\n data_proj = data_proj.to_crs(epsg=oEPSG) # Reproject the geometries by replacing the values with projected ones\n data_proj.crs = from_epsg(oEPSG) # Determine the CRS of the GeoDataFrame\n\n if write:\n name = player.replace('.shp', '_{0:d}.shp'.format(oEPSG))\n data_proj.to_file(name)\n return data_proj, name\n else:\n return data_proj\n\n\ndef sprocessing(layer1, layer2, method):\n \"\"\"\n Geometric processing between shapefiles\n :param layer1: Geopandas Dataframe 1 OR path of shapefile\n :param layer2: Geopandas Dataframe 2 OR path of shapefile\n :param method: \"intersects\", \"within\", \"contains\"\n :return: Result layer\n \"\"\"\n str_test = 0\n if type(layer1) is str:\n str_test = str_test + 1\n lay1 = gpd.read_file(layer1)\n else:\n lay1 = layer1.copy()\n if type(layer2) is str:\n str_test = str_test + 1\n lay2 = gpd.read_file(layer2)\n else:\n lay2 = layer2.copy()\n\n # Check if projections are same\n if str_test == 2:\n check, _, _ = checkproj(layer1, layer2)\n if not check:\n logging.error('Warning : CRS are not the same')\n return None\n elif str_test == 1:\n if isinstance(lay1, gpd.GeoDataFrame):\n epsg = lay1.crs['init'].split(':')[-1]\n check, _, _ = checkproj(layer2, epsg)\n else:\n epsg = lay2.crs['init'].split(':')[-1]\n check, _, _ = checkproj(layer1, epsg)\n if not check:\n logging.error('Warning : CRS are not the same')\n return None\n\n # Process\n layerm = gpd.sjoin(lay1, lay2, op=method)\n return layerm\n\n\ndef shpbuf(distance, input, output=None, fmtout=None):\n \"\"\"\n Create a buffer from shapefile or Geopandas DataFrame\n :param distance: Distance of buffer (in the same crs unit)\n :param input: Shapefile path or GeoDataFrame\n :param output: Write the output (OPTIONAL)\n :param fmtout: Output format (OPTIONAL) --> ('CSV', 'ESRI Shapefile'=default, 'GeoJSON', 'GML', 'GPX', 'MapInfo File')\n For complete list : import fiona; fiona.supported_drivers\n :return: Buffered layer (GeoDataFrame)\n \"\"\"\n if type(input) is str:\n layer = gpd.read_file(input)\n else: # GeoDataFrame\n layer = input.copy()\n\n layerb = gpd.GeoDataFrame(layer.buffer(distance))\n layerb = layerb.rename(columns={0: 
'geometry'}).set_geometry('geometry')\n layerb.crs = layer.crs\n\n if output is not None:\n if type(output) is str and '.shp' in output:\n layerb.to_file(output)\n elif type(output) is str and fmtout is not None:\n layerb.to_file(output, driver=fmtout)\n else:\n logging.error('Warning : output is not a string or \".shp\" extension is missing')\n return layerb\n\n\ndef distm(lon, lat, units='km'):\n \"\"\"\n Geodesic distance between dots (degrees units)\n Method : Vincenty (1975)\n :param lon: numpy array with at least 2 values (longitude)\n :param lat: numpy array with at least 2 values (latitude)\n :param units: output units : {'km'=default, 'm'}\n :return: distance\n \"\"\"\n radius = 6371009 # meters\n\n xa = np.deg2rad(lon[0:-1])\n xb = np.deg2rad(lon[1:])\n ya = np.deg2rad(lat[0:-1])\n yb = np.deg2rad(lat[1:])\n\n s1 = np.sin(ya)*np.sin(yb) + np.cos(ya)*np.cos(yb)*np.cos(xb - xa)\n s2 = np.arccos(s1)\n\n if units.lower() == 'km':\n d = s2 * radius / 1000\n elif units.lower() == 'm':\n d = s2 * radius\n else:\n logging.error('Warning : output format is not recognized, use default')\n d = s2 * radius / 1000\n return d","sub_path":"vector/pykic_ogr.py","file_name":"pykic_ogr.py","file_ext":"py","file_size_in_byte":7719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"122305517","text":"import nltk\n\ndef calculate_top_ten_users(authors):\n\tauthor_counts = []\n\tfor author in authors:\n\t\tif len(author_counts) <= 10:\n\t\t\tauthor_counts.append((author[\"name\"], author[\"idea_count\"]+author[\"comment_count\"]+author[\"vote_count\"], author))\n\t\t\tauthor_counts.sort(key=lambda x: x[1])\n\t\telse:\n\t\t\tauthor_counts[0] = (author[\"name\"], author[\"idea_count\"]+author[\"comment_count\"]+author[\"vote_count\"], author)\n\t\t\tauthor_counts.sort(key=lambda x: x[1])\n\tauthor_counts_asc = author_counts\n\tauthor_counts_desc = list(author_counts) # to properly copy\n\tauthor_counts_desc.reverse()\n\treturn (author_counts_asc, author_counts_desc)\n\t\ndef ideas_to_months(ideas):\n\tcalendar_ideas = {}\n\tcalendar_comments = {}\n\tfor idea in ideas:\n\t\tmonth = idea[\"creation_date\"][5:7]\n\t\tif month not in calendar_ideas.keys():\n\t\t\tcalendar_ideas[month] = 1\n\t\t\tcalendar_comments[month] = idea[\"comment_count\"]\n\t\telse:\n\t\t\tcalendar_ideas[month] += 1\n\t\t\tcalendar_comments[month] += idea[\"comment_count\"]\n\treturn (calendar_ideas, calendar_comments)\n\t\ndef word_count(ideas):\n\ttitle_corpus = \"\"\n\ttext_corpus = \"\"\n\tfor idea in ideas:\n\t\ttitle_corpus += \" \"\n\t\ttitle_corpus += idea[\"title\"].lower()\n\t\ttext_corpus += \" \"\n\t\ttext_corpus += idea[\"idea_text\"].lower()\n\tfdist_title = nltk.FreqDist(title_corpus.split(\" \"))\n\tfdist_text = nltk.FreqDist(text_corpus.split(\" \"))\n\treturn (fdist_title, fdist_text)","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"318669070","text":"from sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom tokenizer import tokenize\n\ntexts = [\n '東京から大阪に行く',\n '大阪から東京に行く',\n]\n\n# bi-gram\nvectorizer = TfidfVectorizer(tokenizer=tokenize, ngram_range=(2, 2))\nvectorizer.fit(texts)\ntfidf = 
vectorizer.transform(texts)\n","sub_path":"assets/src/sec60_feature_extraction/word_ngram/sklearn_ngram_tfidf.py","file_name":"sklearn_ngram_tfidf.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"195201390","text":"from __future__ import division\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nfrom flask import Flask, send_file, redirect\nfrom flask import render_template, request, jsonify\nfrom apps.models.user import User\nfrom apps.models.room import Room\nimport json\nfrom apps.controllers.login import signin, save_user\nfrom apps.controllers.room import save_room, roomin, intoTheRoom, outOfTheRoom, selectPayer, getUserNumber, getPayerInfo, CompleteOrder, calculateTotal\nfrom apps.controllers.menu import showingMenu_from_rid, save_user_menu\nfrom apps.utils.db import init_db, get_id_from_db, select_one, select_all, insert, get_username_from_db, get_restaurantname_from_db, get_locationid_from_univ, get_univ_from_user, get_room_id_from_session, get_restaurant_id_from_roomid, get_host_id_from_roomid\nfrom apps.utils.token import check_token, check_room_token\n\n\napp = Flask(__name__, template_folder='../templates', static_folder = '../static')\n\n\n@app.route(\"/\")\ndef index():\n if check_token():\n return render_template(\"main.html\"), 200\n return render_template(\"login.html\"), 200\n\n@app.route(\"/static/js/login.js\")\ndef loginjs():\n return send_file('..\\\\static\\\\js\\\\login.js')\n\n@app.route(\"/static/js/main.js\")\ndef mainjs():\n return send_file('..\\\\static\\\\js\\\\main.js')\n\n@app.route(\"/static/js/CreationRoom.js\")\ndef creationroomjs():\n return send_file('..\\\\static\\\\js\\\\CreationRoom.js')\n\n@app.route(\"/static/css/login.css\")\ndef logincss():\n return send_file('..\\\\static\\\\css\\\\login.css')\n\n@app.route(\"/static/css/main.css\")\ndef maincss():\n return send_file('..\\\\static\\\\css\\\\main.css')\n\n@app.route(\"/static/css/CreationRoom.css\")\ndef creationroomcss():\n return send_file('..\\\\static\\\\css\\\\CreationRoom.css')\n\n@app.route(\"/user\", methods=[\"POST\"])\ndef login():\n #print(\"login\")\n try:\n data=json.loads(request.data)\n except ValueError:\n return \"Input must be json format\", 400\n\n user=User.create_from_request(data)\n #user.getUserEmail()\n user.getHashing()\n\n response=signin(user)\n\n return response\n\n@app.route(\"/user\", methods=[\"PUT\"])\ndef signup():\n #print(\"signup\")\n try:\n data=json.loads(request.data)\n except ValueError:\n return \"Input must be json format\", 400\n\n user=User.create_from_request(data)\n\n if(user is not None):\n user.getHashing()\n\n\n response=save_user(user)\n return response\n\n@app.route('/createroom', methods=[\"PUT\"])\ndef create_room():\n try:\n data=json.loads(request.data)\n except ValueError:\n return \"Input must be json format\", 400\n\n room = Room.create_from_request(data)\n\n get_session = request.cookies.get('pr_session')\n user_id = get_id_from_db(get_session)\n univ_id = get_univ_from_user(user_id)\n room.host_id = user_id\n room.location_id = get_locationid_from_univ(univ_id)\n\n response = save_room(room)\n\n return response\n\n@app.route(\"/roomList\", methods=[\"GET\"])\ndef roomList():\n get_session = request.cookies.get('pr_session')\n user_id = get_id_from_db(get_session)\n\n query = \"select chief_id, room_title, restaurant_id, created, room_id from room as r1, (select e2.location_id as location_id from (select univ_id from pr_user where 
user_id = '%d') as e1, pr_university as e2 where e1.univ_id = e2.univ_id) as r2 where r1.room_exist = 1 and r1.location_id = r2.location_id order by created desc;\"\n table_all = select_all(query % user_id)\n room_List = list()\n for table_one in table_all:\n username = get_username_from_db(table_one[0])\n rname = get_restaurantname_from_db(table_one[2])\n room_dic = { 'user_name':username,'room_title': table_one[1], 'restaurant_name':rname,'created':table_one[3], 'room_id':table_one[4]}\n room_List.append(room_dic)\n\n return jsonify(results = room_List)\n\n@app.route('/room/<int:room_id>')\ndef show_room(room_id):\n if check_token():\n if check_room_token(room_id):\n query = \"select chief_id from room where room_id = '%d'\"\n host_id = select_one(query % room_id)\n\n query = \"select user_name from pr_user where user_id = '%d'\"\n host_name = select_one(query % host_id[0])\n\n query2 = \"select restaurant_name, restaurant_location, restaurant_phone, r1.restaurant_id from pr_restaurant as r1, (select restaurant_id from room where room_id = '%d') as r2 where r1.restaurant_id = r2.restaurant_id;\"\n restaurant_info = select_one(query2 % room_id)\n\n query3 = \"select room_title from room where room_id = '%d'\"\n room_title = select_one(query3 % room_id)\n\n query4 = \"select created from room where room_id = '%d'\"\n created = select_one(query4 % room_id)\n\n query5 = \"select room_inwon from room where room_id = '%d'\"\n room_inwon = select_one(query5 % room_id)\n\n menu_table = showingMenu_from_rid(restaurant_info[3])\n\n return render_template(\"room.html\", host_name = host_name[0], rname = restaurant_info[0], rlocation = restaurant_info[1], rphone = restaurant_info[2], menu_table = menu_table, room_title = room_title[0], created = created[0], room_inwon = int(room_inwon[0])), 200\n\n return render_template(\"password.html\", room_id = room_id), 200\n\n@app.route('/showTotal', methods=[\"GET\"])\ndef show_total():\n pr_session = request.cookies.get('pr_session')\n user_id = get_id_from_db(pr_session)\n\n query = \"select taker, price from total_price where giver = '%d'\" # money I have to pay\n query2 = \"select user_name from pr_user where user_id = '%d'\"\n query3 = \"select giver, price from total_price where taker = '%d'\" # money I should receive\n table_all = select_all(query % user_id)\n get_table = select_one(query2 % user_id)\n myname = get_table[0]\n\n pay_list = list()\n for table_one in table_all:\n name = select_one(query2 % table_one[0])\n taker_name = name[0]\n pay_dic = {'giver': myname, 'taker':taker_name, 'price': table_one[1]}\n pay_list.append(pay_dic)\n\n payed_list = list()\n table_all = select_all(query3 % user_id)\n for table_one in table_all:\n name = select_one(query2 % table_one[0])\n giver_name = name[0]\n pay_dic = {'giver': giver_name, 'taker': myname, 'price': table_one[1]}\n payed_list.append(pay_dic)\n return jsonify(results = pay_list, results2 = payed_list)\n\n@app.route(\"/recommend\", methods=[\"GET\"])\ndef user_recommend():\n get_session = request.cookies.get('pr_session')\n user_id = get_id_from_db(get_session)\n\n query = \"select chief_id, room_title, restaurant_id, created, room_id from room as r1, (select e2.location_id as location_id from (select univ_id from pr_user where user_id = '%d') as e1, pr_university as e2 where e1.univ_id = e2.univ_id) as r2 where r1.location_id = r2.location_id order by created desc;\"\n table_all = select_all(query % user_id)\n room_List = list()\n for table_one in table_all:\n username = get_username_from_db(table_one[0])\n rname = 
get_restaurantname_from_db(table_one[2])\n room_dic = { 'user_name':username,'room_title': table_one[1], 'restaurant_name':rname,'created':table_one[3], 'room_id':table_one[4]}\n room_List.append(room_dic)\n\n myindex = 0\n query = \"select user_id from pr_user\"\n userAll = select_all(query)\n\n userList = list()\n for index in userAll:\n userList.append(index[0])\n\n sets = [set() for _ in range(0, len(userList))]\n\n for index in range(0, len(userList)):\n if (userList[index] == user_id):\n myindex = index\n query = \"select food_id from user_orderlist where user_id = '%d'\"\n temp = select_all(query % userList[index])\n orderList = list()\n for i in temp:\n orderList.append(i[0])\n\n for i in range(0, len(orderList)):\n sets[index].add(orderList[i])\n\n datas = []\n\n for i in range(0, len(userList)):\n temp = []\n for j in range(0, len(userList)):\n if (i != j):\n a = sets[i]\n b = sets[j]\n temp.append(len(a & b) / len(a | b))\n else:\n temp.append(0)\n\n datas.append(temp)\n #print(datas)\n df_x = pd.DataFrame(datas)\n nCluster = len(userList) ** 0.5\n\n kmeans = KMeans(n_clusters=round(nCluster), random_state=0).fit(df_x)\n print(kmeans.labels_)\n recommands = set()\n mine = sets[myindex]\n print(myindex)\n for index in range(0,len(kmeans.labels_)):\n if (kmeans.labels_[myindex] == kmeans.labels_[index]):\n recommands = recommands.union(sets[index] - mine)\n\n recommend_list = list(recommands)\n print(recommend_list)\n list_len = len(recommend_list)\n recommend_info = list()\n i = 100\n print(len(recommend_list))\n while i < 120:\n if(recommend_list[i] >= 10000):\n print(recommend_list[i])\n query = \"select restaurant_id from pr_food where food_id = '%d'\"\n get_table = select_one(query % recommend_list[i])\n query2 = \"select restaurant_name from pr_restaurant where restaurant_id = '%d'\"\n rname = select_one(query2 % get_table[0])\n restaurant_name = rname[0]\n query3 = \"select food_name from pr_food where food_id = '%d'\"\n fname = select_one(query3 % recommend_list[i])\n food_name = fname[0]\n query4 = \"select food_price from pr_food where food_id = '%d'\"\n fprice = select_one(query4 % recommend_list[i])\n food_price = fprice[0]\n rdic = {'restaurant_name': restaurant_name, 'food_name': food_name, 'food_price': food_price}\n recommend_info.append(rdic)\n i+=1\n\n return jsonify(results = recommend_info)\n\n@app.route('/roomlogin', methods=[\"POST\"])\ndef room_login():\n try:\n data=json.loads(request.data)\n except ValueError:\n return \"Input must be json format\", 400\n\n get_session = request.cookies.get('pr_session')\n user_id = get_id_from_db(get_session)\n response = roomin(data, user_id)\n\n room_id = int(data['roomid'])\n intoTheRoom(room_id, user_id)\n\n return response\n\n@app.route('/roomOut', methods=[\"GET\"])\ndef room_out():\n pr_session = request.cookies.get('pr_session')\n user_id = get_id_from_db(pr_session)\n room_session = request.cookies.get('room_session')\n room_id = get_room_id_from_session(room_session)\n response = outOfTheRoom(room_id, user_id)\n\n return response\n\n@app.route('/select')\ndef select_menu():\n get_session = request.cookies.get('room_session')\n room_id = get_room_id_from_session(get_session)\n pr_session = request.cookies.get('pr_session')\n user_id = get_id_from_db(pr_session)\n\n host_id = get_host_id_from_roomid(room_id)\n\n query = \"select restaurant_id from room where room_id = '%d'\"\n restaurant_id = select_one(query % (room_id))\n\n query = \"select food_name, food_price from pr_food where restaurant_id = '%d'\"\n 
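# fetch every dish (name, price) offered by this room's restaurant\n 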
food_table = select_all(query % int(restaurant_id[0]))\n    food_List = list()\n    for table_one in food_table:\n        food_dic = {'food_name': table_one[0], 'food_price': table_one[1]}\n        food_List.append(food_dic)\n\n    uinfo = list()\n\n    if host_id == user_id:\n        info = 0\n        uinfo.append(info)\n    else:\n        query = \"select user_name from pr_user where user_id = '%d'\"\n        get_table = select_one(query % user_id)\n        info = get_table[0]\n        uinfo.append(info)\n\n    return jsonify(results = food_List, results2 = uinfo)\n\n@app.route('/roomMember', methods = [\"GET\"])\ndef room_member():\n    room_session = request.cookies.get('room_session')\n    room_id = get_room_id_from_session(room_session)\n\n    menu_list = list()\n\n    query1 = \"select user_id from room_list where room_id = '%d' and user_present = 1\"\n    user_table = select_all(query1 % (room_id))\n\n    restaurant_id = get_restaurant_id_from_roomid(room_id)\n    query2 = \"select food_name, food_price from user_orderlist where user_id = '%d' and room_id = '%d' order by user_id\" # fetch food info\n    query3 = \"select user_name from pr_user where user_id = '%d'\" # fetch the user name\n    query4 = \"select user_ready from room_list where room_id = '%d' and user_id = '%d' and user_present = 1\" # fetch the user's ready status\n    ftable_dic = dict()\n\n    for utable_one in user_table:\n        total_price = 0\n        food_list = list()\n        food_table = select_all(query2 % (int(utable_one[0]), room_id))\n        for ftable_one in food_table:\n            ftable_dic = {'food_name': ftable_one[0], 'food_price': ftable_one[1]}\n            total_price += int(ftable_one[1])\n            food_list.append(ftable_dic)\n\n        user_name = select_one(query3 % utable_one[0])\n        ready = select_one(query4 % (room_id, utable_one[0]))\n        utable_dic = {'user_name': user_name[0], 'user_choice': food_list, 'user_pay': total_price, 'user_ready': ready[0]}\n        menu_list.append(utable_dic)\n\n    uinfo = list()\n    for table_one in user_table:\n        query = \"select user_name from pr_user where user_id = '%d'\"\n        uname = select_one(query % table_one[0])\n        p_dic = {'user_name': uname[0], 'user_id': table_one[0]}\n        uinfo.append(p_dic)\n\n    return jsonify(results = menu_list, results2 = uinfo)\n\n@app.route('/finalOrder', methods = [\"PUT\"])\ndef final_decision():\n    try:\n        data=json.loads(request.data)\n    except ValueError:\n        return \"Input must be json format\", 400\n\n    room_session = request.cookies.get('room_session')\n    room_id = get_room_id_from_session(room_session)\n    ready_number=getUserNumber(room_id)\n    total_num = int(data)\n\n    if ready_number == total_num:\n        payerID = getPayerInfo(room_id)\n        if payerID is None:\n            return \"The payer has not been selected yet\", 400\n\n        response = CompleteOrder(room_id)\n        if response is None:\n            return \"This room has already been finalized\", 400\n\n        response = calculateTotal(room_id)\n        return response\n\n    return \"Not all members are ready\", 400\n\n\n\n@app.route('/orderReady', methods = [\"POST\"])\ndef order_ready():\n    pr_session = request.cookies.get('pr_session')\n    user_id = get_id_from_db(pr_session)\n    # with the user id and room id, just toggle user_ready in room_list\n    room_session = request.cookies.get('room_session')\n    room_id = get_room_id_from_session(room_session)\n\n    query = \"select user_ready from room_list where user_present = 1 and room_id = '%d' and user_id = '%d'\"\n    ready = select_one(query % (room_id, user_id))\n\n    if ready[0]:\n        query2 = \"update room_list set user_ready = 0 where room_id = '%d' and user_id = '%d' and user_present = 1\"\n        insert(query2 % (room_id, user_id))\n    else:\n        query2 = \"update room_list set user_ready = 1 where room_id = '%d' and user_id = '%d' and user_present = 1\"\n        insert(query2 % 
(room_id, user_id))\n\n    return jsonify(results = ready[0])\n\n\n@app.route('/order', methods=[\"PUT\"])\ndef order_menu():\n    try:\n        data=json.loads(request.data)\n    except ValueError:\n        return \"Input must be json format\", 400\n\n    pr_session = request.cookies.get('pr_session')\n    user_id = get_id_from_db(pr_session)\n    room_session = request.cookies.get('room_session')\n    room_id = get_room_id_from_session(room_session)\n\n    save_user_menu(user_id, room_id, data)\n\n    return \"\"\n\n@app.route('/getPayer', methods=[\"PUT\"])\ndef get_payer():\n    try:\n        data = json.loads(request.data)\n    except ValueError:\n        return \"Input must be json format\", 400\n\n    if data is not None:\n        room_session = request.cookies.get('room_session')\n        room_id = get_room_id_from_session(room_session)\n        response = selectPayer(room_id, data['user_id'])\n        return response\n\n    return \"-1\"\n\n\n@app.route('/creation')\ndef new_room():\n    if check_token():\n        return render_template(\"CreationRoom.html\"), 200\n\n@app.route('/restaurant/<int:rid>', methods=[\"GET\"])\ndef restaurant(rid):\n    get_session = request.cookies.get('pr_session')\n    user_id = get_id_from_db(get_session)\n\n    query = \"select restaurant_name, restaurant_phone, restaurant_location, restaurant_id from pr_restaurant as r1, (select e2.location_id as location_id from (select univ_id from pr_user where user_id = '%d') as e1, pr_university as e2 where e1.univ_id = e2.univ_id) as r2 where r1.location_id = r2.location_id and r1.restaurant_kind = '%d';\"\n    query2 = \"select food_name, food_price from pr_food where restaurant_id = '%d'\"\n\n    table_all = select_all(query % (user_id, rid))\n    restaurant_List = list()\n    menu_dic = dict()\n\n    for table_one in table_all:\n        menu_list = list()\n        menu_table = select_all(query2 % int(table_one[3]))\n        for mtable_one in menu_table:\n            menu_dic = {'food_name': mtable_one[0], 'food_price': mtable_one[1]}\n            menu_list.append(menu_dic)\n\n\n        r_dic = {'phoneNumber': table_one[1], 'location': table_one[2], 'title': table_one[0], 'menulist': menu_list}\n        restaurant_List.append(r_dic)\n\n    return jsonify(results = restaurant_List)\n\n\nif __name__ == '__main__':\n    app.debug = True\n    init_db()\n    app.run('163.180.118.174', port = 5000)\n","sub_path":"apps/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":17282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"465129308","text":"import numpy as np\nimport math\nimport matplotlib.ticker as mtick\n\nimport matplotlib.pyplot as plt\nwith open(\"targets.txt\") as textFile:\n    lines=[line.split() for line in textFile]\n\nlines_=np.array(lines)\n\n\n#Perform 10-fold cross-validation for KNN. m is the matrix in which\n#rows are for objects and columns are attribute values, c is a single\n#column matrix of labels. 
k is the neighbor size for KNN.\nm=np.loadtxt(\"attributes.txt\")\nc=np.loadtxt(\"targets.txt\")\n#print(lines_)\ndef CrossVal10(m, c, k):\n    size = np.size(m, axis=0) #objects count\n    fsize = size//10 #size of each fold\n    errorList=[None]*10 #one error value per fold\n    summingerror=0\n    avgError=0\n    for i in range(10):\n        trainx = np.delete(m, range(i*fsize, (i+1)*fsize), axis=0)\n        trainl = np.delete(c, range(i*fsize, (i+1)*fsize))\n        testx = m[i*fsize:(i+1)*fsize,:]\n        testl = c[i*fsize:(i+1)*fsize]\n        testl1=KNN(trainx, trainl, testx, k)\n        each_Error=0\n        for j in range(len(testl1)):\n            if(testl1[j]!=testl[j]):\n                each_Error=each_Error+1\n        each_Error=each_Error/(len(testl))\n        errorList[i]=each_Error\n    for p in range(len(errorList)):\n        summingerror=summingerror+errorList[p]\n    avgError=summingerror/(len(errorList))\n    return avgError\n    #add the code below to do the following: 1. call your KNN function.\n    #It should be something like KNN(trainx, trainl, testx, k); 2. compute\n    #the classification error for the current fold; 3. return the\n    #average classification rate over all folds. \n\n    \n    \n\n#Use KNN with neighbor size of k to classify each row in testx. The\n#training set is trainx and trainl, which contain attributes and labels.\n#Returns a vector of labels for the rows in testx.\ndef KNN(trainx, trainl, testx, k):\n    all_Distances=[]\n    size = np.size(testx, axis=0) #objects count\n    testl1=[None]*len(testx)\n    for i in range(size):\n        m = np.array([testx[i,:]]) \n        dist=np.sum((trainx-np.repeat(m,np.size(trainx,axis=0), axis=0))**2, axis=1)\n        all_Distances.append(dist)\n    len_distances=len(all_Distances)\n    for j in range(len_distances):\n        \n        ind=np.argsort(all_Distances[j])\n        kclasses=[]\n        for l in range(k):\n            kclasses.append(trainl[ind[l]])\n        counterZero=0\n        counterOne=0\n        for y in range(len(kclasses)):\n            if (kclasses[y]==0):\n                counterZero=counterZero+1\n            else:\n                counterOne=counterOne+1\n        if(counterZero>counterOne):\n            testl1[j]=0\n        else:\n            testl1[j]=1 #ties go to class 1\n\n\n\n    return testl1\n\ndef lowesterror(m,c):\n    AllErrors=[None]*30\n    for i in range(30):\n        AllErrors[i]=CrossVal10(m,c,i+1)\n    lowesterror=AllErrors.index(min(AllErrors))\n    return lowesterror+1\n\n\n# for i in range(30):\n#     average=CrossVal10(m,c,i+1)\n#     print (\"for k= \", i+1, \"the values is\", average)\n\nlowestk=lowesterror(m,c)\n\ndef plotit(m,c) :\n    k=[None]*30\n    misclass=[None]*30\n\n    for i in range (30) :\n        k[i]=i+1\n        misclass[i]=CrossVal10(m,c,i+1)*100\n\n    xint = range(min(k), math.ceil(max(k))+1)\n    plt.figure(figsize=(14,6))\n    plt.xticks(xint)\n\n    plt.plot(k,misclass,'ro--')\n\n\n    plt.xlabel('Value of K')\n    plt.ylabel('CV error percentage in %')\n    plt.show()\n\nprint(\"The best K is\",lowestk, \"with CV error=\", CrossVal10(m,c,lowestk), \"or in percentage \", CrossVal10(m,c,lowestk)*100, \"%\")\n\nplotit(m,c)\n#add additional code in the following to complete the remaining task required\n#by Problem 3 in Assignment 4.","sub_path":"KNN/k-nearest-neighbours.py","file_name":"k-nearest-neighbours.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"258967469","text":"import logging\nimport traceback\nimport json\nfrom typing import List\nimport os\nfrom time import sleep\n\nimport click\nimport requests\nfrom requests.exceptions import ConnectionError as RequestsConnectionError\nfrom terminaltables import SingleTable\nimport dateutil.parser\n\nfrom .config import load_conf, load_cookies, save_cookies\nfrom .upload import 
compress_dir\nfrom .util import compose, slugify, create_random_subdomain\n\n\nclass PhostServerError(Exception):\n pass\n\n\nclass GlobalAppState(object):\n \"\"\" What it says on the tin. \"\"\"\n\n def __init__(self, config_file):\n self.conf = load_conf(config_file)\n\n # Load cookies from previous session and construct a new session with them\n self.session = requests.Session()\n self.session.cookies.update(load_cookies())\n\n def make_request(self, method: str, *args, json_body=None, form_data=None, multipart_data=None):\n (func, kwargs) = {\n \"POST\": (\n self.session.post,\n {\"json\": json_body, \"files\": multipart_data, \"data\": form_data},\n ),\n \"GET\": (self.session.get, {}),\n \"PUT\": (self.session.put, {\"json\": json_body}),\n \"PATCH\": (self.session.patch, {\"json\": json_body}),\n \"DELETE\": (self.session.delete, {}),\n }[method.upper()]\n\n return func(*args, **kwargs)\n\n def api_call(self, resource_path: str, method=\"GET\", **kwargs):\n try:\n res = self.make_request(\n method, \"{}/{}\".format(self.conf[\"api_server_url\"], resource_path), **kwargs\n )\n\n if res.status_code == 404:\n raise PhostServerError(\"Resource not found\")\n elif res.status_code == 500:\n raise PhostServerError(\"Internal server error\")\n elif res.status_code == 403:\n # Try to login and repeat the request if this isn't the login route\n if resource_path != \"login/\":\n self.login()\n sleep(0.2)\n return self.api_call(resource_path, method=method, **kwargs)\n\n raise PhostServerError(\"Error logging in; invalid username/password?\")\n elif res.status_code != 200:\n raise PhostServerError(\n \"Received {} response code when making request: {}\".format(\n res.status_code, res.text\n )\n )\n\n return res.json()\n\n except Exception as e:\n show_stacktrace = True\n if isinstance(e, PhostServerError):\n show_stacktrace = False\n elif isinstance(e, RequestsConnectionError):\n e = Exception(\"Error while communicating with the server's API\")\n show_stacktrace = False\n\n if show_stacktrace:\n traceback.print_exc()\n else:\n print(\"Error: {}\".format(e))\n\n self.save_cookies()\n exit(1)\n\n def login(self):\n res = self.api_call(\n \"login/\",\n method=\"POST\",\n form_data={\"username\": self.conf[\"username\"], \"password\": self.conf[\"password\"]},\n )\n\n if not res[\"success\"]:\n print(\"Error logging into the server; invalid username/password?\")\n exit(1)\n\n # Save the cookies from this session so that they can be re-used next time that the\n # application is run.\n save_cookies(self.session.cookies.get_dict())\n\n\nSTATE = None\n\n\ndef list_deployments():\n def process_versions(versions: List[dict]) -> (str, str):\n active_version = \"None\"\n versions_string = \"\"\n\n for v in sorted(versions, key=lambda version: version[\"created_on\"]):\n if v[\"active\"]:\n active_version = v[\"version\"]\n\n if versions_string:\n versions_string += \", \"\n versions_string += v[\"version\"]\n\n return (active_version, versions_string)\n\n deployments = STATE.api_call(\"deployments/\")\n table_headers = [\"Name\", \"URL\", \"Creation Date\", \"Active Version\", \"All Versions\", \"Categories\"]\n table_data = map(\n lambda datum: [\n datum[\"name\"],\n \"{}://{}.{}/\".format(\n STATE.conf[\"hosting_protocol\"], datum[\"subdomain\"], STATE.conf[\"hosting_base_url\"]\n ),\n dateutil.parser.parse(datum[\"created_on\"]).strftime(\"%Y-%m-%d\"),\n *process_versions(datum[\"versions\"]),\n \", \".join(\n filter(None, map(lambda category: category[\"category\"], datum[\"categories\"]))\n ),\n 
],\n deployments,\n )\n\n table = SingleTable([table_headers, *table_data])\n table.inner_column_border = False\n table.inner_footing_row_border = False\n table.inner_heading_row_border = False\n table.inner_row_border = False\n table.outer_border = False\n table.padding_left = 0\n table.padding_right = 3\n\n print(table.table)\n\n\ndef delete_deployment(query, lookup_field, version):\n req_path = (\n \"deployments/{}/?lookupField={}\".format(query, lookup_field)\n if version is None\n else \"deployments/{}/{}/?lookupField={}\".format(query, version, lookup_field)\n )\n STATE.api_call(req_path, method=\"DELETE\")\n\n print(\"Deployment {}successfully deleted\".format(\"\" if version is None else \"version \"))\n\n\n@click.group()\n@click.option(\n \"--config\",\n \"-c\",\n type=click.File(encoding=\"utf-8\"),\n default=os.path.expanduser(\"~/.phost/conf.toml\"),\n)\ndef main(config):\n global STATE # pylint: disable=W0603\n STATE = GlobalAppState(config)\n\n\n@main.group(\"deployment\")\ndef deployment():\n pass\n\n\nwith_query_lookup_decorators = compose(\n click.argument(\"query\"),\n click.option(\n \"--name\", \"lookup_field\", flag_value=\"name\", default=True, help=\"Look up deployment by name\"\n ),\n click.option(\n \"--id\",\n \"lookup_field\",\n flag_value=\"id\",\n default=False,\n help=\"Look up deployment by deployment UUID\",\n ),\n click.option(\n \"--subdomain\",\n \"lookup_field\",\n flag_value=\"subdomain\",\n default=False,\n help=\"Look up deployment by subdomain\",\n ),\n)\n\n\ndelete_deployment_decorators = compose(\n with_query_lookup_decorators,\n click.option(\n \"--version\",\n \"-v\",\n default=None,\n help=(\n \"If supplied, only this version will be deleted. \"\n \"If not supplied, all versions will be deleted.\"\n ),\n ),\n)\n\n\n@deployment.command(\"ls\")\ndef list_deployments_deployment():\n list_deployments()\n\n\n@main.command(\"ls\")\ndef list_deployments_main():\n list_deployments()\n\n\n@deployment.command(\"rm\")\n@delete_deployment_decorators\ndef delete_deployment_deployment(query, lookup_field, version):\n delete_deployment(query, lookup_field, version)\n\n\n@main.command(\"rm\")\n@delete_deployment_decorators\ndef delete_deployment_main(query, lookup_field, version):\n delete_deployment(query, lookup_field, version)\n\n\ndef create_deployment(name, subdomain, directory, version, random_subdomain, categories):\n if random_subdomain:\n if subdomain is None:\n subdomain = create_random_subdomain()\n else:\n print(\"Can't supply both `--random-subdomain` and an explicit subdomain\")\n exit(1)\n elif not subdomain:\n subdomain = slugify(name)\n\n # Compress the target directory into a tempfile .tgz archive\n tgz_file = compress_dir(directory)\n\n multipart_data = {\n \"name\": (\"\", name),\n \"subdomain\": (\"\", subdomain),\n \"file\": (\"directory.tgz\", tgz_file),\n \"version\": (\"\", version),\n \"categories\": (\"\", \",\".join(categories)),\n }\n\n res = STATE.api_call(\"deployments/\", method=\"POST\", multipart_data=multipart_data)\n print(\"Deployment successfully created: {}\".format(res[\"url\"]))\n\n\ncreate_deployment_decorators = compose(\n click.argument(\"name\"),\n click.argument(\"directory\"),\n click.option(\n \"--subdomain\",\n \"-s\",\n default=None,\n help=(\n \"The subdomain on which the deployment will be hosted. 
If left off, the subdomain\"\n \" will be constructed from the deployment name.\"\n ),\n ),\n click.option(\"--version\", \"-v\", default=\"0.1.0\"),\n click.option(\n \"--private\",\n \"-p\",\n default=False,\n help=\"Private deployments have a randomized subdomain\",\n is_flag=True,\n ),\n click.option(\n \"--category\",\n \"-c\",\n multiple=True,\n help=(\n \"A string representing a category that this deployment should be added to.\"\n \" (Multiple may be provided)\"\n ),\n ),\n)\n\n\n@main.command(\"create\")\n@create_deployment_decorators\ndef create_deployment_main(name, subdomain, directory, version, private, category):\n create_deployment(name, subdomain, directory, version, private, category)\n\n\n@deployment.command(\"create\")\n@create_deployment_decorators\ndef create_deployment_deployment(name, subdomain, directory, version, private, category):\n create_deployment(name, subdomain, directory, version, private, category)\n\n\nwith_update_deployment_decorators = compose(\n with_query_lookup_decorators, click.argument(\"version\"), click.argument(\"directory\")\n)\n\n\ndef update_deployment(query, lookup_field, version, directory):\n \"\"\" Pushes a new version for an existing deployment \"\"\"\n\n multipart_data = {\"file\": compress_dir(directory)}\n STATE.api_call(\n \"deployments/{}/{}/?lookupField={}\".format(query, version, lookup_field),\n multipart_data=multipart_data,\n method=\"POST\",\n )\n\n print(\"Deployment successfully updated\")\n\n\n@deployment.command(\"update\")\n@with_update_deployment_decorators\ndef update_deployment_deployment(query, lookup_field, version, directory):\n update_deployment(query, lookup_field, version, directory)\n\n\n@main.command(\"update\")\n@with_update_deployment_decorators\ndef update_deployment_main(query, lookup_field, version, directory):\n update_deployment(query, lookup_field, version, directory)\n\n\n@with_query_lookup_decorators\n@deployment.command(\"show\")\ndef show_deployment(query, lookup_field):\n deployment_data = STATE.api_call(\"deployments/{}/?lookupField={}\".format(query, lookup_field))\n print(json.dumps(deployment_data, indent=4))\n\n\nlogging.getLogger(\"requests\").setLevel(logging.CRITICAL)\nlogging.getLogger(\"urllib3\").setLevel(logging.CRITICAL)\n\nmain() # pylint: disable=E1120[]\n","sub_path":"client/phost/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":10513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"113344944","text":"#%% Import\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport importlib\n\nfrom itertools import product\n\nimport my_constants as mc\nimport my_utilities as mu\nimport MC_functions as mcf\nimport chain_functions as cf\n\nmc = importlib.reload(mc)\nmu = importlib.reload(mu)\nmcf = importlib.reload(mcf)\ncf = importlib.reload(cf)\n\n#import time\n\nos.chdir(mc.sim_folder + 'PMMA_sim_Harris')\n\n\n#%%\n#source_dir = '/Volumes/ELEMENTS/Chains_Harris_no_space/'\nsource_dir = 'Chains_Harris_no_space/'\n\n#print(os.listdir(source_dir))\n\n\n#%% constants\nN_chains_total = 1393\nN_mon_cell_max = 500\n\nl_xyz = np.array((100, 100, 500))\n\nx_min, y_min, z_min = (-l_xyz[0]/2, -l_xyz[1]/2, 0)\nxyz_min = np.array((x_min, y_min, z_min))\nxyz_max = xyz_min + l_xyz\nx_max, y_max, z_max = xyz_max\n\nstep_2nm = 2\n\nx_bins_2nm = np.arange(x_min, x_max + 1, step_2nm)\ny_bins_2nm = np.arange(y_min, y_max + 1, step_2nm)\nz_bins_2nm = np.arange(z_min, z_max + 1, step_2nm)\n\nbins_2nm = x_bins_2nm, 
y_bins_2nm, z_bins_2nm\n\nx_grid_2nm = (x_bins_2nm[:-1] + x_bins_2nm[1:]) / 2\ny_grid_2nm = (y_bins_2nm[:-1] + y_bins_2nm[1:]) / 2\nz_grid_2nm = (z_bins_2nm[:-1] + z_bins_2nm[1:]) / 2\n\nresist_shape = len(x_grid_2nm), len(y_grid_2nm), len(z_grid_2nm)\n\nxs = len(x_grid_2nm)\nys = len(y_grid_2nm)\nzs = len(z_grid_2nm)\n\n\n#%%\npos_matrix = np.zeros(resist_shape, dtype=np.uint32)\n\nresist_matrix = -np.ones((*resist_shape, N_mon_cell_max, 3), dtype=np.uint32)\n\nchain_tables = []\n\nuint32_max = 4294967295\n\n\n#%%\nfor chain_num in range(N_chains_total):\n \n mu.pbar(chain_num, N_chains_total)\n \n now_chain = np.load(source_dir + 'chain_shift_' + str(chain_num) + '.npy')\n \n chain_table = np.zeros((len(now_chain), 5), dtype=np.uint32)\n \n \n for n_mon, mon_line in enumerate(now_chain):\n \n if n_mon == 0:\n mon_type = 0\n elif n_mon == len(now_chain) - 1:\n mon_type = 2\n else:\n mon_type = 1\n \n now_x, now_y, now_z = mon_line\n \n xi = mcf.get_closest_el_ind(x_grid_2nm, now_x)\n yi = mcf.get_closest_el_ind(y_grid_2nm, now_y)\n zi = mcf.get_closest_el_ind(z_grid_2nm, now_z)\n \n mon_line_pos = pos_matrix[xi, yi, zi]\n \n resist_matrix[xi, yi, zi, mon_line_pos] = chain_num, n_mon, mon_type\n chain_table[n_mon] = xi, yi, zi, mon_line_pos, mon_type\n \n pos_matrix[xi, yi, zi] += 1\n \n \n chain_tables.append(chain_table)\n\n\n#%%\nprint('resist_matrix size, Gb:', resist_matrix.nbytes / 1024**3)\nnp.save('../mapping_Harris/MATRIX_resist_Harris_no_space.npy', resist_matrix)\n\n\n#%%\ndest_folder = '../mapping_Harris/Harris_chain_tables_no_space/'\n\nfor i, ct in enumerate(chain_tables):\n \n mu.pbar(i, len(chain_tables))\n \n np.save(dest_folder + 'chain_table_' + str(i) + '.npy', ct)\n\n\n#%%\nfor i, chain in enumerate(chain_tables):\n \n mu.pbar(i, len(chain_tables))\n \n for j, line in enumerate(chain):\n \n x, y, z, pos, mon_t = line.astype(int)\n \n mat_cn, n_mon, mat_type = resist_matrix[x, y, z, pos]\n \n if mat_cn != i or n_mon != j or mat_type != mon_t:\n print('ERROR!', i, j)\n print('chain_num:', mat_cn, i)\n print('n_mon', n_mon, j)\n print('mon_type', mon_t, mat_type)\n\n","sub_path":"PMMA_sim_Harris/PMMA_sim_2019/Harris_chain_files_no_space.py","file_name":"Harris_chain_files_no_space.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"648507597","text":"# LaserFire.py\n'''Written by Larry T, Priest (priestlt@protonmail.com)\nAugust 2021\nhardware:\n Raspberry pico, 16 LED's\nProject goal:\n simulate laser canon fire, to be included in the K-9 and supercomputer\n projects.\nSoftware:\n CircuitPython or micropython whichever I can get to work.\n'''\nimport board\nimport analogio\nimport pwmio\nimport time\nfrom adafruit_simplemath import map_range\n\nTOTALBITS = 15 # START AT 0\nLEDbits = [] # list of pins used for the laser pulses\n# input for the pulse timer\ndelaySpeed = analogio.AnalogIn(board.GP28)\nDutyCycle = 0xffff # max duty cycle\nDutyCycle2 = DutyCycle/2\nDutyCycle4 = DutyCycle/4\nnew_min = 0.025 # minimum time between pulses\nnew_max = 2 # max time between pulses\n\n\nclass LaserFire():\n\n def __init__(self):\n # initialize LED list\n subcommand1 = 'LEDbits.append(pwmio.PWMOut(board.GP'\n subcommand2 = ', frequency=1000))'\n for i in range(TOTALBITS+1):\n fullcommand = subcommand1 + str(i) + subcommand2\n exec(fullcommand)\n\n\nif __name__ == '__main__':\n LaserFire()\n while True:\n # remapped_delaySpeed = int(map_range(delaySpeed.value, 0, 65520, new_min, 
new_max))\r\n        remapped_delaySpeed = map_range(delaySpeed.value, 200, 65520, new_min, new_max)\r\n\r\n        for i in range(8):\r\n            LEDbits[i].duty_cycle = int(DutyCycle)\r\n            if i >= 1:\r\n                LEDbits[i-1].duty_cycle = int(DutyCycle2)\r\n            if i >= 2:\r\n                LEDbits[i-2].duty_cycle = int(DutyCycle4)\r\n            j = i + 8\r\n            LEDbits[j].duty_cycle = int(DutyCycle)\r\n            if j >= 9:\r\n                LEDbits[j-1].duty_cycle = int(DutyCycle2)\r\n            if j >= 10:\r\n                LEDbits[j-2].duty_cycle = int(DutyCycle4)\r\n            print(i, j, remapped_delaySpeed)\r\n            time.sleep(remapped_delaySpeed)\r\n            # print(delaySpeed.value, ' ', remapped_delaySpeed)\r\n            LEDbits[i].duty_cycle = 0\r\n            LEDbits[i-1].duty_cycle = 0\r\n            LEDbits[i-2].duty_cycle = 0\r\n            LEDbits[j].duty_cycle = 0\r\n            LEDbits[j-1].duty_cycle = 0\r\n            LEDbits[j-2].duty_cycle = 0\r\n","sub_path":"LaserFire.py","file_name":"LaserFire.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"461073629","text":"# Crawler that grabs the Bing wallpapers\r\n# Set up the base url and variables -> requests.get the json file's address -> read the json content with json.loads\r\n# Read startdate (used as the file name) and url (the actual image address) from the json and put them into file_name and target\r\n# Run with a while loop (since we want multiple images); see line 47 for the steps\r\n\r\nimport requests\r\nimport json\r\nimport os\r\nimport shutil\r\nimport time\r\n\r\n\r\n# The image url read from the json must be prefixed with basic_url before it can be opened on its own\r\nbasic_url = \"https://bing.com\"\r\n\r\n# Address of the wallpaper json file (n is the number of images to query)\r\nurl = \"https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=8&mkt=zh-tw\"\r\n\r\n# Variable that receives the urls (a list, since I want to grab many images)\r\ntarget = []\r\n\r\n# New file names (a list, since I want to grab many images)\r\nfile_name = []\r\n\r\n\r\n# Connect to the json file address\r\ndata = requests.get(url)\r\n\r\n# Read the content of the json file\r\njsondata = json.loads(data.text)\r\n\r\n\r\n# jsondata is a list; we only need to read the image urls\r\nfor i in jsondata['images']:\r\n    if(i['startdate'] is not None):\r\n        file_name.append(i['startdate'] + \".jpg\")\r\n    if(i['url'] is not None):\r\n        # Get the url of this image\r\n        target.append(basic_url + i['url'])\r\n    else:\r\n        pass\r\n\r\n\r\nprint(\"Start to download\")\r\nj = 0\r\n# Bing returns at most 8 images per request, so loop 8 times\r\nwhile j < 8:\r\n    print(\"Now had download\",j,\"photos start to download no\",j+1,\"photo\")\r\n    # Open the image\r\n    download = requests.get(target[j], stream=True)\r\n    # Prepare an empty file and copy the download into it\r\n    file_ = os.path.join(os.getcwd(), \"image\", file_name[j])\r\n\r\n    with open(file_, \"wb\") as output:\r\n        shutil.copyfileobj(download.raw, output)\r\n    del file_\r\n    print(\"Finish downloading no\",j,\"photo\")\r\n    time.sleep(3)\r\n    j += 1\r\n","sub_path":"scrap_practice/getbingphoto.py","file_name":"getbingphoto.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"486863358","text":"# Program to check if a string exists in another string.\r\n\r\ndef startsWith(str1, str2):\r\n\r\n    # Base Case: an exhausted pattern means every character matched\r\n    if len(str2) == 0:\r\n        return True\r\n\r\n    # Base Case: the text ran out or the current characters differ\r\n    if len(str1) == 0 or str1[0] != str2[0]:\r\n        return False\r\n\r\n    # Recursive Case: compare the next pair of characters\r\n    return startsWith(str1[1:], str2[1:])\r\n\r\n\r\ndef isSubstring(str1, str2):\r\n\r\n    # Base Case: an empty pattern is a substring of any string\r\n    if len(str2) == 0:\r\n        return True\r\n\r\n    # Base Case: the text is shorter than the pattern, so no match is possible\r\n    if len(str1) < len(str2):\r\n        return False\r\n\r\n    # Check if the pattern matches at the current position of the text\r\n    if startsWith(str1, str2):\r\n        return 
True\r\n\r\n # Recursive Case\r\n return isSubstring(str1[1:], str2)\r\n\r\n\r\nstr1 = \"importantance\"\r\nstr2 = \"ant\"\r\n\r\nprint(isSubstring(str1, str2))","sub_path":"Recursive Method to Check if a String is Substring of Another String/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"25505274","text":"import os\n\nimport numpy as np\n\n\ndef load_embeddings(file_name):\n if os.path.exists(file_name):\n return np.load(file_name)\n return None\n\n\ndef sort_elements(triples, elements_set):\n dic = {}\n for s, p, o in triples:\n if s in elements_set:\n dic[s] = dic.get(s, 0) + 1\n if p in elements_set:\n dic[p] = dic.get(p, 0) + 1\n if o in elements_set:\n dic[o] = dic.get(o, 0) + 1\n # firstly sort by values (i.e., frequencies), if equal, by keys (i.e, URIs)\n sorted_list = sorted(dic.items(), key=lambda x: (x[1], x[0]), reverse=True)\n ordered_elements = [x[0] for x in sorted_list]\n return ordered_elements, dic\n\n\ndef generate_sharing_id(train_links, kg1_triples, kg1_elements, kg2_triples, kg2_elements, ordered=True):\n ids1, ids2 = {}, {}\n if ordered:\n linked_dic = {}\n for x, y in train_links:\n linked_dic[y] = x\n kg2_linked_elements = [x[1] for x in train_links]\n kg2_unlinked_elements = set(kg2_elements) - set(kg2_linked_elements)\n ids1, ids2 = generate_mapping_id(kg1_triples, kg1_elements, kg2_triples, kg2_unlinked_elements, ordered=ordered)\n for ele in kg2_linked_elements:\n ids2[ele] = ids1[linked_dic[ele]]\n else:\n index = 0\n for e1, e2 in train_links:\n assert e1 in kg1_elements\n assert e2 in kg2_elements\n ids1[e1] = index\n ids2[e2] = index\n index += 1\n for ele in kg1_elements:\n if ele not in ids1:\n ids1[ele] = index\n index += 1\n for ele in kg2_elements:\n if ele not in ids2:\n ids2[ele] = index\n index += 1\n assert len(ids1) == len(set(kg1_elements))\n assert len(ids2) == len(set(kg2_elements))\n return ids1, ids2\n\n\ndef generate_mapping_id(kg1_triples, kg1_elements, kg2_triples, kg2_elements, ordered=True):\n ids1, ids2 = {}, {}\n if ordered:\n kg1_ordered_elements, _ = sort_elements(kg1_triples, kg1_elements)\n kg2_ordered_elements, _ = sort_elements(kg2_triples, kg2_elements)\n n1 = len(kg1_ordered_elements)\n n2 = len(kg2_ordered_elements)\n n = max(n1, n2)\n for i in range(n):\n if i < n1 and i < n2:\n ids1[kg1_ordered_elements[i]] = i * 2\n ids2[kg2_ordered_elements[i]] = i * 2 + 1\n elif i >= n1:\n ids2[kg2_ordered_elements[i]] = n1 * 2 + (i - n1)\n else:\n ids1[kg1_ordered_elements[i]] = n2 * 2 + (i - n2)\n else:\n index = 0\n for ele in kg1_elements:\n if ele not in ids1:\n ids1[ele] = index\n index += 1\n for ele in kg2_elements:\n if ele not in ids2:\n ids2[ele] = index\n index += 1\n assert len(ids1) == len(set(kg1_elements))\n assert len(ids2) == len(set(kg2_elements))\n return ids1, ids2\n\n\ndef uris_list_2ids(uris, ids):\n id_uris = []\n for u in uris:\n assert u in ids\n id_uris.append(ids[u])\n assert len(id_uris) == len(set(uris))\n return id_uris\n\n\ndef uris_pair_2ids(uris, ids1, ids2):\n id_uris = []\n for u1, u2 in uris:\n # assert u1 in ids1\n # assert u2 in ids2\n if u1 in ids1 and u2 in ids2:\n id_uris.append((ids1[u1], ids2[u2]))\n # assert len(id_uris) == len(set(uris))\n return id_uris\n\n\ndef uris_relation_triple_2ids(uris, ent_ids, rel_ids):\n id_uris = []\n for u1, u2, u3 in uris:\n assert u1 in ent_ids\n assert u2 in rel_ids\n assert u3 in ent_ids\n id_uris.append((ent_ids[u1], rel_ids[u2], 
ent_ids[u3]))\n assert len(id_uris) == len(set(uris))\n return id_uris\n\n\ndef uris_attribute_triple_2ids(uris, ent_ids, attr_ids):\n id_uris = []\n for u1, u2, u3 in uris:\n assert u1 in ent_ids\n assert u2 in attr_ids\n id_uris.append((ent_ids[u1], attr_ids[u2], u3))\n assert len(id_uris) == len(set(uris))\n return id_uris\n\n\ndef generate_sup_relation_triples_one_link(e1, e2, rt_dict, hr_dict):\n new_triples = set()\n for r, t in rt_dict.get(e1, set()):\n new_triples.add((e2, r, t))\n for h, r in hr_dict.get(e1, set()):\n new_triples.add((h, r, e2))\n return new_triples\n\n\ndef generate_sup_relation_triples(sup_links, rt_dict1, hr_dict1, rt_dict2, hr_dict2):\n new_triples1, new_triples2 = set(), set()\n for ent1, ent2 in sup_links:\n new_triples1 |= (generate_sup_relation_triples_one_link(ent1, ent2, rt_dict1, hr_dict1))\n new_triples2 |= (generate_sup_relation_triples_one_link(ent2, ent1, rt_dict2, hr_dict2))\n print(\"supervised relation triples: {}, {}\".format(len(new_triples1), len(new_triples2)))\n return new_triples1, new_triples2\n\n\ndef generate_sup_attribute_triples_one_link(e1, e2, av_dict):\n new_triples = set()\n for a, v in av_dict.get(e1, set()):\n new_triples.add((e2, a, v))\n return new_triples\n\n\ndef generate_sup_attribute_triples(sup_links, av_dict1, av_dict2):\n new_triples1, new_triples2 = set(), set()\n for ent1, ent2 in sup_links:\n new_triples1 |= (generate_sup_attribute_triples_one_link(ent1, ent2, av_dict1))\n new_triples2 |= (generate_sup_attribute_triples_one_link(ent2, ent1, av_dict2))\n print(\"supervised attribute triples: {}, {}\".format(len(new_triples1), len(new_triples2)))\n return new_triples1, new_triples2\n\n\n# def generate_input(triples_1_file, triples_2_file,\n# total_links_file, train_links_file, valid_links_file, test_links_file,\n# attr_triples_1_file=None, attr_triples_2_file=None,\n# alignment=\"sharing\"):\n# assert alignment in [\"sharing\", \"mapping\", \"swapping\"]\n# print(\"training data path:\", triples_1_file, triples_2_file)\n# triples1, ents1, rels1 = read_relation_triples(triples_1_file)\n# triples2, ents2, rels2 = read_relation_triples(triples_2_file)\n# num_triples = len(triples1) + len(triples2)\n# print('total triples: %d + %d = %d' % (len(triples1), len(triples2), num_triples))\n# num_ent = len(ents1) + len(ents2)\n# print(\"ent num\", num_ent)\n# num_rel = len(rels1) + len(rels2)\n# print(\"rel num\", num_rel)\n# total_links = read_links(total_links_file)\n# train_links = read_links(train_links_file)\n# valid_links = read_links(valid_links_file)\n# test_links = read_links(test_links_file)\n# print(\"train links:\", len(train_links))\n# print(\"valid links:\", len(valid_links))\n# print(\"test links:\", len(test_links))\n# if alignment == \"sharing\":\n# ent_ids1, ent_ids2 = generate_sharing_id(total_links, train_links, valid_links, test_links, ents1, ents2)\n# rel_ids1, rel_ids2 = generate_sharing_id([], [], [], [], rels1, rels2)\n# else:\n# ent_ids1, ent_ids2 = generate_mapping_id(total_links, train_links, valid_links, test_links, ents1, ents2)\n# rel_ids1, rel_ids2 = generate_mapping_id([], [], [], [], rels1, rels2)\n# id_triple1, id_triple2, id_train_links, id_valid_links, id_test_links = \\\n# uris2ids(ent_ids1, rel_ids1, ent_ids2, rel_ids2, triples1, triples2, train_links, valid_links, test_links)\n#\n# if attr_triples_1_file is not None:\n# attr_triples1, attr_ents1, attrs1 = read_attribute_triples(attr_triples_1_file)\n# attr_triples2, attr_ents2, attrs2 = read_attribute_triples(attr_triples_2_file)\n# 
num_attr_triples = len(attr_triples1) + len(attr_triples2)\n# print('total attribute triples: %d + %d = %d' % (len(attr_triples1), len(attr_triples2), num_attr_triples))\n# attr_ids1, attr_ids2 = generate_mapping_id([], [], [], [], attrs1, attrs2)\n#\n# attr_id_triples1, attr_id_triples2 = attr_uris2ids(ent_ids1, attr_ids1, ent_ids2, attr_ids2, attr_triples1, attr_triples2)\n# else:\n# attr_id_triples1, attr_id_triples2 = set(), set()\n# attr_ids1, attr_ids2 = None, None\n# kg1 = KG(id_triple1, ent_ids1, rel_ids1, attr_triples=attr_id_triples1, attrs_id_dict=attr_ids1)\n# kg2 = KG(id_triple2, ent_ids2, rel_ids2, attr_triples=attr_id_triples2, attrs_id_dict=attr_ids2)\n# if alignment == \"swapping\":\n# id_new_triples1, id_new_triples2 = generate_sup_triples(kg1, kg2, id_train_links)\n# kg1.add_sup_triples(id_new_triples1)\n# kg2.add_sup_triples(id_new_triples2)\n# kgs = KGs(kg1, kg2, id_train_links, id_valid_links, id_test_links)\n# return kgs\n\n\ndef read_relation_triples(file_path):\n print(\"read relation triples:\", file_path)\n if file_path is None:\n return set(), set(), set()\n triples = set()\n entities, relations = set(), set()\n file = open(file_path, 'r', encoding='utf8')\n for line in file.readlines():\n params = line.strip('\\n').split('\\t')\n assert len(params) == 3\n h = params[0].strip()\n r = params[1].strip()\n t = params[2].strip()\n triples.add((h, r, t))\n entities.add(h)\n entities.add(t)\n relations.add(r)\n return triples, entities, relations\n\n\ndef read_links(file_path):\n print(\"read links:\", file_path)\n links, refs, reft = [], [], []\n file = open(file_path, 'r', encoding='utf8')\n for line in file.readlines():\n params = line.strip('\\n').split('\\t')\n assert len(params) == 2\n e1 = params[0].strip()\n e2 = params[1].strip()\n refs.append(e1)\n reft.append(e2)\n links.append((e1, e2))\n assert len(refs) == len(reft)\n return links\n\n\ndef read_dict(file_path):\n file = open(file_path, 'r', encoding='utf8')\n ids = {}\n for line in file.readlines():\n params = line.strip('\\n').split('\\t')\n assert len(params) == 2\n ids[params[0]] = int(params[1])\n file.close()\n return ids\n\n\ndef read_pair_ids(file_path):\n file = open(file_path, 'r', encoding='utf8')\n pairs = []\n for line in file.readlines():\n params = line.strip('\\n').split('\\t')\n assert len(params) == 2\n pairs.append((int(params[0]), int(params[1])))\n file.close()\n return pairs\n\n\ndef pair2file(file, pairs):\n if pairs is None:\n return\n with open(file, 'w', encoding='utf8') as f:\n for i, j in pairs:\n f.write(str(i) + '\\t' + str(j) + '\\n')\n\n\ndef dict2file(file, dic):\n if dic is None:\n return\n with open(file, 'w', encoding='utf8') as f:\n for i, j in dic.items():\n f.write(str(i) + '\\t' + str(j) + '\\n')\n print(file, \"saved.\")\n\n\ndef line2file(file, lines):\n if lines is None:\n return\n with open(file, 'w', encoding='utf8') as f:\n for line in lines:\n f.write(line + '\\n')\n print(file, \"saved.\")\n\n\ndef radio2file(path, radio):\n path = os.path.join(path, str(radio).replace('.', '_'))\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\n\ndef save_results(path, rest_12):\n if not os.path.exists(path):\n os.makedirs(path)\n pair2file(os.path.join(path, 'alignment_results_12'), rest_12)\n print(\"Results saved!\")\n\n\ndef save_embeddings(path, kgs, ent_embeds, rel_embeds, attr_embeds, nv_ent_embeds=None, rv_ent_embeds=None, av_ent_embeds=None):\n if not os.path.exists(path):\n os.makedirs(path)\n if ent_embeds is not None:\n 
np.save(os.path.join(path, 'ent_embeds.npy'), ent_embeds)\n    if rel_embeds is not None:\n        np.save(os.path.join(path, 'rel_embeds.npy'), rel_embeds)\n    if attr_embeds is not None:\n        np.save(os.path.join(path, 'attr_embeds.npy'), attr_embeds)\n    if nv_ent_embeds is not None:\n        np.save(os.path.join(path, 'nv_ent_embeds.npy'), nv_ent_embeds)\n    if rv_ent_embeds is not None:\n        np.save(os.path.join(path, 'rv_ent_embeds.npy'), rv_ent_embeds)\n    if av_ent_embeds is not None:\n        np.save(os.path.join(path, 'av_ent_embeds.npy'), av_ent_embeds)\n    dict2file(os.path.join(path, 'kg1_ent_ids'), kgs.kg1.entities_id_dict)\n    dict2file(os.path.join(path, 'kg2_ent_ids'), kgs.kg2.entities_id_dict)\n    dict2file(os.path.join(path, 'kg1_rel_ids'), kgs.kg1.relations_id_dict)\n    dict2file(os.path.join(path, 'kg2_rel_ids'), kgs.kg2.relations_id_dict)\n    dict2file(os.path.join(path, 'kg1_attr_ids'), kgs.kg1.attributes_id_dict)\n    dict2file(os.path.join(path, 'kg2_attr_ids'), kgs.kg2.attributes_id_dict)\n    print(\"Embeddings saved!\")\n\n\ndef read_attribute_triples(file_path):\n    print(\"read attribute triples:\", file_path)\n    if file_path is None:\n        return set(), set(), set()\n    triples = set()\n    entities, attributes = set(), set()\n    file = open(file_path, 'r', encoding='utf8')\n    for line in file.readlines():\n        params = line.strip().strip('\\n').split('\\t')\n        if len(params) < 3:\n            continue\n        head = params[0].strip()\n        attr = params[1].strip()\n        value = params[2].strip()\n        if len(params) > 3:\n            for p in params[3:]:\n                value = value + ' ' + p.strip()\n        value = value.strip().rstrip('.').strip()\n        entities.add(head)\n        attributes.add(attr)\n        triples.add((head, attr, value))\n    return triples, entities, attributes\n","sub_path":"Code/Machine learning for Entity Matching/code/pytorch/load/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":13228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"505831483","text":"#-------------------------------------------------------------------------------\n# Get a screen capture from DPO4000 series scope and save it to a file\n\n# python 2.7 (http://www.python.org/)\n# pyvisa 1.4 (http://pyvisa.sourceforge.net/)\n# numpy 1.6.2 (http://numpy.scipy.org/)\n# MatPlotLib 1.0.1 (http://matplotlib.sourceforge.net/)\n#-------------------------------------------------------------------------------\n\nimport visa\nimport numpy as np\nfrom struct import unpack\nimport pylab\nimport time\nimport csv\nimport serial\nimport scope as osc\nimport status\n\nport = \"\"\noscilloscope = \"\"\ndirectory_open = \"\"\ndirectory_close = \"\"\npost_sample = 1\niterations = 0\ndelay = 0\n\nmodule = \"NAME\"\niteration_start = 0\niteration_counter = 0\n\niteration_sample = 1\n\nminutes = -1\n\n\n\noscilloscope_sample = str(10000000)\noscilloscope_sample_start = str(4990000)\noscilloscope_sample_stop = str(5700000)\n\ntemperature_threshold = 0\n\ndef main():\n\n\ttimed = False\n\tnoosc = False\n\n\n\twith open('config.txt') as csv_file:\n\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\t\tline_count = 0\n\t\tfor row in csv_reader:\n\t\t\tkey = row[0]\n\t\t\tvalue = row[1]\n\t\t\tprint(str(row[0]))\n\t\t\tprint(str(row[1]))\n\t\t\tline_count += 1\n\t\t\tif(key == \"port\"):\n\t\t\t\tport = value\n\t\t\telif(key == \"oscilloscope\"):\n\t\t\t\toscilloscope = value\n\t\t\telif(key == \"directory_open\"):\n\t\t\t\tdirectory_open = value\n\t\t\telif(key == \"directory_close\"):\n\t\t\t\tdirectory_close = 
value\n\t\t\telif(key == \"post_sample\"):\n\t\t\t\tpost_sample = int(value)\n\t\t\telif(key == \"iterations\"):\n\t\t\t\titerations = int(value)\n\t\t\telif(key == \"oscilloscope_sample\"):\n\t\t\t\toscilloscope_sample = value\n\t\t\telif(key == \"oscilloscope_sample_start\"):\n\t\t\t\toscilloscope_sample_start = value\n\t\t\telif(key == \"oscilloscope_sample_stop\"):\n\t\t\t\toscilloscope_sample_stop = value\n\t\t\telif(key == \"temperature_threshold\"):\n\t\t\t\ttemperature_threshold = (int(value)).to_bytes(1, byteorder=\"big\")\n\t\t\telif(key == \"gas_threshold\"):\n\t\t\t\tgas_threshold = (int(value)).to_bytes(1, byteorder=\"big\")\n\t\t\telif(key == \"voltage_threshold\"):\n\t\t\t\tvoltage_threshold = (int(value)).to_bytes(1, byteorder=\"big\")\n\t\t\telif(key == \"module\"):\n\t\t\t\tmodule = value\n\t\t\telif(key == \"iteration_start\"):\n\t\t\t\titeration_start = int(value)\n\t\t\t\titeration_counter = iteration_start\n\t\t\telif(key ==\"minutes\"):\n\t\t\t\tminutes = int(value)\n\t\t\t\ttimed = True\n\t\t\telif(key == \"noosc\"):\n\t\t\t\tnoosc = True\n\t\t\telif(key == \"iteration_sample\"):\n\t\t\t\titeration_sample = int(value)\n\t\t\telif(key == \"delay\"):\n\t\t\t\tdelay = float(value)\n\t\t\telif(key == \"sample_delay\"):\n\t\t\t\tsample_delay = (int(value)).to_bytes(1, byteorder=\"big\")\n\n\n\t\tprint(f'Read {line_count} lines.')\n\n\trm = visa.ResourceManager()\n\tscope = rm.get_instrument(oscilloscope)\n\tser = serial.Serial(port, 9600, timeout=1, dsrdtr=False, rtscts=False)\n\tprint(ser.name)\n\n\tosc.setup(scope, oscilloscope_sample, oscilloscope_sample_start, oscilloscope_sample_stop)\n\n\tinput(\"Press Enter to continue...\")\n\n\terror = 0\n\n#Confirm that module started up initilized in expected mode\n\tprint(\"Confirming Config Mode\")\n\tser.write(b'\\x80')\n\tser.write(b'\\x80')\n\tstate = ser.read(1)\n\tstate = int.from_bytes(state, byteorder='big')\n\tstatus.print_status(state)\n\tif(status.mode_is_timeout(state)):\n\t\tprint('Exiting...')\n\t\treturn\n\tif not status.mode_is_config(state):\n\t\tprint(\"Module is in wrong mode\")\n\t\treturn\n\tprint(\"Config Mode confirmed\")\n\n\tprint(\"\");\n\n\tprint(\"Setting Temperature Threshold\")\n\tser.write(bytes(b'\\x21'))\n\tser.write(temperature_threshold)\n\tstate0 = ser.read(1)\n\tstate1 = ser.read(1)\n\tstate0 = int.from_bytes(state0, byteorder='big')\n\tstate1 = int.from_bytes(state1, byteorder='big')\n\tstatus.print_status(state1)\n\tif(status.mode_is_timeout(state1)):\n\t\tprint('Exiting...')\n\t\treturn\n\tif not status.mode_is_config(state1):\n\t\tprint(\"Module is in wrong mode\")\n\t\treturn\n\tprint(\"Temperature set to: \" + str(state0) + \" or \" + str(state0/2.0) + \"degC\")\n\n\tprint(\"\")\n\n\tprint(\"Setting Gas Threshold\")\n\tser.write(bytes(b'\\x22'))\n\tser.write(gas_threshold)\n\tstate0 = ser.read(1)\n\tstate1 = ser.read(1)\n\tstate0 = int.from_bytes(state0, byteorder='big')\n\tstate1 = int.from_bytes(state1, byteorder='big')\n\tstatus.print_status(state1)\n\tif(status.mode_is_timeout(state1)):\n\t\tprint('Exiting...')\n\t\treturn\n\tif not status.mode_is_config(state1):\n\t\tprint(\"Module is in wrong mode\")\n\t\treturn\n\tprint(\"Gas set to: \" + str(state0))\n\n\tprint(\"\")\n\n\tprint(\"Setting Voltage Threshold\")\n\tser.write(bytes(b'\\x24'))\n\tser.write(voltage_threshold)\n\tstate0 = ser.read(1)\n\tstate1 = ser.read(1)\n\tstate0 = int.from_bytes(state0, byteorder='big')\n\tstate1 = int.from_bytes(state1, 
byteorder='big')\n\tstatus.print_status(state1)\n\tif(status.mode_is_timeout(state1)):\n\t\tprint('Exiting...')\n\t\treturn\n\tif not status.mode_is_config(state1):\n\t\tprint(\"Module is in wrong mode\")\n\t\treturn\n\tprint(\"Voltage set to: \" + str(state0) + \" or \" + str(state0/2.0/100) + \"volts\")\n\n\tprint(\"\")\n\n\tprint(\"Setting Sample Delay \")\n\tser.write(bytes(b'\\x30'))\n\tser.write(sample_delay)\n\tstate0 = ser.read(1)\n\tstate1 = ser.read(1)\n\tstate0 = int.from_bytes(state0, byteorder='big')\n\tstate1 = int.from_bytes(state1, byteorder='big')\n\tstatus.print_status(state1)\n\tif(status.mode_is_timeout(state1)):\n\t\tprint('Exiting...')\n\t\treturn\n\tif not status.mode_is_config(state1):\n\t\tprint(\"Module is in wrong mode\")\n\t\treturn\n\tprint(\"Delay set to: \" + str(state0) + \"ms\")\n\n\n#Confirm that module switched to run mode\n\tprint(\"Confirming Run Mode\")\n\tser.write(b'\\x40')\n\tser.write(b'\\x40')\n\tstate = ser.read(1)\n\tstate = int.from_bytes(state, byteorder='big')\n\tstatus.print_status(state)\n\tif(status.mode_is_timeout(state)):\n\t\tprint('Exiting...')\n\t\treturn\n\tif not status.mode_is_run(state):\n\t\tprint(\"Module is in wrong mode\")\n\t\treturn\n\tprint(\"Run Mode confirmed\")\n\n\tstart_time = time.monotonic()\n\tend_time = start_time + minutes*60.0\n\tprint(str(start_time))\n\tprint(str(end_time))\n\n\treturn\n#perform the requested number of iterations\n\twhile (iteration_counter < iterations and not timed) or (timed and time.monotonic() 0:\n            print(\"Epoch {:03d}/{:03d} | Loss {:.4f} | Win count {}\".format(e, epochs, loss, win_cnt))\n            win_hist.append(win_cnt)\n\n    return win_hist\n\nif __name__ == \"__main__\":\n    \"\"\"\n    Model parameters\n    \"\"\"\n    # Note: no other IPython instance can be running elsewhere, i.e. close jupyter notebook when running this program if an ipython kernel is active in a notebook\n    if 'session' in locals() and session is not None:\n        print('Close interactive session')\n        session.close()\n\n    num_actions = 3\n    hidden_size = 100\n    grid_size = 10\n\n    model = baseline_model(grid_size, num_actions, hidden_size)\n    model.summary()\n\n    # Training process\n    # playing many games\n    epoch = 5000\n    hist = train(model, epoch, verbose=0)\n    print(\"Training done\")\n\n    model.save_weights(\"model.h5\", overwrite=True)\n    with open(\"model.json\", \"w\") as outfile:\n        json.dump(model.to_json(), outfile)\n","sub_path":"Catch_game_RL/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"44042419","text":"from tkinter import *\nimport tkinter.messagebox as tkMessageBox\n\nENTER_LHS = 0\nENTER_RHS = 1\n\nclass Calculator(Frame):\n    def __init__(self, parent):\n        Frame.__init__(self, parent)\n        parent.title(\"Calculator\")\n\n        self.state = ENTER_LHS\n        self.lhs = ''\n        self.rhs = ''\n        self.op = ''\n        self.result = 0\n\n        self.operators = ['/', '*', '-', '+']\n        self.valid_digit_chars = ['.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n        self['padx'] = 10\n        self['pady'] = 10\n\n        self.var_display = StringVar()\n        self.var_display.set('I\\'m a calculator!')\n        Label(self, textvariable=self.var_display).grid(row=0, column=0, columnspan=2, sticky=E+W, pady=5)\n\n        number_width = op_width = 4\n        number_height = op_height = 2\n\n        self.create_numbers(self, number_width, number_height).grid(row=1, column=0, padx=5, pady=5)\n        self.create_operators(self, op_width, op_height).grid(row=1, column=1, padx=5, pady=5)\n\n        Button(self, text='=', height=op_height, command=lambda: self.on_button('='))\\\n            .grid(row=2, column=0, 
columnspan=2, sticky=E+W, padx=5)\n\n        self.grid(row=0, column=0, sticky=N+E+S+W)\n\n        self.bind('<Key>', lambda event: self.on_button(event.char))\n        self.bind('<Return>', lambda event: self.on_button('='))\n        # So we get keyboard events\n        self.focus_set()\n\n    def create_numbers(self, parent, w, h):\n        frame_numbers = Frame(parent)\n\n        self.buttons_numbers = dict()\n\n        for i in range(3):\n            for j in range(3):\n                n = 3*i + j + 1\n                self.buttons_numbers[n] = Button(frame_numbers,\n                                                 text='%i' % n,\n                                                 width=w, height=h,\n                                                 command=lambda x=n: self.on_button(x)\n                                                 )\n                self.buttons_numbers[n].grid(row=2-i, column=j)\n\n        self.buttons_numbers[0] = Button(frame_numbers, text='0', width=w, height=h, command=lambda: self.on_button(0))\n        self.buttons_numbers[0].grid(row=3, column=0, columnspan=2, sticky=E+W)\n\n        self.buttons_numbers['.'] = Button(frame_numbers, text='.', width=w, height=h, command=lambda: self.on_button('.'))\n        self.buttons_numbers['.'].grid(row=3, column=2)\n\n        return frame_numbers\n\n    def create_operators(self, parent, w, h):\n        frame_operators = Frame(parent)\n\n        self.buttons_ops = dict()\n\n        r = 0\n        for o in self.operators:\n            self.buttons_ops[o] = Button(frame_operators, text=o, width=w, height=h, command=lambda x=o: self.on_button(x))\n            self.buttons_ops[o].grid(row=r, column=0)\n            r += 1\n\n        return frame_operators\n\n    def on_button(self, n):\n        if n == '=':\n            try:\n                lhs = float(self.lhs)\n                rhs = float(self.rhs)\n\n                if self.op == '+':\n                    self.result = lhs + rhs\n                elif self.op == '-':\n                    self.result = lhs - rhs\n                elif self.op == '*':\n                    self.result = lhs * rhs\n                elif self.op == '/':\n                    try:\n                        self.result = lhs / rhs\n                    except ZeroDivisionError:\n                        tkMessageBox.showerror('Divide by zero error', 'Don\\'t divide by zero!')\n                        self.result = 0\n            except ValueError:\n                self.reset_state()\n                return\n\n            self.var_display.set(self.lhs + self.op + self.rhs + '=' + str(self.result))\n            self.reset_state()\n\n        else:\n            if n in self.operators:\n                # Handles the case where an operator is entered instead of '='\n                if self.state == ENTER_RHS:\n                    # Ugly hack because I'm lazy. Evaluate the expression. 
Upon return of control, self.lhs is '' and\n # result is taken to be the left hand side\n self.on_button('=')\n # Handles the case where an operator is entered instead of a number after evaluation\n if self.lhs == '':\n self.lhs = str(self.result)\n self.op = n\n self.state = ENTER_RHS\n elif isinstance(n, int) or n in self.valid_digit_chars:\n if self.state == ENTER_LHS:\n self.lhs += str(n)\n else:\n self.rhs += str(n)\n\n self.var_display.set(self.lhs + self.op + self.rhs)\n\n def reset_state(self):\n self.lhs = ''\n self.rhs = ''\n self.op = ''\n self.state = ENTER_LHS\n\n def on_close(self):\n pass\n\ndef main():\n root = Tk()\n app = Calculator(root)\n root.mainloop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"72668136","text":"import json\nfrom datetime import datetime\nfrom pytz import timezone\n\nclass TranslationDelivered:\n\n def __init__(self, json_input):\n json_obj = json.loads(json_input)\n\n self.timestamp = self.__process_datetime(json_obj['timestamp'])\n self.translation_id = json_obj['translation_id']\n self.source_language = json_obj['source_language']\n self.target_language = json_obj['target_language']\n self.client_name = json_obj['client_name']\n self.event_name = json_obj['event_name']\n self.duration = json_obj['duration']\n self.nr_words = json_obj['nr_words']\n \n def __process_datetime(self, date_time):\n date_time_obj = datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S.%f')\n tz = timezone('Europe/Lisbon')\n tz.localize(date_time_obj)\n return date_time_obj\n\ndef translation_delivered_parser(file_path):\n with open(file_path) as file:\n for line in file:\n x = TranslationDelivered(line)\n yield x\n","sub_path":"average/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"259813185","text":"import cv2;\nimport numpy as np;\nimport matplotlib.pyplot as plt;\n\nimg = cv2.imread('jungle.jpg')\ncolor = ('b', 'g', 'r')\n\nplt.subplot(211), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\nplt.subplot(212)\nfor i, col in enumerate(color):\n histr = cv2.calcHist([img], [i], None, [256], [0,256])\n plt.plot(histr, color = col)\n plt.xlim([0,256])\n\nplt.show()\n","sub_path":"Histogram/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"235284969","text":"import os\nimport csv\nimport autoprimer.autoprimer as ap\nimport requests\nimport sys\nfrom pybedtools import BedTool\n\n\nclass Variant:\n def __init__(self, chromosome, start, build, **kwargs):\n self.filename = \"\"\n self.gene = \"\"\n self.strand = \"\"\n self.build = build\n self.chromosome = chromosome\n self.start = start\n self.end = \"\"\n self.lenght = \"\"\n self.ref = \"\"\n self.alt = \"\"\n self.inheritance = \"\"\n self.condition = \"\"\n self.hgvsc = \"\"\n self.hgvsp = \"\"\n self.zygosity = \"\"\n self.pathogenicity = \"\"\n self.contribution = \"\"\n self.depth = \"\"\n self.af_max = \"\"\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n\ndef get_surrounding_sequence(variant):\n start = int(variant.start) - 251\n stop = int(variant.start) + 250\n variant_bed = BedTool(variant.chromosome + \" \" + str(start) + \" \" + str(stop), 
from_string=True)\n # set the genome to use\n if variant.build == 'GRCh37':\n ucsc_fasta = BedTool(\"/media/sf_S_DRIVE/genomic_resources/primer_design/hg19.fa\")\n #ucsc_fasta = BedTool(\"/srv/primer_design/s_drive/hg19.fa\")\n elif variant.build == 'GRCh38':\n ucsc_fasta = BedTool(\"/media/sf_S_DRIVE/genomic_resources/primer_design/hg38.fa\")\n #ucsc_fasta = BedTool(\"/srv/primer_design/s_drive/hg38.fa\")\n # use pybedtools API to return the sequence\n genomic_region = variant_bed.sequence(fi=ucsc_fasta, tab=True)\n bedtools_result = open(genomic_region.seqfn).read()\n raw_sequence = bedtools_result.strip().split('\\t')[1]\n return raw_sequence.upper()\n\n\ndef look_up_strand(gene_symbol):\n server = \"https://rest.ensembl.org\"\n ext = \"/lookup/symbol/homo_sapiens/\" + gene_symbol + \"?expand=1\"\n\n r = requests.get(server + ext, headers={\"Content-Type\": \"application/json\"})\n\n if not r.ok:\n r.raise_for_status()\n sys.exit()\n\n decoded = r.json()\n if repr(decoded['strand']) == \"1\":\n strand = \"Forward (+)\"\n elif repr(decoded['strand']) == \"-1\":\n strand = \"Reverse (-)\"\n return strand\n\n\ndef design_from_bespoke(variant, options):\n print('chromosome = ' + variant.chromosome)\n print('coord = ' + str(variant.start))\n sequence = get_surrounding_sequence(variant)\n input_sequence = ap.InputSequence(variant.hgvsc, sequence, gene_name=variant.gene, chrom_number=variant.chromosome,\n genomic_coords=(int(variant.start)-250, int(variant.start)+250), strand=\"+\")\n target = ap.TargetRegion(variant.chromosome + \":\" + str(variant.start), sequence, int(variant.start)-250, int(variant.start)+250, 245, input_sequence)\n print(type(target))\n input_sequence.set_snps_bed(variant.build)\n input_sequence.target_regions.append(target)\n\n target_regions = input_sequence.target_regions\n for target in target_regions:\n target.set_snps()\n target.mask_sequence(options['max_avhet'])\n print(target.masked_sequence)\n target.set_bespoke_primers(options['min_product_size'], options['max_product_size'], options['primer_opt_size'],\n options['primer_min_size'], options['primer_max_size'], options['primer_opt_tm'],\n options['primer_min_tm'], options['primer_max_tm'], options['primer_min_gc'],\n options['primer_max_gc'])\n primers = target.primers\n for primer in primers:\n primer.set_snps(target)\n print(primer.forward_seq, primer.forward_genomic_coords, primer.reverse_seq, primer.reverse_genomic_coords)\n return input_sequence\n\n\n\ndef write_to_csv(input_sequence, variant):\n \"\"\"\n Writes primers that have been designed to a CSV file\n \"\"\"\n # inputSequence ID used as the filename\n filename = variant.chromosome + '-' + str(variant.start) + '.csv'\n targets = input_sequence.target_regions\n #with open('/srv/primer_design/s_drive/designs/' + filename, 'w') as csvfile:\n with open('/media/sf_S_DRIVE/genomic_resources/primer_design/designs/' + filename, 'w') as csvfile:\n f = csv.writer(csvfile, delimiter=',',\n quotechar=',', quoting=csv.QUOTE_MINIMAL)\n f.writerow(['Gene', 'Strand', 'Target', 'Product size', 'Forward primer sequence', 'Genomic Coords', 'Forward TM',\n 'Forward GC %', 'Forward SNPs', 'Reverse primer sequence', 'Genomic Coords', 'Reverse TM',\n 'Reverse GC %', 'Reverse SNPs'])\n for target in targets:\n primer_list = target.primers\n # Primer temperatures and GC% rounded to 2 decimal places\n for primer in primer_list:\n forward_snps = ''\n reverse_snps = ''\n for snp in primer.forward_snps:\n forward_snps = forward_snps + snp.snp_id + ' (' + str(round(snp.av_het, 
4)) + ') '\n for snp in primer.reverse_snps:\n reverse_snps = reverse_snps + snp.snp_id + ' (' + str(round(snp.av_het, 4)) + ') '\n f.writerow([input_sequence.gene_name, variant.strand, target.target_id, primer.product_size, primer.forward_seq,\n input_sequence.chrom_number + \":\" + str(primer.forward_genomic_coords[0]) + \"-\" + str(\n primer.forward_genomic_coords[1]), round(primer.forward_tm, 2),\n round(primer.forward_gc, 2), forward_snps, primer.reverse_seq,\n input_sequence.chrom_number + \":\" + str(primer.reverse_genomic_coords[0]) + \"-\" + str(\n primer.reverse_genomic_coords[1]),\n round(primer.reverse_tm, 2), round(primer.reverse_gc, 2), reverse_snps])\n\ndef write_to_bed(input_sequence, variant):\n \"\"\"\n Writes primers that have been designed to a CSV file\n \"\"\"\n # inputSequence ID used as the filename\n filename = variant.chromosome + '-' + str(variant.start) + '.bed'\n targets = input_sequence.target_regions\n #with open('/srv/primer_design/s_drive/designs/' + filename, 'w') as csvfile:\n with open('/media/sf_S_DRIVE/genomic_resources/primer_design/designs/' + filename, 'w') as csvfile:\n f = csv.writer(csvfile, delimiter='\\t',\n quotechar=';', quoting=csv.QUOTE_MINIMAL)\n f.writerow(['track name=\"' + filename + '\" description=' + '\"Primers designed for' + filename +\n '\" visibility=2 itemRgb=\"On\"'])\n for target in targets:\n f.writerow([input_sequence.chrom_number, target.seq_start + target.overhang,# - 30,\n target.seq_stop - target.overhang , target.target_id, 0, input_sequence.strand,\n target.seq_start + target.overhang, target.seq_stop - target.overhang,# + 30,\n '255,0,0'])\n for target in targets:\n for primer in target.primers:\n f.writerow([input_sequence.chrom_number, primer.forward_genomic_coords[0] - 1,\n primer.reverse_genomic_coords[1], target.target_id, 0, input_sequence.strand,\n primer.forward_genomic_coords[1], primer.reverse_genomic_coords[0] - 1, '0,0,255'])\n\n\ndef bespoke_design(genome_build, chromosome, coordinate, max_avhet, min_product_size, max_product_size, primer_opt_size,\n primer_min_size, primer_max_size, primer_opt_tm, primer_min_tm, primer_max_tm,\n primer_min_gc, primer_max_gc):\n variant = Variant(chromosome, coordinate, genome_build)\n options = {'max_avhet': float(max_avhet),\n 'min_product_size': int(min_product_size),\n 'max_product_size': int(max_product_size),\n 'primer_opt_size': int(primer_opt_size),\n 'primer_min_size': int(primer_min_size),\n 'primer_max_size': int(primer_max_size),\n 'primer_opt_tm': float(primer_opt_tm),\n 'primer_min_tm': float(primer_min_tm),\n 'primer_max_tm': float(primer_max_tm),\n 'primer_min_gc': float(primer_min_gc),\n 'primer_max_gc': float(primer_max_gc)}\n input_sequence = design_from_bespoke(variant, options)\n write_to_csv(input_sequence, variant)\n write_to_bed(input_sequence, variant)\n return input_sequence","sub_path":"autoprimerapp/autoprimerapp/autoprimer/bespoketarget.py","file_name":"bespoketarget.py","file_ext":"py","file_size_in_byte":8401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"614455874","text":"####python plan for converting table s2 to FASTA format\n## feb 18 2015 \n# carolyn tarpey (&garrett)\n\n## this needs two arguments:\n#the first is the name of the excel file that needs converting \n#the second is the name of the output file you want (the FASTA file)\n\n#!/bin/bash\n\nimport sys\nimport re\n\n#open file\nexcel_file = open(sys.argv[1], \"r\")\nFASTA = open(sys.argv[2],\"w\") \n\nfor line in 
excel_file:#read one line of the excel file at a time and \n\tcolumns = line.split(\"\\t\")#take that line and split it up by the tabs\n\t#print columns \n\tnewline =[ \">\", \"\\t\", columns[1], \"\\t\", columns[2], \"\\t\", columns[3], \"\\n\" ] #> the second column tab third column tab fourth column end line\n\t#print newline\n\tFASTA.write(''.join(newline)) # write this to the output file: > the second column tab third column tab fourth column end line\n\tseq = columns[4]\n\tpat1 = r'(\\[)'\n\tpat2 = r'(/\\w])'\n\tseq_new = \"\"\n\tx = re.sub(pat1, seq_new, seq)\n\t#print x\n\ty = re.sub(pat2, seq_new, x)\n\t#print y\n\t\n\tFASTA.write(''.join(y)) # write this to the output file: the edited sequence and the end of line \n\tFASTA.write(\"\\n\") #skip a line in the output\n\nexcel_file.close()\nFASTA.close()\n\n\n","sub_path":"General_Scripts/convert_to_FASTA.py","file_name":"convert_to_FASTA.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"530803968","text":"import json\nimport numpy as np\nfrom PearsonModel.Constant import Const\n\nclass StatisticTyphoon(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self, typhoon_dict, TARGET_BAND_NUM, flag = True):\n        if flag:\n            self.file = self.__getGPVfromFile__(typhoon_dict['GPVfile'], TARGET_BAND_NUM)\n            self.position = [typhoon_dict['latitude'], typhoon_dict['longitude']] \n            self.movement = typhoon_dict['movement']\n        self.analogies = np.zeros(len(Const.TARGET_BAND))\n\n    def getMovement(self):\n        return self.movement\n\n    # Compute the similarity score - OK\n    def calcAnalogy(self, data, bandIndex, INDEXES):\n        Xave = 0\n        Yave = 0\n        Sxy = 0\n        Sx = 0\n        Sy = 0\n\n        for index in INDEXES:\n            Xave += self.dataset[bandIndex]['Values'][ index[0], index[1] ]\n            Yave += data[bandIndex]['Value'][ index[0], index[1] ]\n\n        Xave = Xave / len(INDEXES)\n        Yave = Yave / len(INDEXES)\n\n        for index in INDEXES:\n            Sxy += (self.dataset[bandIndex]['Values'][ index[0], index[1] ] - Xave) * (data[bandIndex]['Value'][ index[0], index[1] ] - Yave)\n            Sx += (self.dataset[bandIndex]['Values'][ index[0], index[1] ] - Xave) ** 2\n            Sy += (data[bandIndex]['Value'][ index[0], index[1] ] - Yave) ** 2\n\n        self.analogies[bandIndex] = Sxy / (np.sqrt(Sx) * np.sqrt(Sy))\n\n    # Compute the average of the similarity scores - OK\n    def aveAnalogy(self):\n        self.analogy = np.average(self.analogies)\n        print(self.analogies)\n        return self.analogy\n\n    def getAveAnalogy(self):\n        return self.analogy\n\n    # Load the GPV data from file - OK\n    def __getGPVfromFile__(self, fname, TARGET_BAND_NUM):\n        \n        fp = open(fname, 'r')\n        jsondata = json.load(fp)\n\n        self.dataset = []\n        for TARGET in TARGET_BAND_NUM:\n            for datas in jsondata.values():\n                if datas['band'] == TARGET:\n                    info = { 'Pressure' : datas['description'], 'Element' : datas['metadata']['']['GRIB_COMMENT'], 'Values' : self.filtering(np.array(datas['GPV']))}\n                    self.dataset.append(info)\n                    break\n        fp.close()\n\n    # Filter down to the converted grid spacing - OK\n    def filtering(self, dataset):\n\n        filtedValues = np.zeros([len(Const.CONVERTED_LATITUDE), len(Const.CONVERTED_LONGITUDE)])\n\n        for latIndex, latValue in enumerate(Const.CONVERTED_LATITUDE):\n            for longIndex, longValue in enumerate(Const.CONVERTED_LONGITUDE):\n\n                original = self.__calcGPVIndexes__(latValue, longValue)\n                filtedValues[latIndex, longIndex] = self.__Gaussian__(dataset, original, Const.N)\n\n        return filtedValues\n\n    # Get the index numbers in the original data - OK\n    def __calcGPVIndexes__(self, lat, long):\n        latIndex = int(round((lat - 47.6) / (- 0.1)))\n        longIndex = int(round((long - 120.0) / 0.125))\n        return 
[latIndex, longIndex]\n\n    # Apply a Gaussian filter - OK\n    def __Gaussian__(self, dataset, indexes, N):\n        value = 0\n        for y in np.arange(-N, N + 1, 1):\n            for x in np.arange(-N, N + 1, 1):\n                distance = np.sqrt(x ** 2 + y ** 2)\n                K = 1.0 / (2.0 * 3.14) * np.exp(- distance / 2)\n\n                # Clamp indices that fall outside the region\n                yaxis = indexes[0] - y\n                xaxis = indexes[1] - x\n                if yaxis < 0 : yaxis = 0\n                elif yaxis > 252 : yaxis = 252\n                if xaxis < 0 : xaxis = 0\n                elif xaxis > 240 : xaxis = 240\n\n                value += K * dataset[yaxis, xaxis]\n\n        return value\n\n","sub_path":"PearsonModel/StatisticTyphoon.py","file_name":"StatisticTyphoon.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"1871254","text":"import sys\nsys.path.append('../Utilities/')\nsys.path.append('../Methods/DILCA/')\nfrom dilca import dilca\nfrom synthetic_data_seq import seq_generator\nimport pickle\nimport os\nimport numpy as np\nimport argparse\n\n\nparser = argparse.ArgumentParser(description='Model Parameter Setting')\nparser.add_argument('--batch_size', default=600, help='Generate batch size of categorical data stream',\n                    dest='batch_size', type=int)\nparser.add_argument('--data_sets', default=None, help='Specific data sets', dest='data_sets', type=str)\nparser.add_argument('--window_num', default=250, help='Specific number of windows/batches', dest='window_num', type=int)\nargs = parser.parse_args()\n\n\n# import a dataset\ndef listdir(path, list_name):\n    for file in os.listdir(path):\n        file_path = os.path.join(path, file)\n        if os.path.isdir(file_path):\n            listdir(file_path, list_name)\n        elif os.path.splitext(file_path)[1] == '.pkl':\n            list_name.append(file_path)\n\n\ndef mkdir(path):\n    import os\n    path = path.strip()\n    path = path.rstrip(\"\\\\\")\n    isExists = os.path.exists(path)\n\n    if not isExists:\n        os.makedirs(path)\n        return True\n    else:\n        return False\n\n\nmkdir('./Results/DILCA/batch_size_{}/'.format(args.batch_size))\n\nif args.data_sets is None:\n    datalist = []\n    listdir('../Data/static data/', datalist)\nelse:\n    data_sets = args.data_sets.split(',')\n    path = '../Data/static data/'\n    datalist = [path+i+'.pkl' for i in data_sets]\n\n\n# Represent categorical data and execute clustering\nfor dataset_index, dataname in enumerate(datalist):\n    data = pickle.load(open(dataname,'rb'))\n    print('{}/{}, data set {} is processing.'.format(dataset_index+1, len(datalist), data['data_name']))\n    # # transform the static data to data stream\n    try:\n        seq_data = seq_generator(data, batch_size=args.batch_size, windows = args.window_num, random_seed=0)\n        print('The data stream contains {} batchs and {} objects.'.format(len(seq_data['data']), len(data['data'])))\n        results = {} # results dictionary\n        # # # DILCA representation clustering\n        representation_set, clustering_set = dilca(seq_data['data'], seq_data['label'])\n        results['representation_set'] = representation_set\n        results['clustering_set'] = clustering_set\n        from evaluation import accuracy\n        acc = accuracy(clustering_set,seq_data['label'])\n        print('accuracy: {:.4f}±{:.4f}'.format(np.mean(acc),np.std(acc)))\n        results['accuracy'] = acc\n        from evaluation import precision\n        pcs = precision(clustering_set,seq_data['label'])\n        print('precision: {:.4f}±{:.4f}'.format(np.mean(pcs),np.std(pcs)))\n        results['precision'] = pcs\n        from evaluation import recall\n        rc = recall(clustering_set,seq_data['label'])\n        print('recall: {:4f}±{:.4f}'.format(np.mean(rc),np.std(rc)))\n        results['recall'] = rc\n        from evaluation import fscore\n        fs = 
fscore(pcs, rc)\n        results['fscore'] = fs\n        print('FScore: {:4f}±{:.4f}'.format(np.mean(fs), np.std(fs)))\n        pickle.dump(results, open('./Results/DILCA/batch_size_{}/'.format(args.batch_size) + data['data_name']+'.pkl','wb'))\n    except Exception:\n        print('Something wrong! Skipped...')\n","sub_path":"Experiments/dilca_evaluation.py","file_name":"dilca_evaluation.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"434529385","text":"import json\n\nimport googletrans\nfrom bs4 import BeautifulSoup\n\nif __name__ == '__main__':\n    translator = googletrans.Translator()\n    options = {}\n    with open('fuels.txt', 'r', encoding='utf8') as file:\n        content = file.read()\n        soup = BeautifulSoup(content, 'html.parser')\n\n        inputs = soup.find_all('option')\n        options = []\n        for input in inputs:\n            print(input)\n            code = input.attrs['value']\n            name = input.text\n            translated = translator.translate(name, src='ko', dest='ru')\n            text = translated.text\n            options.append({\n                'name': name,\n                'code': code,\n                'label': text,\n            })\n        print(options)\n    with open('translated_fuels.json', 'w', encoding='utf8') as file:\n        file.write(json.dumps(options))\n","sub_path":"importer/parse_fuels.py","file_name":"parse_fuels.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"588524752","text":"import itertools\n\n'''\nThis file contains generators that are useful\nshort hands for unusual kinds of looping\n'''\n\n# yields the id number of the node\n# and the number of input values\n# there into the layer that it is in\ndef id_inputs (inputs, shape):\n    node_id = 0\n    for size in shape:\n        for i in xrange(size):\n            yield node_id, inputs\n            node_id += 1\n        inputs = size\n\n# yield the index of locations with\n# empty lists\ndef id_empty (sequence):\n    for i,v in enumerate(sequence):\n        if len(v) == 0: yield i\n\n# like enumerate but yields the values\n# in reverse\ndef reversed_enumerate(sequence):\n    indices = reversed(xrange(len(sequence)))\n    values = reversed(sequence)\n    for i,v in itertools.izip(indices, values):\n        yield i,v\n    \n    \n","sub_path":"src/nn/utils/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"297918852","text":"# y1 = 2 + sqrt(x)\r\n# y2 = x^2 / 4\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\n\r\ns1 = \"2 + np.math.sqrt(x)\"\r\ns2 = \"x / 4\"\r\n\r\nx = np.arange(0.0, 6.0, 1.0)\r\ny1 = []\r\ny2 = (x**2) / 4\r\n\r\nfor i in np.arange(0.0, 6.0, 1.0):\r\n    y1.append(2 + np.math.sqrt(i))\r\n\r\nplt.figure(1)\r\nplt.subplot(111)\r\nplt.plot(x, y1)\r\n\r\nplt.figure(1)\r\nplt.plot(x, y2)\r\nax = plt.gca()\r\nplt.show()\r\n\r\ndef f(n):\r\n    pog = 0;\r\n    for i in range(n):\r\n        xrand = random.uniform(0,4)\r\n        yrand = random.uniform(0,4)\r\n        y1 = 2 + np.math.sqrt(xrand)\r\n        y2 = (xrand ** 2) / 4\r\n        if ( y1 > yrand > y2):\r\n            pog+=1\r\n            print(xrand, yrand, y1, y2)\r\n    return(pog/n)\r\nprint(\"Number of trials  Area\")\r\nprint('{:>10d} {:<5.4}'.format(1000000,f(1000000)))\r\n","sub_path":"MonteKarlo.py","file_name":"MonteKarlo.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"333418599","text":"# Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n# SPDX-License-Identifier: MIT-0\n\n# Implementation of the API backend for resources\nimport boto3\nimport json\nimport os\nimport uuid\nfrom datetime import datetime\n\nfrom aws_embedded_metrics import metric_scope\n\n# Patch libraries to instrument downstream calls\n# See https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-python-patching.html for more details\nfrom aws_xray_sdk.core import patch_all\npatch_all()\n\n# Prepare DynamoDB client\nRESOURCES_TABLE = os.getenv('RESOURCES_TABLE', None)\ndynamodb = boto3.resource('dynamodb')\nddbTable = dynamodb.Table(RESOURCES_TABLE)\n\n\n@metric_scope\ndef lambda_handler(event, context, metrics):\n    route_key = f\"{event['httpMethod']} {event['resource']}\"\n\n    # Set default response, override with data from DynamoDB if any\n    response_body = {'Message': 'Unsupported route'}\n    status_code = 400\n    headers = {\n        'Content-Type': 'application/json',\n        'Access-Control-Allow-Origin': '*'\n    }\n\n    # Initialize putting common business metrics using EMF\n    metric_payload = {}\n    metrics.put_dimensions({'Service': 'Resources'})\n    metrics.put_metric('ProcessedResources', 1, 'Count')\n    metrics.set_property('requestId', event['requestContext']['requestId'])\n    metrics.set_property('routeKey', route_key)\n\n    try:\n        # Get all resources\n        if route_key == 'GET /locations/{locationid}/resources':\n            # generate business metrics for the route\n            metric_payload['operation'] = 'GET'\n            metric_payload['locationid'] = event['pathParameters']['locationid']\n            # get data from the database\n            ddb_response = ddbTable.query(\n                IndexName='locationidGSI',\n                KeyConditionExpression='locationid = :locationid',\n                ExpressionAttributeValues={\n                    ':locationid': event['pathParameters']['locationid']\n                }\n            )\n            # return list of items instead of full DynamoDB response\n            response_body = ddb_response['Items']\n            status_code = 200\n        # Resource CRUD operations\n        if route_key == 'GET /locations/{locationid}/resources/{resourceid}':\n            # generate business metrics for the route\n            metric_payload['operation'] = 'GET'\n            metric_payload['locationid'] = event['pathParameters']['locationid']\n            metric_payload['resourceid'] = event['pathParameters']['resourceid']\n            # get data from the database\n            ddb_response = ddbTable.get_item(\n                Key={'resourceid': event['pathParameters']['resourceid']}\n            )\n            # return list of items instead of full DynamoDB response\n            if 'Item' in ddb_response:\n                response_body = ddb_response['Item']\n            else:\n                response_body = {}\n            status_code = 200\n        if route_key == 'DELETE /locations/{locationid}/resources/{resourceid}':\n            # generate business metrics for the route\n            metric_payload['operation'] = 'DELETE'\n            metric_payload['locationid'] = event['pathParameters']['locationid']\n            metric_payload['resourceid'] = event['pathParameters']['resourceid']\n            # delete item in the database\n            ddbTable.delete_item(\n                Key={'resourceid': event['pathParameters']['resourceid']}\n            )\n            response_body = {}\n            status_code = 200\n        if route_key == 'PUT /locations/{locationid}/resources':\n            request_json = json.loads(event['body'])\n            request_json['locationid'] = event['pathParameters']['locationid']\n            request_json['timestamp'] = datetime.now().isoformat()\n            # generate unique id if it isn't present in the request\n            if 'resourceid' not in request_json:\n                request_json['resourceid'] = str(uuid.uuid1())\n            # generate business metrics for the route\n            metric_payload['operation'] = 'PUT'\n            metric_payload['locationid'] = event['pathParameters']['locationid']\n            metric_payload['resourceid'] = 
request_json['resourceid']\n # update the database\n ddbTable.put_item(\n Item=request_json\n )\n response_body = request_json\n status_code = 200\n except Exception as err:\n status_code = 400\n response_body = {'Error:': str(err)}\n print(str(err))\n # Add route specific business metrics\n metrics.set_property(\"Payload\", metric_payload)\n return {\n 'statusCode': status_code,\n 'body': json.dumps(response_body),\n 'headers': headers\n }\n","sub_path":"serverless-rest-api/python-rest-sam/src/api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"36561528","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __repr__(self):\n return str(self.val)\n\nclass Solution:\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n none_hunter = head\n n_prev_hunter = head\n\n for _ in range(n + 1):\n if not none_hunter:\n return head.next\n none_hunter = none_hunter.next\n # print(none_hunter)\n\n while none_hunter:\n none_hunter = none_hunter.next\n n_prev_hunter = n_prev_hunter.next\n # print(n_prev_hunter)\n n_prev_hunter.next = n_prev_hunter.next.next\n\n return head\n\n\ndef main():\n\n def get_nodes(values):\n next_node = None\n for value in values[::-1]:\n node = ListNode(value)\n node.next = next_node\n next_node = node\n\n return next_node\n\n def get_list(head):\n node = head\n nodes = list()\n while node:\n nodes.append(node.val)\n node = node.next\n return nodes\n \n solution = Solution()\n assert get_list(solution.removeNthFromEnd(get_nodes([0]), 1)) == []\n assert get_list(solution.removeNthFromEnd(get_nodes([0, 1]), 2)) == [1]\n assert get_list(solution.removeNthFromEnd(get_nodes([0, 1, 2, 3, 4]), 2)) == [0, 1, 2, 4] \n assert get_list(solution.removeNthFromEnd(get_nodes([0, 1, 2, 3, 4]), 5)) == [1, 2, 3, 4]\n\nif __name__ == '__main__':\n main()\n","sub_path":"solutions/remove_nth_node_from_end_of_list.py","file_name":"remove_nth_node_from_end_of_list.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"207791629","text":"from django.test import TestCase,Client\nfrom contact.models import Contact\n\n# Create your tests here.\nclass ContactTestCase(TestCase):\n def setUp(self):\n Contact.objects.create(first_name=\"shyam\", last_name=\"kc\", email=\"test@tst.com\", subject=\"hello\", message=\"hi\")\n Contact.objects.create(first_name=\"hari\", last_name=\"kc\", email=\"test@tst.com\", subject=\"hello\", message=\"hi\")\n\n def test_contact_name(self):\n shyam = Contact.objects.get(first_name=\"shyam\")\n hari = Contact.objects.get(first_name=\"hari\")\n self.assertEqual(shyam.get_first_name(), 'shyam')\n self.assertEqual(hari.get_first_name(), 'hari')\n\nclass ContactTestCase2(TestCase):\n\n def test_contacts_get_request(self):\n c = Client()\n response = c.post('/api/v1/contacts/',\n {'first_name': 'test', 'last_name': 'test', 'email': 'princenirajan12@gmail.com',\n 'subject': 'test', 'message': 'test'})\n print('post', response)\n status_code = response.status_code\n self.assertEquals(status_code, 200)\n\n response = c.get('/api/v1/contacts/')\n status_code = response.status_code\n print(response.json())\n self.assertEquals(status_code, 
200)\n\n\n\n","sub_path":"venv/store/contact/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"433731577","text":"import sys\n\nfrom . import citygml\nfrom . import polygons\nfrom . import stl\nfrom . import __version__\n\n\ntry:\n unicode\nexcept NameError:\n unicode = str\n\n\ndef main():\n \"\"\"\n Simple CLI interafce for citygml2stl\n \"\"\"\n\n if len(sys.argv) == 1 or '--help' in sys.argv or 'help' in sys.argv:\n print('CityGML {}'.format(__version__))\n print('Usage: {} [file [file [file...]]]'.format(sys.argv[0]))\n return 0\n\n ret = 0\n\n for ipath in sys.argv[1:]:\n if ipath.endswith('.xml') or ipath.endswith('.gml'):\n opath = ipath[:-3] + 'stl'\n else:\n opath = ipath + '.stl'\n\n print('Converting {} to {}'.format(ipath, opath))\n\n try:\n c = citygml.CityGML(ipath)\n with stl.StlFile(opath) as ofile:\n for obj in c.get_objects_of_types():\n ofile.write_triangles(polygons.object2triangles(obj))\n except Exception as e:\n sys.stderr.write('Error: ' + unicode(e) + '\\n')\n ret = 1\n\n return ret\n","sub_path":"citygml2stl/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"642893792","text":"import requests\r\napikey = \"apiKey=7a4f853b718a40a2b518f71142c14287\"\r\ncountry = ['country=in&', 'country=us&']\r\nep = [\"top-headlines?\", \"everything?\",\"source?\"]\r\nurl = \"https://newsapi.org/v2/\"\r\n#get top-headlines\r\n#full = url+ep[0]+country[0]+apikey\r\n#res = requests.get(full)\r\n#res_O = res.json()\r\n#get everythings functions\r\ndef g_e(a,b,c,d):\r\n return a+b+c+d\r\n\r\nru = \"q=\"+input(\"keyword\")+\"&\"\r\nresult = g_e(url,ep[1],ru,apikey)\r\npik = requests.get(result)\r\nj_pik = pik.json()\r\nprint(j_pik)\r\n","sub_path":"happy.py","file_name":"happy.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"411790559","text":"################################################################################\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\nimport sys\n\nimport argparse\nfrom typing import Iterable\n\nfrom pyflink.datastream.connectors.file_system import FileSink, RollingPolicy, OutputFileConfig\n\nfrom pyflink.common import Types, WatermarkStrategy, Time, Encoder\nfrom pyflink.common.watermark_strategy import TimestampAssigner\nfrom pyflink.datastream import StreamExecutionEnvironment, ProcessWindowFunction\nfrom pyflink.datastream.window import EventTimeSessionWindows, \\\n SessionWindowTimeGapExtractor, TimeWindow\n\n\nclass MyTimestampAssigner(TimestampAssigner):\n def extract_timestamp(self, value, record_timestamp) -> int:\n return int(value[1])\n\n\nclass MySessionWindowTimeGapExtractor(SessionWindowTimeGapExtractor):\n def extract(self, element: tuple) -> int:\n return element[1]\n\n\nclass CountWindowProcessFunction(ProcessWindowFunction[tuple, tuple, str, TimeWindow]):\n def process(self,\n key: str,\n context: ProcessWindowFunction.Context[TimeWindow],\n elements: Iterable[tuple]) -> Iterable[tuple]:\n return [(key, context.window().start, context.window().end, len([e for e in elements]))]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--output',\n dest='output',\n required=False,\n help='Output file to write results to.')\n\n argv = sys.argv[1:]\n known_args, _ = parser.parse_known_args(argv)\n output_path = known_args.output\n\n env = StreamExecutionEnvironment.get_execution_environment()\n # write all the data to one file\n env.set_parallelism(1)\n\n # define the source\n data_stream = env.from_collection([\n ('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 8), ('hi', 9), ('hi', 15)],\n type_info=Types.TUPLE([Types.STRING(), Types.INT()]))\n\n # define the watermark strategy\n watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \\\n .with_timestamp_assigner(MyTimestampAssigner())\n\n ds = data_stream.assign_timestamps_and_watermarks(watermark_strategy) \\\n .key_by(lambda x: x[0], key_type=Types.STRING()) \\\n .window(EventTimeSessionWindows.with_gap(Time.milliseconds(5))) \\\n .process(CountWindowProcessFunction(),\n Types.TUPLE([Types.STRING(), Types.INT(), Types.INT(), Types.INT()]))\n\n # define the sink\n if output_path is not None:\n ds.sink_to(\n sink=FileSink.for_row_format(\n base_path=output_path,\n encoder=Encoder.simple_string_encoder())\n .with_output_file_config(\n OutputFileConfig.builder()\n .with_part_prefix(\"prefix\")\n .with_part_suffix(\".ext\")\n .build())\n .with_rolling_policy(RollingPolicy.default_rolling_policy())\n .build()\n )\n else:\n print(\"Printing result to stdout. 
Use --output to specify output path.\")\n ds.print()\n\n # submit for execution\n env.execute()\n","sub_path":"flink-python/pyflink/examples/datastream/windowing/session_with_gap_window.py","file_name":"session_with_gap_window.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"19253492","text":"from baseObjectOriented2 import peopleList\nimport networkx as nx\nfrom baseToCSV import hasSharedMovie, sharedMovies\n\ng = nx.Graph() #instantiate graph object\n\ng.add_nodes_from(peopleList) #adds each person as a node\n\n#now add in edges behind people\nfor i in range(len(peopleList)):\n source = peopleList[i]\n for j in range(i + 1, len(peopleList)):\n target = peopleList[j]\n if hasSharedMovie(source, target) == True:\n #adds an edge if people have worked on same movie together\n g.add_edge(source, target)\n\n #add edge attribute of list of shared movies\n sharedList = sharedMovies(source, target)\n sharedListTitles = [t.title for t in sharedList]\n g.edge[source][target]['sharedList'] = sharedListTitles\n\n #add edge attribute of their collaboration score\n #collaboration score = average score of their shared movies\n totalBechdelScore = sum([movie.score for movie in sharedList])\n avBechdelScore = totalBechdelScore / len(sharedList)\n g.edge[source][target]['collaboration'] = avBechdelScore\n\nprint(\"Num of nodes: \",len(g.nodes())) #diagnostic, should be equal to len(peopleList)\nprint(\"Num of edges: \",len(g.edges())) #diagnostic, should be equal to rows in allConnections.csv\n\n\n\n","sub_path":"baseGraph.py","file_name":"baseGraph.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"248983432","text":"import numpy as np\nimport glob\nimport argparse\nimport os\nimport sys\nimport subprocess\n\n# models\n\nfrom os.path import basename, join, exists, splitext\n\nAUDIO_EXTENSION=\".wav\"\nVIDEO_EXTENSION=\".avi\"\nVIDEO_EXTENSION2=\".mpg\"\nIMG_EXTENSION=\".png\"\n\ncmd_convert = 'ffmpeg -i {} {}'\n\ndef convert(args,split,idx):\n idx = str(idx+1)\n video_folders = os.listdir(join(split,args.non_violence_folder))\n dest_folder = join(args.dataset_converted,idx,args.non_violence_folder)\n\n if not exists(join(args.dataset_converted,idx)):\n os.mkdir(join(args.dataset_converted,idx))\n\n if not exists(join(args.dataset_converted,idx,args.non_violence_folder)):\n os.mkdir(join(args.dataset_converted,idx,args.non_violence_folder))\n\n for idx,video in enumerate(video_folders):\n if os.path.isdir(join(split,args.non_violence_folder)+'/'+video) or video == '.DS_Store':\n continue\n\n orig = join(split,args.non_violence_folder)+'/'+video\n conv = dest_folder+'/'+video.replace('.avi','.mp4')\n\n subprocess.call(cmd_convert.format(orig,conv), shell=True)\n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Extracting frames and features from videos.')\n parser.add_argument('--violence_folder', dest='violence_folder', help='folder with violent videos.', default='Violence')\n parser.add_argument('--non_violence_folder', dest='non_violence_folder', help='folder with non violent videos.', default='NonViolence')\n parser.add_argument('--dataset_splits', dest='dataset_splits', help='folder with spits of cross validation.', default='data/movies/')\n parser.add_argument('--dataset_converted', dest='dataset_converted', help='folder with dataset converted to 
mp4', default='data/movies_mp4/')\n parser.add_argument('--movies_folder', dest='movies_folder', help='name of folder containing videos', default=\"movies/\")\n\n args = parser.parse_args()\n\n return args\n\n\ndef converter(args):\n dataset_splits = args.dataset_splits\n\n if not exists(args.dataset_converted):\n os.mkdir(args.dataset_converted)\n\n splits = glob.glob(dataset_splits+'*')\n for idx,split in enumerate(splits):\n convert(args,split,idx)\n # for split in splits:\n # print(\"Split : \"+ split)\n # videos = split+\"/\"\n\n # print(\"[Dataset Parser] Starting frames extraction\")\n\n # #extract_frames_from_videos(args,videos,-1,split)\n # print(\"Extracting visual features\")\n # extract_features(args,videos)\n\n #\n # print(\"cleaning garbage\")\n # clean_garbage(train_folder,test_folder)\n\n print(\"Done.\")\n\nif __name__ == '__main__':\n args = parse_args()\n print(\".... Starting .....\")\n converter(args)\n","sub_path":"util/convert_to_mp4.py","file_name":"convert_to_mp4.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"636172546","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport logging\nimport os\nimport sys\nimport evergreen_utils\n\nfrom statsmodels.stats.proportion import proportion_confint\n\nMIN_IMPRESSIONS = 30\nCONFIDENCE = 0.8\nGOOD_THRESH = 0.11\nBAD_THRESH = GOOD_THRESH - 0.005\nBAD_MAX_IMPRESSIONS = 100\nGOOD_MAX_IMPRESSIONS = 150\nGOOD_MIN_IMPRESSIONS = 100\n\nENABLE_MOVE_GOOD = True\nENABLE_MOVE_BAD = True\nENABLE_STAGE_CHANGE = True\n# temporarily shut it down.\nDRY_RUN = True\n\ndef filter_articles(docs, mc):\n for doc in docs:\n # get stats.\n imps = 0\n if doc.has_key(\"all_imps\"):\n imps = doc[\"all_imps\"]\n clicks = 0\n if doc.has_key(\"all_clicks\"):\n clicks = doc[\"all_clicks\"]\n if imps < MIN_IMPRESSIONS:\n continue\n ctr = clicks * 1.0 / imps\n (lb, ub) = proportion_confint(clicks, imps, (1 - CONFIDENCE), 'wilson')\n\n # make decision.\n if doc[\"from_table\"] == \"evergreen_golden\":\n if ENABLE_STAGE_CHANGE and ctr < GOOD_THRESH:\n evergreen_utils.move_article(mc, doc, \"evergreen_golden\", \"evergreen\", DRY_RUN)\n continue\n elif doc[\"from_table\"] == \"evergreen_paused\":\n if ENABLE_STAGE_CHANGE and ctr >= BAD_THRESH:\n evergreen_utils.move_article(mc, doc, \"evergreen_paused\", \"evergreen\", DRY_RUN)\n continue\n elif doc[\"from_table\"] == \"evergreen\":\n if ENABLE_MOVE_BAD and ub < BAD_THRESH:\n evergreen_utils.move_article(mc, doc, \"evergreen\", \"evergreen_paused\", DRY_RUN)\n continue\n if ENABLE_MOVE_GOOD and lb >= GOOD_THRESH and imps >= GOOD_MIN_IMPRESSIONS:\n evergreen_utils.move_article(mc, doc, \"evergreen\", \"evergreen_golden\", DRY_RUN)\n continue\n if ENABLE_MOVE_BAD and imps >= BAD_MAX_IMPRESSIONS and ctr < BAD_THRESH:\n evergreen_utils.move_article(mc, doc, \"evergreen\", \"evergreen_paused\", DRY_RUN)\n continue\n if ENABLE_MOVE_GOOD and imps >= GOOD_MAX_IMPRESSIONS and ctr >= GOOD_THRESH:\n evergreen_utils.move_article(mc, doc, \"evergreen\", \"evergreen_golden\", DRY_RUN)\n continue\n\n\ndef main():\n mc = evergreen_utils.get_mongo_client()\n candidates = evergreen_utils.get_candidates(mc)\n evergreen_utils.load_stats_into_candidates(candidates)\n filter_articles(candidates, mc)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"python/src/evergreen_in/filter_evergreens.py","file_name":"filter_evergreens.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"447739681","text":"\"\"\"\n\n\"\"\"\n\nfrom ares.Lib import AresHtml\n\nclass HandleRequest(AresHtml.Html):\n \"\"\"\n\n \"\"\"\n alias = \"handleRequest\"\n\n def __init__(self, aresObj, method, params, js=\"\", cssCls=None, cssAttr=None):\n super(HandleRequest, self).__init__(aresObj, None, cssCls, cssAttr)\n self.method = method\n self.params = params\n self.js = js\n\n def onLoadFnc(self):\n return \"\"\"$(document).ready(function() {\n $.post( '/reports/handlerequest/%s/%s?%s', function(result) { \n var res = JSON.parse(result) ;\n var data = res.data ;\n var status = res.status ;\n if (status == 'Error') { \n alert(res.message) ; \n }\n else {\n %s ;\n }\n });});\"\"\" % (self.method.__module__, self.method.__name__, \";\".join([\"%s=%s\" % (k, v) for k, v in self.params.items()]), self.js)\n\n def __str__(self):\n return \"\"#'
'\n","sub_path":"ares/Lib/html/AresHtmlAjaxCall.py","file_name":"AresHtmlAjaxCall.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"429191406","text":"import sys\r\nimport os\r\nimport argparse\r\nimport logging\r\n\r\nimport numpy as np\r\n\r\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/../../\")\r\n\r\nparser = argparse.ArgumentParser(description=\"Get the normal region\"\r\n \" from tumor WSI \")\r\nparser.add_argument(\"tumor_path\", default=None, metavar='TUMOR_PATH', type=str,\r\n help=\"Path to the tumor mask npy\")\r\nparser.add_argument(\"tissue_path\", default=None, metavar='TISSUE_PATH', type=str,\r\n help=\"Path to the tissue mask npy\")\r\nparser.add_argument(\"normal_path\", default=None, metavar='NORMAL_PATCH', type=str,\r\n help=\"Path to the output normal region from tumor WSI npy\")\r\n\r\n\r\ndef run(args):\r\n tumor_mask = np.load(args.tumor_path)\r\n tissue_mask = np.load(args.tissue_path)\r\n\r\n normal_mask = tissue_mask & (~ tumor_mask)\r\n\r\n np.save(args.normal_path, normal_mask)\r\n\r\ndef main():\r\n logging.basicConfig(level=logging.INFO)\r\n\r\n args = parser.parse_args()\r\n run(args)\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"extras/CNNRF/cnnrf/bin/non_tumor_mask.py","file_name":"non_tumor_mask.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"458796262","text":"import unittest\r\nimport os\r\nimport sys\r\nfrom unittest.mock import patch\r\nimport threading\r\n\r\nmock_dir = os.path.join(os.path.dirname(__file__), \"mock_libs\")\r\nassert(os.path.exists(mock_dir))\r\nsys.path.insert(0, mock_dir)\r\n\r\nfrom src import cansat\r\n\r\n\r\nclass FakeSat(threading.Thread):\r\n\r\n def __init__(self, mode=cansat.MODE_DELAY):\r\n self.mode = mode\r\n super(FakeSat, self).__init__()\r\n self.sat = cansat.Cansat(key=\"123456\", mode=self.mode)\r\n self.start()\r\n\r\n def run(self):\r\n self.sat.main()\r\n\r\n def set_wifi(self, wifi):\r\n cansat.WIFI = wifi\r\n\r\n\r\ndef wifi_switch():\r\n try:\r\n while 1:\r\n print(\"Wifi switch :\", cansat.WIFI)\r\n input()\r\n cansat.WIFI = not cansat.WIFI\r\n\r\n except (InterruptedError, KeyboardInterrupt):\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n th = FakeSat(mode=cansat.MODE_MANUAL)\r\n\r\n","sub_path":"internal software/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"240870244","text":"#!/usr/bin/env python2.7\n\n\"\"\"\n@author: Mitchell Scott\n@contact: miscott@uw.edu\n\"\"\"\nimport numpy as np\nimport cv2\nimport yaml\n\n\nclass Loader:\n \"\"\"\n Loader class to pass to Extrinsic/Intrinsic loader\n Can either set base path with all paths relative to that path, or\n do all relative paths\n Form:\n Paramter = [load (bool), path (string)]\n Pass path with '/' appended to front for full path. 
Else, relative path\n    \"\"\"\n    base_path = \" \"\n    # Intrnsic paramaters and distortion\n    im_size = [False, \" \"]\n    K1 = [False, \" \"]\n    K2 = [False, \" \"]\n    d1 = [False, \" \"]\n    d2 = [False, \" \"]\n    # Rotation and Translation\n    R = [False, \" \"]\n    t = [False, \" \"]\n    # Projection matrices\n    P1 = [False, \" \"]\n    P2 = [False, \" \"]\n    # Rectification matricies\n    R1 = [False, \" \"]\n    R2 = [False, \" \"]\n\n    def __init__(self, base_path=\" \"):\n        \"\"\"\n        Arg calibration loader: Loaded calibration.yaml file\n        \"\"\"\n        self.parms = dict()\n        self.base_path = base_path\n\n    def load_params_from_file(self, calibration_yaml):\n        with open(calibration_yaml, 'r') as stream:\n            calibration_loader = yaml.safe_load(stream)\n\n        self._load_params_from_file(calibration_loader)\n\n    def _load_params_from_file(self, calibration_loader):\n        self.parms[\"base_path\"] = self.base_path\n        self.parms[\"im_size\"] = self.im_size\n        self.parms[\"K1\"] = self.K1\n        self.parms[\"K2\"] = self.K2\n        self.parms[\"d1\"] = self.d1\n        self.parms[\"d2\"] = self.d2\n        self.parms[\"R\"] = self.R\n        self.parms[\"t\"] = self.t\n        self.parms[\"P1\"] = self.P1\n        self.parms[\"P2\"] = self.P2\n        self.parms[\"R1\"] = self.R1\n        self.parms[\"R2\"] = self.R2\n        for key in calibration_loader.keys():\n\n            self.parms[key] = calibration_loader[key]\n        self._set_params()\n\n    def _set_params(self):\n        self._set_path(self.im_size, self.parms[\"im_size\"])\n        self._set_path(self.K1, self.parms[\"K1\"])\n        self._set_path(self.K2, self.parms[\"K2\"])\n        self._set_path(self.d1, self.parms[\"d1\"])\n        self._set_path(self.d2, self.parms[\"d2\"])\n        self._set_path(self.R, self.parms[\"R\"])\n        self._set_path(self.t, self.parms[\"t\"])\n        self._set_path(self.P1, self.parms[\"P1\"])\n        self._set_path(self.P2, self.parms[\"P2\"])\n        self._set_path(self.R1, self.parms[\"R1\"])\n        self._set_path(self.R2, self.parms[\"R2\"])\n\n    def _set_path(self, val, paramater):\n        val[0] = paramater[0]\n        # a path starting with '/' is treated as a full path, per the class docstring\n        if paramater[1].startswith('/'):\n            val[1] = paramater[1]\n        else:\n            val[1] = self.base_path + paramater[1]\n\n\nclass Paramters:\n    \"\"\"\n    Paramaters class for extrinsic and intrinsic stereo camera properties\n    params: dictionary with all paramaters\n    K1/2: Intrinsic Paramaters\n    d1/d2: Distortion paramaters\n    R: Rotation matrix between cameras\n    t: translation vector between cameras\n    P1/P2: Projection matrix\n    \"\"\"\n    im_size = (0, 0)\n    K1 = np.eye(3)\n    K2 = np.eye(3)\n    d1 = np.zeros(5)\n    d2 = np.zeros(5)\n    R = np.eye(3)\n    t = np.zeros(3)\n    P1 = None\n    P2 = None\n    R1 = None\n    R2 = None\n\n\nclass ExtrinsicIntrnsicLoaderSaver:\n    \"\"\"\n    Helper class to load and calculate projection matricies\n    Attributes:\n        paramters: Object containing camera paramaters\n    Methods:\n        calculate_projection_matracies: Calculate projection matrices and\n            update paramters\n    \"\"\"\n\n    def __init__(self, paramLoader):\n        \"\"\"\n        Input:\n            paramLoader: Dictonary of calibration matricies\n        \"\"\"\n        self.paramaters = Paramters\n        self._load_params(paramLoader)\n\n    def set_imsize(self, im_size):\n        self.paramaters.im_size = im_size\n\n    def calculate_rectification_matracies(self):\n        R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(\n            self.paramaters.K1, self.paramaters.d1,\n            self.paramaters.K2, self.paramaters.d2,\n            self.paramaters.im_size, self.paramaters.R,\n            self.paramaters.t)\n        self.paramaters.R1 = np.float64(R1)\n        self.paramaters.R2 = np.float64(R2)\n        self.paramaters.Q = np.float64(Q)\n\n    def calculate_projection_matracies(self):\n        \"\"\"\n        Calculates projection matricies from R and t matricies. 
Updates\n        paramters attribute\n        \"\"\"\n        _p1 = np.zeros((3, 4), dtype=float)\n        _p1[0, 0] = 1.0\n        _p1[1, 1] = 1.0\n        _p1[2, 2] = 1.0\n        P1 = np.matmul(self.paramaters.K1, _p1)\n        P2 = np.matmul(self.paramaters.K2, np.concatenate(\n            (self.paramaters.R, self.paramaters.t.reshape(3, 1)), axis=1))\n\n        self.paramaters.P1 = np.float64(P1)\n        self.paramaters.P2 = np.float64(P2)\n\n    def save_paramater(self, paramater, save_name):\n        \"\"\"\n        Save a paramater\n        Input:\n            paramater: np.ndarry type to save\n            save_name: full save path\n        \"\"\"\n        self._save_np_array(save_name, paramater)\n\n    def _save_np_array(self, save_name, mat):\n        np.savetxt(save_name, mat, fmt=\"%1.3f\", delimiter=\",\")\n\n    def _load_params(self, paramLoader):\n        \"\"\"\n        Set paramters to paramLoader\n        \"\"\"\n        if paramLoader.im_size[0]:\n            self.paramaters.im_size = np.float64(np.loadtxt(\n                paramLoader.im_size[1], delimiter=','))\n        if paramLoader.K1[0]:\n            self.paramaters.K1 = np.float64(np.loadtxt(\n                paramLoader.K1[1], delimiter=','))\n\n        if paramLoader.K2[0]:\n            self.paramaters.K2 = np.float64(np.loadtxt(\n                paramLoader.K2[1], delimiter=','))\n        if paramLoader.d1[0]:\n            self.paramaters.d1 = np.float64(np.loadtxt(\n                paramLoader.d1[1], delimiter=','))\n        if paramLoader.d2[0]:\n            self.paramaters.d2 = np.float64(np.loadtxt(\n                paramLoader.d2[1], delimiter=','))\n        if paramLoader.R[0]:\n            self.paramaters.R = np.float64(np.loadtxt(\n                paramLoader.R[1], delimiter=','))\n        if paramLoader.t[0]:\n            self.paramaters.t = np.float64(np.loadtxt(\n                paramLoader.t[1], delimiter=','))\n        if paramLoader.P1[0]:\n\n            self.paramaters.P1 = np.float64(np.loadtxt(\n                paramLoader.P1[1], delimiter=','))\n        if paramLoader.P2[0]:\n            self.paramaters.P2 = np.float64(np.loadtxt(\n                paramLoader.P2[1], delimiter=','))\n        if paramLoader.R1[0]:\n            self.paramaters.R1 = np.float64(np.loadtxt(\n                paramLoader.R1[1], delimiter=','))\n        if paramLoader.R2[0]:\n            self.paramaters.R2 = np.float64(np.loadtxt(\n                paramLoader.R2[1], delimiter=','))\n","sub_path":"stereoProcessing/intrinsic_extrinsic.py","file_name":"intrinsic_extrinsic.py","file_ext":"py","file_size_in_byte":6761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"23234657","text":"import csv\nimport re\n\nwith open('faculty.csv','r') as f: \n    f_reader = csv.reader(f)\n    next(f_reader)\n    \n    degrees = []\n    titles = []\n    emails = []\n    domains = []\n    \n    for line in f_reader:\n        deg_std = str.split(str.replace(line[1], '.','').lower(),' ')\n        degrees = degrees + [d.upper() for d in deg_std if d != '' and d.isalpha()]\n        titles = titles + [str.replace(str.replace(line[2], ' of Biostatistics', ''), ' is Biostatistics','')]\n        emails = emails + [e for e in line if re.match(r'.*@.*',e)]\n        domains = domains + [d[d.index('@') + 1:] for d in line if re.match(r'.*@.*',d)]\n    \n    deg_count_dict = {d:degrees.count(d) for d in degrees}\n    title_count_dict = {t:titles.count(t) for t in titles}\n    unique_domains = set(domains)\n    \nprint(deg_count_dict)\nprint(title_count_dict)\nprint(emails)\nprint(unique_domains)\n\n","sub_path":"python/advanced_python_regex.py","file_name":"advanced_python_regex.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"594035618","text":"import collections\nimport os\nimport enum\n\nfrom . import ast\nfrom . import parser\nfrom . import ir\nfrom . 
import typechecker\n\n\nclass FatalCompileError(Exception):\n def __init__(self, compilation):\n self.compilation = compilation\n\n\nclass Diagnostic(object):\n class Severity(enum.Enum):\n NOTE = 0\n WARNING = 1\n ERROR = 2\n\n def __init__(self, severity, filename, message, lineno, colno, collen):\n self.severity = severity\n self.filename = filename\n self.message = message\n self.lineno = lineno\n self.colno = colno\n self.collen = collen\n\n\nclass ImportFindingVisitor(ast.BaseVisitor):\n MAX_IMPORT_DEPTH = 20\n\n def __init__(self, compilation):\n self.compilation = compilation\n self.imports = {}\n\n def visit_import(self, node):\n if self.compilation.import_depth + 1 >= self.MAX_IMPORT_DEPTH:\n self.compilation.add_diagnostic(\n Diagnostic.Severity.ERROR,\n \"import nested too deep\".format(node.id.name),\n node.id.lineno, node.id.colno, len(node.id.raw))\n return\n\n module = None\n try:\n c, module = _compile_file(\n os.path.join(os.path.dirname(self.compilation.filename),\n node.id.name + '.admiral'),\n self.compilation.import_depth + 1, self.compilation.settings)\n except IOError:\n self.compilation.add_diagnostic(\n Diagnostic.Severity.ERROR,\n \"could not open file '{}'\".format(node.id.name),\n node.id.lineno, node.id.colno, len(node.id.raw))\n else:\n self.imports.update(c.imports)\n\n if c.diagnostics:\n self.compilation.add_diagnostic(\n Diagnostic.Severity.NOTE,\n \"included from here\",\n node.id.lineno, node.id.colno, len(node.id.raw))\n self.compilation.diagnostics.extend(c.diagnostics)\n\n if any(diagnostic.severity == Diagnostic.Severity.ERROR\n for diagnostic in c.diagnostics):\n self.compilation.add_diagnostic(\n self.compilation.Diagnostic.Severity.ERROR,\n 'could not import module',\n node.id.lineno, node.id.colno, len(node.id.raw))\n\n self.imports[node.id.name] = module\n\n\nclass ExpToIRVisitor(object):\n def __init__(self, compilation, mod):\n self.compilation = compilation\n self.mod = mod\n\n def visit_id(self, node):\n return self.mod.make(ir.Symbol, node.name).with_ast_node(node)\n\n def visit_app(self, node):\n return self.mod.make(\n ir.Instantiation, node.head.accept(self),\n [t.accept(self) for t in node.tail]).with_ast_node(node)\n\n def visit_dotted(self, node):\n return self.mod.make(\n ir.Member, node.type.accept(self), node.member.accept(self)) \\\n .with_ast_node(node)\n\n def visit_import(self, node):\n return self.mod.make(ir.Imported, node.id.name).with_ast_node(node)\n\n\nclass AggregateToIRVisitor(object):\n def __init__(self, compilation, mod):\n self.compilation = compilation\n self.mod = mod\n self.types = collections.OrderedDict()\n self.fields = collections.OrderedDict()\n self.fields_by_number = {}\n\n def visit_fielddef(self, node):\n field = self.mod.make(\n ir.Field, node.id.name,\n node.type.accept(ExpToIRVisitor(self.compilation, self.mod))\n if node.type is not None else None,\n int(node.number.value)).with_ast_node(node)\n\n if field.name in self.fields:\n last_field = self.fields[field.name]\n self.compilation.add_diagnostic(\n Diagnostic.Severity.NOTE,\n \"'{}' was previously defined here\".format(field.name),\n last_field.ast_node.id.lineno, last_field.ast_node.id.colno,\n len(last_field.ast_node.id.raw))\n\n self.compilation.add_diagnostic(\n Diagnostic.Severity.WARNING,\n \"redefinition of field '{}'\".format(field.name),\n field.ast_node.id.lineno, field.ast_node.id.colno,\n len(field.ast_node.id.raw))\n\n self.fields[field.name] = field\n\n if field.number in self.fields_by_number:\n last_field = 
self.fields_by_number[field.number]\n self.compilation.add_diagnostic(\n Diagnostic.Severity.NOTE,\n 'field {} was previously used here'.format(\n int(last_field.ast_node.number.value)),\n last_field.ast_node.number.lineno,\n last_field.ast_node.number.colno,\n len(last_field.ast_node.number.value))\n\n self.compilation.add_diagnostic(\n Diagnostic.Severity.WARNING,\n 'reuse of field {}'.format(field.number),\n field.ast_node.number.lineno, field.ast_node.number.colno,\n len(field.ast_node.number.value))\n\n self.fields_by_number[field.number] = field\n\n def _visit_typedef(self, node):\n type = node.accept(TypedefToIRVisitor(self.compilation, self.mod))\n\n if type.name in self.types:\n last_type = self.types[type.name]\n self.compilation.add_diagnostic(\n Diagnostic.Severity.NOTE,\n \"'{}' was previously defined here\".format(type.name),\n last_type.ast_node.id.lineno, last_type.ast_node.id.colno,\n len(last_type.ast_node.id.raw))\n\n self.compilation.add_diagnostic(\n Diagnostic.Severity.WARNING,\n \"redefinition of type '{}'\".format(type.name),\n type.ast_node.id.lineno, type.ast_node.id.colno,\n len(type.ast_node.id.raw))\n\n self.types[type.name] = type\n\n def visit_recorddef(self, node):\n self._visit_typedef(node)\n\n def visit_enumdef(self, node):\n self._visit_typedef(node)\n\n def visit_aliasdef(self, node):\n self._visit_typedef(node)\n\n\nclass TypedefToIRVisitor(object):\n def __init__(self, compilation, mod):\n self.compilation = compilation\n self.mod = mod\n\n def _extract_type_params(self, typedef):\n return [self.mod.make(ir.Symbol, param.name).with_ast_node(param)\n for param in typedef.params]\n\n def _aggregate_to_ir(self, ir_type, node):\n v = AggregateToIRVisitor(self.compilation, self.mod)\n\n for member in node.members:\n member.accept(v)\n\n return self.mod.make(\n ir_type, node.id.name, self._extract_type_params(node), v.fields,\n v.types).with_ast_node(node)\n\n def visit_recorddef(self, node):\n return self._aggregate_to_ir(ir.Record, node)\n\n def visit_enumdef(self, node):\n return self._aggregate_to_ir(ir.Enum, node)\n\n def visit_aliasdef(self, node):\n return self.mod.make(\n ir.Alias, node.id.name, self._extract_type_params(node),\n node.exp.accept(ExpToIRVisitor(self.compilation, self.mod))) \\\n .with_ast_node(node)\n\n\nclass Compilation(object):\n Diagnostic = Diagnostic\n Settings = collections.namedtuple('Settings', ['warnings_are_errors',\n 'fatal_errors'])\n\n def __init__(self, filename, import_depth, settings):\n self.filename = filename\n self.import_depth = import_depth\n self.settings = settings\n\n self.imports = {}\n self.diagnostics = []\n\n def without_diagnostics(self):\n c = Compilation(self.filename, self.import_depth, self.settings)\n c.imports = self.imports\n return c\n\n def add_diagnostic(self, severity, *args, **kwargs):\n if self.settings.warnings_are_errors and \\\n severity == Diagnostic.Severity.WARNING:\n severity = Diagnostic.Severity.ERROR\n\n self.diagnostics.append(Diagnostic(severity, self.filename, *args,\n **kwargs))\n\n if self.settings.fatal_errors and severity == Diagnostic.Severity.ERROR:\n raise FatalCompileError(self)\n\n def compile_ast(self, moduledef):\n v = ImportFindingVisitor(self)\n moduledef.accept(v)\n self.imports = v.imports\n\n mod = ir.Module(self.imports.keys()).with_ast_node(moduledef)\n\n v = AggregateToIRVisitor(self, mod)\n for member in moduledef.members:\n member.accept(v)\n mod.types.update(v.types)\n\n typechecker.check(self, mod)\n\n return mod\n\n\ndef _compile_file(filename, 
import_depth, settings):\n with open(filename, 'r') as f:\n filename = f.name\n source = f.read()\n\n return _compile_source(filename, source, import_depth, settings)\n\n\ndef compile_file(filename, settings):\n return _compile_file(filename, 0, settings)\n\n\ndef _compile_source(filename, source, import_depth, settings):\n c = Compilation(filename, import_depth, settings)\n\n try:\n moduledef = parser.parse(source)\n except parser.ParseError as e:\n c.add_diagnostic(Diagnostic.Severity.ERROR, str(e), e.lineno, e.colno,\n e.collen)\n return c, None\n\n mod = c.compile_ast(moduledef)\n\n if any(diagnostic.severity == Diagnostic.Severity.ERROR\n for diagnostic in c.diagnostics):\n mod = None\n\n return c, mod\n\ndef compile_source(filename, source, settings):\n return _compile_source(filename, source, 0, settings)\n","sub_path":"admiral/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":9581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"258876651","text":"#!/usr/bin/python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script for replaying a data log. Super handy for development.\"\"\"\n\nimport time\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport data_logger\nimport exit_speed\n\nFLAGS = flags.FLAGS\nFLAGS.set_default('data_log_path', '/tmp') # Dont clobber on replay.\nFLAGS.set_default('led_brightness', 0.05)\nflags.DEFINE_string('filepath', None, 'Path to the data file to replay')\nflags.mark_flag_as_required('filepath')\nflags.DEFINE_boolean('include_sleep', True,\n 'Adds delays to mimic real time replay of data.')\n\n\ndef ReplayLog(filepath, include_sleep=False):\n \"\"\"Replays data, extermely useful to LED testing.\n\n Args:\n filepath: A string of the path of lap data.\n include_sleep: If True replays adds sleeps to simulate how data was\n processed in real time.\n\n Returns:\n A exit_speed.ExitSpeed instance that has replayed the given data.\n \"\"\"\n logging.info('Replaying %s', filepath)\n logger = data_logger.Logger(filepath)\n points = list(logger.ReadProtos())\n logging.info('Number of points %d', len(points))\n if include_sleep:\n replay_start = time.time()\n time_shift = int(replay_start * 1e9 - points[0].time.ToNanoseconds())\n session_start = None\n else:\n FLAGS.set_default('commit_cycle', 10000)\n es = exit_speed.ExitSpeed(live_data=not include_sleep)\n for point in points:\n if include_sleep:\n point.time.FromNanoseconds(point.time.ToNanoseconds() + time_shift)\n if not session_start:\n session_start = point.time.ToMilliseconds() / 1000\n\n es.point = point\n es.ProcessSession()\n if include_sleep:\n run_delta = time.time() - replay_start\n point_delta = point.time.ToMilliseconds() / 1000 - session_start\n if run_delta < point_delta:\n time.sleep(point_delta - run_delta)\n\n if not include_sleep:\n time.sleep(1)\n qsize = len(es.pusher.point_queue)\n while qsize > 0:\n qsize = len(es.pusher.point_queue)\n 
logging.log_every_n_seconds(logging.INFO, 'Queue size %s', 2, qsize)\n es.pusher.stop_process_signal.value = True\n print(time.time())\n es.pusher.process.join(10)\n print(time.time())\n return es\n\n\ndef main(unused_argv):\n ReplayLog(FLAGS.filepath, include_sleep=FLAGS.include_sleep)\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"replay_data.py","file_name":"replay_data.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"457789707","text":"from pyxsim.event_list import EventList\nfrom pyxsim.tests.utils import create_dummy_wcs\nfrom pyxsim.instruments import ACIS_S, sigma_to_fwhm\nfrom pyxsim.spectral_models import TBabsModel\nfrom yt.testing import requires_module\nimport os\nfrom numpy.random import RandomState\nimport tempfile\nimport shutil\nimport numpy as np\nfrom sherpa.astro.ui import load_user_model, add_user_pars, \\\n load_pha, ignore, fit, set_model, set_stat, set_method, \\\n covar, get_covar_results, set_covar_opt\n\nprng = RandomState(24)\n\ndef setup():\n from yt.config import ytcfg\n ytcfg[\"yt\", \"__withintesting\"] = \"True\"\n\ndef mymodel(pars, x, xhi=None):\n wm = TBabsModel(pars[0])\n wabs = wm.get_absorb(x)\n dx = x[1]-x[0]\n plaw = pars[1]*dx*(x*(1.0+pars[2]))**(-pars[3])\n return wabs*plaw\n\n@requires_module(\"sherpa\")\ndef test_point_source():\n\n tmpdir = tempfile.mkdtemp()\n curdir = os.getcwd()\n os.chdir(tmpdir)\n\n nH_sim = 0.02\n norm_sim = 1.0e-4\n alpha_sim = 0.95\n redshift = 0.02\n\n exp_time = (100., \"ks\")\n area = (3000., \"cm**2\")\n\n wcs = create_dummy_wcs()\n\n ebins = np.linspace(0.1, 11.5, 2001)\n emid = 0.5*(ebins[1:]+ebins[:-1])\n spec = norm_sim*(emid*(1.0+redshift))**(-alpha_sim)\n de = np.diff(ebins)[0]\n\n abs_model = TBabsModel(nH_sim)\n\n events = EventList.create_empty_list(exp_time, area, wcs)\n\n positions = [(30.01, 45.0)]\n\n new_events = events.add_point_sources(positions, ebins, spec, prng=prng,\n absorb_model=abs_model)\n\n new_events = ACIS_S(new_events, prng=prng)\n\n scalex = float(np.std(new_events['xpix'])*sigma_to_fwhm*new_events.parameters[\"dtheta\"])\n scaley = float(np.std(new_events['ypix'])*sigma_to_fwhm*new_events.parameters[\"dtheta\"])\n\n psf_scale = ACIS_S.psf_scale\n\n assert (scalex - psf_scale)/psf_scale < 0.01\n assert (scaley - psf_scale)/psf_scale < 0.01\n\n new_events.write_spectrum(\"point_source_evt.pi\", clobber=True)\n\n os.system(\"cp %s .\" % new_events.parameters[\"ARF\"])\n os.system(\"cp %s .\" % new_events.parameters[\"RMF\"])\n\n load_user_model(mymodel, \"tplaw\")\n add_user_pars(\"tplaw\", [\"nH\", \"norm\", \"redshift\", \"alpha\"],\n [0.01, norm_sim*0.8, redshift, 0.9],\n parmins=[0.0, 0.0, 0.0, 0.1],\n parmaxs=[10.0, 1.0e9, 10.0, 10.0],\n parfrozen=[False, False, True, False])\n\n load_pha(\"point_source_evt.pi\")\n set_stat(\"cstat\")\n set_method(\"simplex\")\n ignore(\":0.5, 9.0:\")\n set_model(\"tplaw\")\n fit()\n set_covar_opt(\"sigma\", 1.6)\n covar()\n res = get_covar_results()\n\n assert np.abs(res.parvals[0]-nH_sim) < res.parmaxes[0]\n assert np.abs(res.parvals[1]-norm_sim/de) < res.parmaxes[1]\n assert np.abs(res.parvals[2]-alpha_sim) < res.parmaxes[2]\n\n os.chdir(curdir)\n shutil.rmtree(tmpdir)\n\nif __name__ == \"__main__\":\n 
test_point_source()\n","sub_path":"pyxsim/tests/test_point_source.py","file_name":"test_point_source.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"326547044","text":"from subprocess import Popen, PIPE\nimport textutils as tu\n\ndef get_command_output(command):\n output = list()\n lines = list()\n p = Popen(command, stdout=PIPE)\n while True:\n line = tu.escape_cr_lf(p.stdout.readline())\n lines.append(line)\n if not line:\n break\n p.wait() # wait for the subprocess to really end, so we can get the return code\n return_code = p.returncode\n lines = tu.join_to_br(lines)\n output.append(lines)\n output.append(return_code)\n return output\n\ndef put_log_entry(json_request):\n with open(\"./logs/requests.txt\", \"a\") as logfile:\n logfile.write(str(json_request)+'\\n\\n')\n","sub_path":"botutils.py","file_name":"botutils.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"186066560","text":"#!/usr/bin/env python\n\nfrom __future__ import division, print_function\n\n\nclass DigitGetter( object ):\n def __init__( self, inputFile, outFile ):\n self._digitNumberStringPairs = list( enumerate( ( 'ZERO', 'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE' ) ) )\n self._uniqueLettersToDigits = { 'G': self._digitNumberStringPairs[ 8 ],\n 'U': self._digitNumberStringPairs[ 4 ],\n 'W': self._digitNumberStringPairs[ 2 ],\n 'X': self._digitNumberStringPairs[ 6 ],\n 'Z': self._digitNumberStringPairs[ 0 ] }\n self._uniqueLettersToDigitsSecondPass = { 'O': self._digitNumberStringPairs[ 1 ],\n 'H': self._digitNumberStringPairs[ 3 ],\n 'F': self._digitNumberStringPairs[ 5 ],\n 'S': self._digitNumberStringPairs[ 7 ] }\n\n self._digits = []\n\n with open( inputFile, 'r' ) as i, open( outFile, 'w+' ) as o:\n inputFileContents = i.readlines()\n numberOfCases = inputFileContents[ 0 ]\n for case in inputFileContents[ 1: ]:\n self.getDigits( case )\n self.printDigits( o )\n\n def getDigits( self, case ):\n digits = []\n case, digits = self._handleNumbersWithUniqueLetters( case, digits, self._uniqueLettersToDigits )\n case, digits = self._handleNumbersWithUniqueLetters( case, digits, self._uniqueLettersToDigitsSecondPass )\n case, digits = self._handleNumbersWithUniqueLetters( case, digits, { 'N': self._digitNumberStringPairs[ 9 ] } )\n digits = sorted( digits )\n self._digits.append( ''.join( map( str, digits ) ) )\n\n def _handleNumbersWithUniqueLetters( self, case, digits, uniqueLettersToDigits ):\n for letter, digitNumberStringPair in uniqueLettersToDigits.iteritems():\n #print( case, digits )\n findResult = case.find( letter )\n while findResult != -1:\n digits.append( digitNumberStringPair[ 0 ] )\n #print( case, digits )\n for digitLetter in digitNumberStringPair[ 1 ]:\n case = case.replace( digitLetter, '', 1 )\n #print( case, digits )\n findResult = case.find( letter )\n\n return case, digits\n\n def printDigits( self, outputFile ):\n for caseNumber, digits in enumerate( self._digits ):\n print( 'Case #' + str( caseNumber + 1 ) + ': ' + str( digits ), file = outputFile )\n\n\nif __name__ == '__main__':\n #digitGetter = DigitGetter( 'testInput.txt', 'testOutput.txt' )\n #digitGetter = DigitGetter( 'A-small-attempt0.in', 'results-small.txt' )\n digitGetter = DigitGetter( 'A-large.in', 'results-large.txt' 
)\n\n","sub_path":"codes/CodeJamCrawler/CJ/16_2_1_bpw1621_gettingTheDigits.py","file_name":"16_2_1_bpw1621_gettingTheDigits.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"129427722","text":"# ******************************************************************************\n# Copyright 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\nimport numpy as np\nimport pytest\n\nimport ngraph as ng\n\n\ndef _get_runtime():\n manager_name = pytest.config.getoption('backend', default='CPU')\n return ng.runtime(manager_name=manager_name)\n\n\ndef _run_unary_op_node(input_data, unary_op):\n runtime = _get_runtime()\n parameter_a = ng.parameter(input_data.shape, name='A', dtype=np.float32)\n node = unary_op(parameter_a)\n computation = runtime.computation(node, parameter_a)\n return computation(input_data)\n\n\ndef _run_unary_op_numeric_data(input_data, unary_op):\n runtime = _get_runtime()\n node = unary_op(input_data)\n computation = runtime.computation(node)\n return computation()\n\n\n@pytest.mark.parametrize('ng_api_fn, numpy_fn, input_data', [\n (ng.absolute, np.abs, -1 + np.random.rand(2, 3, 4) * 2),\n (ng.absolute, np.abs, np.float32(-3)),\n (ng.acos, np.arccos, -1 + np.random.rand(2, 3, 4) * 2),\n (ng.acos, np.arccos, np.float32(-0.5)),\n (ng.asin, np.arcsin, -1 + np.random.rand(2, 3, 4) * 2),\n (ng.asin, np.arcsin, np.float32(-0.5)),\n (ng.atan, np.arctan, -100 + np.random.rand(2, 3, 4) * 200),\n (ng.atan, np.arctan, np.float32(-0.5)),\n])\ndef test_unary_op(ng_api_fn, numpy_fn, input_data):\n expected = numpy_fn(input_data)\n\n result = _run_unary_op_node(input_data, ng_api_fn)\n assert np.allclose(result, expected)\n\n result = _run_unary_op_numeric_data(input_data, ng_api_fn)\n assert np.allclose(result, expected)\n","sub_path":"python/test/ngraph/test_ops_unary.py","file_name":"test_ops_unary.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"646494355","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom .models import Book, Author\nfrom .forms import BookForm\n\ndef book_list(request):\n query_set = Book.objects.all()\n\n query = request.GET.get('q')\n if query:\n authors = Author.objects.filter(name__icontains=query)\n query_set = query_set.filter(\n Q(title__icontains=query) |\n Q(authors__in=authors)\n )\n query_set = query_set.distinct()\n\n books = query_set\n\n breadcrumbs = (\n ('Books', ),\n )\n\n context = {\n \"books\": books,\n \"breadcrumbs\": breadcrumbs,\n }\n return render(request, \"books/book_list.html\", context)\n\ndef book_detail(request, id):\n book = 
get_object_or_404(Book, pk=id)\n\n    if book.bookshelf:\n        breadcrumbs = (\n            (\"Bookcases\", reverse(\"bookcases:bookcase_list\")),\n            (book.bookshelf.bookcase.name,\n             reverse(\"bookcases:bookcase_detail\", args=[book.bookshelf.bookcase.pk])),\n            (book.bookshelf.shelf_label,\n             reverse(\"bookcases:bookshelf_detail\", args=[book.bookshelf.pk])),\n            (book.title, ),\n        )\n    else:\n        breadcrumbs = (\n            (\"Books\", reverse(\"books:book_list\")),\n            (book.title, ),\n        )\n\n    context = {\n        \"book\": book,\n        \"breadcrumbs\": breadcrumbs\n    }\n\n    return render(request, \"books/book_detail.html\", context)\n\n@login_required\ndef book_new(request, bookshelf=None):\n    form_kwargs = {}\n\n    if bookshelf:\n        form_kwargs = {\n            \"initial\": {\"bookshelf\": bookshelf}\n        }\n\n        breadcrumbs = (\n            (\"Bookcases\", reverse(\"bookcases:bookcase_list\")),\n            (bookshelf.bookcase.name,\n             reverse(\"bookcases:bookcase_detail\", args=[bookshelf.bookcase.pk])),\n            (bookshelf.shelf_label,\n             reverse(\"bookcases:bookshelf_detail\", args=[bookshelf.pk])),\n        )\n    else:\n        breadcrumbs = (\n            (\"Books\", reverse(\"books:book_list\")),\n        )\n\n    if request.method == \"POST\":\n        form = BookForm(request.POST, request.FILES, **form_kwargs)\n\n        if form.is_valid():\n            book = form.save()\n            book.update_authors(form.cleaned_data['author_names'])\n\n            messages.success(request, \"Book Saved!\")\n            return redirect(\"books:book_detail\", id=book.pk)\n    else:\n        form = BookForm(**form_kwargs)\n\n    context = {\n        \"form\": form,\n        \"breadcrumbs\": breadcrumbs,\n    }\n\n    return render(request, \"books/book_edit.html\", context)\n\n@login_required\ndef book_edit(request, id):\n    book = get_object_or_404(Book, pk=id)\n\n    if book.bookshelf:\n        breadcrumbs = (\n            (\"Bookcases\", reverse(\"bookcases:bookcase_list\")),\n            (book.bookshelf.bookcase.name,\n             reverse(\"bookcases:bookcase_detail\", args=[book.bookshelf.bookcase.pk])),\n            (book.bookshelf.shelf_label,\n             reverse(\"bookcases:bookshelf_detail\", args=[book.bookshelf.pk])),\n            (book.title, reverse(\"books:book_detail\", args=[book.pk]))\n        )\n    else:\n        breadcrumbs = (\n            (\"Books\", reverse(\"books:book_list\")),\n            (book.title, reverse(\"books:book_detail\", args=[book.pk]))\n        )\n\n    if request.method == \"POST\":\n        print(request.FILES)\n        form = BookForm(request.POST, request.FILES, instance=book)\n\n        if form.is_valid():\n            book = form.save()\n            book.update_authors(form.cleaned_data['author_names'])\n\n            messages.success(request, \"Book Saved!\")\n            return redirect(\"books:book_detail\", id=book.pk)\n    else:\n        form = BookForm(instance=book)\n\n    context = {\n        \"form\": form,\n        \"book\": book,\n        \"breadcrumbs\": breadcrumbs,\n    }\n\n    return render(request, \"books/book_edit.html\", context)\n","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"500631826","text":"# -*- coding: utf-8 -*-\n###############################################\n#created by : lxy\n#Time: 2020/05/11 16:09\n#project: crowd estimate\n#version: 0.1\n#tool: python 3.6\n#modified:\n#description: histogram\n####################################################\nimport numpy as np \nimport os\nimport shutil\nimport json\nimport scipy.io as io\nfrom scipy.ndimage.filters import gaussian_filter \nfrom scipy.spatial import KDTree\nimport cv2\nimport tqdm\nimport h5py\nfrom matplotlib import pyplot as plt\n\ndef rescale(img,minSize):\n    h,w = img.shape[:2]\n    init_maxSize = (2048.0,2048.0)\n    max_size = max(h,w)\n    if max_size > 2048:\n        rate = init_maxSize[0]/h\n        rate_w = w*rate\n        if rate_w > init_maxSize[1]:\n            rate = init_maxSize[1]/w\n    else:\n        rate = 1.0\n    tmp_h = int(h*rate/8)*8\n    if tmp_h < minSize[0]:\n        rate = minSize[0]/h\n    tmp_w = int(w*rate/8)*8\n    if tmp_w < minSize[1]:\n        rate = minSize[1]/w\n    tmp_h = int(h*rate/8)*8\n    tmp_w = int(w*rate/8)*8\n    rate_h = float(tmp_h)/h\n    rate_w = float(tmp_w)/w\n    img = cv2.resize(img,(tmp_w,tmp_h))\n    return img,(rate_w,rate_h)\n\ndef gen_labelfile(imgdir,distdir,matdir,infile,outfile):\n    '''\n    imgdir: imgdir/*.jpg\n    matdir: matdir/*.mat\n    infile: train or test txt file, image_name,lu_id,set_id\n    '''\n    fr = open(infile,'r')\n    fw = open(outfile,'w')\n    fcnts = fr.readlines()\n    total_num = len(fcnts)\n    cropsize = (576,768)\n    if not os.path.exists(distdir):\n        os.makedirs(distdir)\n    # diff_sum = 0.0\n    for i in tqdm.tqdm(range(total_num)):\n        tmp = fcnts[i].strip()\n        tmp_sp = tmp.split()\n        imgname = tmp_sp[0]\n        cls_id = tmp_sp[2]\n        imgpath = os.path.join(imgdir,imgname+'.jpg')\n        distpath = os.path.join(distdir,imgname+'.jpg')\n        img = cv2.imread(imgpath)\n        img, scale_wh = rescale(img,cropsize)\n        cv2.imwrite(distpath,img)\n        matfile = os.path.join(matdir,imgname+'.mat')\n        points_mat = io.loadmat(matfile)\n        anns = points_mat['annPoints']\n        if int(cls_id) >0:\n            x = (anns[:,0]+1) * scale_wh[0] -1\n            y = (anns[:,1]+1) * scale_wh[1] -1\n            bb = np.vstack((x,y)).T\n            bb = bb.reshape([-1]).tolist()\n            bstr = list(map(str,bb))\n            bstr = ','.join(bstr)\n            fw.write(\"{},{},{}\\n\".format(imgname+'.jpg',cls_id,bstr))\n        else:\n            fw.write(\"{},{}\\n\".format(imgname+'.jpg',cls_id))\n        # afnum = np.sum(dmap)\n        #print('rt:',afnum)# don't mind this slight variation\n        # record_w.write(\"{}\\t{}\\t{:.3f}\\n\".format(imgname,orgnum,afnum))\n        # diff = orgnum-afnum\n        # diff_sum = diff_sum+ np.abs(diff)\n    # record_w.write('mse:{},cnt:{}'.format(diff_sum/float(cnt),cnt))\n    fr.close()\n    fw.close()\n\ndef showhot(imgdir,fpath):\n    fr = open(fpath,'r')\n    fcnts = fr.readlines()\n    for tmp in fcnts:\n        tmp_sp = tmp.strip().split(',')\n        if len(tmp_sp) > 3:\n            # if '1015.jpg' != tmp_sp[0].strip():\n            #     continue\n            imgpath = os.path.join(imgdir,tmp_sp[0])\n            bbox = list(map(float,tmp_sp[2:]))\n            bbox = np.reshape(bbox,[-1,2])\n            img = cv2.imread(imgpath)\n            # img = cv2.resize(img,(1024,768))\n            for j in range(bbox.shape[0]):\n                cx = bbox[j,0]\n                cy = bbox[j,1]\n                cv2.circle(img,(int(cx),int(cy)),3,(0,255,0))\n            # cv2.imwrite('bblabel_example.png',img)\n            cv2.imshow('src',img)\n            cv2.waitKey(0)\n    fr.close()\n\ndef gaussian_filter_density(pts,gt_shape):\n    leafsize = 2048\n    #print(pts.shape)\n    # build kdtree\n    tree = KDTree(pts.copy(), leafsize=leafsize)\n    # query kdtree\n    distances, locations = tree.query(pts, k=4)\n    # w=15 # the half of kernel size\n    gt_count = pts.shape[0]\n    sigma = 4.0\n    density = np.zeros(gt_shape, dtype=np.float32)\n    for i in range(gt_count):\n        ptx,pty = int(np.floor(pts[i,0])),int(np.floor(pts[i,1]))\n        pt2d = np.zeros(gt_shape, dtype=np.float32)\n        pt2d[pty,ptx] = 1.\n        if gt_count > 1:\n            kernel2 = (distances[i][1]+distances[i][2]+distances[i][3])*0.1\n        else:\n            kernel2 = 15 #case: 1 point\n        # kz = (((w - 1)/2.0)-0.5)/sigma\n        max_k = max(gt_shape[0],gt_shape[1])\n        if kernel2 > max_k:\n            kernel2 = 15\n        kz = (((kernel2 - 1)/2.0)-0.5)/sigma\n        # kz = 3.0 # 99.7% of the data lies within +-(3*sigma) for a gaussian\n        density += gaussian_filter(pt2d, sigma, mode='constant',truncate=kz)\n    return density\n\ndef generatedensity_fromtxt(file_in,img_dir,save_dir,outfile):\n    '''\n    file_in: image_name, head_centers(x,y)\n    '''\n    tpath = file_in\n    imgdir = 
img_dir\n disdir = save_dir\n fr = open(tpath,'r')\n record_w = open(outfile,'w')\n f_cnts = fr.readlines()\n kernel2 = 15.0\n sigma = 4.0\n cnt = len(f_cnts)\n diff_sum = 0\n cnt_v = 0\n if not os.path.exists(disdir):\n os.makedirs(disdir)\n for i in tqdm.tqdm(range(cnt)):\n tmp = f_cnts[i].strip()\n tmp_sp = tmp.split(',')\n imgpath = os.path.join(imgdir,tmp_sp[0])\n img = cv2.imread(imgpath)\n h,w = img.shape[:2]\n gth,gtw = (h,w)\n dmap = np.zeros([gth,gtw],dtype=np.float32)\n pname = disdir +'/'+tmp_sp[0][:-4] +'.h5'\n if os.path.exists(pname):\n continue\n if tmp_sp[0].strip() in ['1015.jpg','1088.jpg']:\n continue\n if len(tmp_sp) >3:\n centers_c = list(map(float,tmp_sp[2:]))\n centers_c = np.reshape(centers_c,[-1,2])\n orgnum = centers_c.shape[0]\n cnt_v +=1\n if orgnum <100:\n # print(imgpath)\n dmap = gaussian_filter_density(centers_c,[gth,gtw])\n else:\n # print(imgpath)\n for j in range(orgnum):\n ix,iy = np.floor(centers_c[j,0]),np.floor(centers_c[j,1])\n if ix == gtw: \n ix = gtw-1\n if iy == gth:\n iy = gth-1\n # ix = min(ix,gtw-1)\n # iy = min(iy,gth-1)\n dmap[int(iy),int(ix)] = 1\n kz = (((kernel2 - 1)/2.0)-0.5)/sigma\n dmap = gaussian_filter(dmap,sigma,truncate=kz)\n afnum = np.sum(dmap)\n #print('rt:',afnum)# don't mind this slight variation\n record_w.write(\"{}\\t{}\\t{:.3f}\\n\".format(tmp_sp[0],orgnum,afnum))\n diff = orgnum-afnum\n diff_sum = diff_sum+ np.abs(diff)\n # pname = disdir +'/'+tmp_sp[0][:-4] +'.h5'\n with h5py.File(pname, 'w') as hf:\n hf['density'] = dmap\n record_w.write('mse:{},cnt:{}'.format(diff_sum/float(cnt_v),cnt_v))\n\ndef display(imgdir):\n img_paths = os.listdir(imgdir)\n #fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True)\n for tmp in img_paths:\n img_path = os.path.join(imgdir,tmp)\n fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True)\n ax1 = axes[0]\n img = cv2.imread(img_path)\n # img = cv2.resize(img,(1024,768))\n # ax1.imshow(img[:,:,::-1])\n ax2 = axes[1]\n gt_file = h5py.File(img_path.replace('.jpg','.h5').replace('images','gts'),'r')\n gt = np.asarray(gt_file['density'])\n #gt = np.array(gt,np.uint8)\n print('org',np.sum(gt))\n # size = np.shape(gt)\n # h,w = size[0]/8,size[1]/8\n # gt = cv2.resize(gt,(int(w),int(h)))*60\n # print(np.sum(gt))\n img = apply_density(img,gt)\n ax1.imshow(img[:,:,::-1])\n ax2.imshow(gt) #cmap=CM.jet)\n # plt.savefig('nwpu_example.png')\n plt.show()\n\ndef apply_density(img,hotmap):\n # create a blank img\n h,w,_ = img.shape\n ih,iw = hotmap.shape[:2]\n # img = cv2.resize(img,(iw,ih))\n overlay = img.copy()\n pred_num = str(int(np.sum(hotmap)))\n point = (int(w-300),20)\n keep_indx = np.where(hotmap>0.0)\n alpha = 0.5\n cv2.rectangle(overlay, (0, 0), (img.shape[1], img.shape[0]), (255, 0, 0), -1) \n for i in range(len(keep_indx[0])):\n # iy = np.clip(keep_indx[0][i]/float(ih) * h,0,h-1)\n # ix = np.clip(keep_indx[1][i]/float(iw) * w,0,w-1)\n ix = keep_indx[1][i]\n iy = keep_indx[0][i]\n cv2.circle(overlay,(int(ix),int(iy)),3,(0,0,255))\n # image = cv2.addWeighted(overlay, alpha, image, 1-alpha, 0) \n image = cv2.addWeighted(overlay, alpha, img, 1-alpha, 0) \n return image\n\ndef gen_filefromdir(base_dir,txt_file):\n '''\n base_dir: saved images path\n \"base_dir/image.jpg\"\n return: \"image1.jpg\"\n '''\n f_w = open(txt_file,'w')\n files = os.listdir(base_dir)\n total_ = len(files)\n id_num = 0\n for file_cnt in files:\n file_cnt = file_cnt[:-3]\n # print(file_cnt)\n f_w.write(\"{}\\n\".format(file_cnt))\n id_num+=1\n f_w.close()\n print(\"total id \",id_num)\n\ndef 
show_abb(imgpath):\n    img = cv2.imread(imgpath)\n    h,w = img.shape[:2]\n    print(h,w)\n    points_mat = io.loadmat(imgpath.replace('.jpg','.mat').replace('Images','mats'))\n    bbox = points_mat['annPoints']\n    for j in range(bbox.shape[0]):\n        cx = bbox[j,0]\n        cy = bbox[j,1]\n        cv2.circle(img,(int(cy),int(cx)),3,(0,255,0))\n    # cv2.imwrite('bblabel_example.png',img)\n    img = cv2.resize(img,(int(w/2),int(h/2)))\n    cv2.imshow('src',img)\n    cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n    matdir = '/data/detect/nwpd/mats'\n    imgdir = '/data/detect/nwpd/Images'\n    infile = '/data/detect/nwpd/val.txt'\n    fileout = '/data/detect/nwpd/val_labels.txt'\n    distdir = '/data/detect/nwpd/part_C_final/test_data/images'\n    # gen_labelfile(imgdir,distdir,matdir,infile,fileout)\n    # showhot(distdir,fileout)\n    file_in = '/data/detect/nwpd/val_labels.txt'\n    img_dir = '/data/detect/nwpd/part_C_final/test_data/images'\n    h5dir = '/data/detect/nwpd/part_C_final/test_data/gts'\n    file_out = 'Nwpu_testrecord.txt'\n    # generatedensity_fromtxt(file_in,img_dir,h5dir,file_out)\n    # display(img_dir)\n    img_dir = '/data/detect/nwpd/part_C_final/train_data/gts'\n    outfile = '/data/detect/nwpd/part_C_final/train_data/train_data.txt'\n    # gen_filefromdir(img_dir,outfile)\n    show_abb('/data/detect/nwpd/Images/0204.jpg') #0685 1088\n\n","sub_path":"src/utils/loadnwpu.py","file_name":"loadnwpu.py","file_ext":"py","file_size_in_byte":10392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"477087974","text":"#frappe dir and site name\nSITE_NAME = 'sjerp.dmall.io'\nFRAPPE_DIR = '/home/ubuntu/frappe-bench'\n#airflow dir\nAIRFLOW_DIR = '/home/ubuntu/airflow'\n#local data dir\nLOCAL_DATA_DIR = '/home/ubuntu/data'\n\n#sync sap date\nDEFAULT_SYN_DATE = True\nDATE_TO_SYNC = ''\n\nimport logging\nlogging.basicConfig(level=logging.INFO,\n                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n                    datefmt='%a, %d %b %Y %H:%M:%S',\n                    )\n","sub_path":"verify_script_test/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"599147587","text":"import tweepy\r\nimport time\r\n\r\nconsumer_key = '' #Insert the key obtained from your Twitter account.\r\nconsumer_secret = '' #Insert the key obtained from your Twitter account.\r\naccess_token = '' #Insert the key obtained from your Twitter account.\r\naccess_token_secret = '' #Insert the key obtained from your Twitter account.\r\n\r\nauthentication = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauthentication.set_access_token(access_token, access_token_secret)\r\n\r\napi = tweepy.API(authentication, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)\r\n\r\nuser = api.me()\r\n\r\nsearch = '' #Insert the message you want to search for on Twitter.\r\ntweetNumbers = 2000\r\n\r\nfor tweet in tweepy.Cursor(api.search, search).items(tweetNumbers):\r\n    try:\r\n        if((tweet.text) == ''): #Between the quotes, enter the message you want to search for in the tweet to make a comment.\r\n            tweet.retweet()\r\n            print('Tweet successfully retweeted.')\r\n            print('Username: @' + tweet.user.screen_name)\r\n            api.update_status(\"@\" + tweet.user.screen_name + \"Type here what you want to comment on the tweet that was searched.\", in_reply_to_status_id = tweet.id)\r\n            print(\"Tweet sent successfully.\")\r\n            print('')\r\n            time.sleep(60)\r\n\r\n    except tweepy.TweepError as e:\r\n        print('Error: ', e.reason)\r\n\r\n    except StopIteration:\r\n        break\r\n","sub_path":"twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"258515406","text":"#-*- coding: utf-8 -*-\n\nimport pickle\nwith open('/home/hyeyoung/dataset/data/new_sentence_list.txt', 'rb') as f:\n    new_sentence_list = pickle.load(f) # read it in, one line at a time\n\nfrom gensim.corpora.dictionary import Dictionary\n\ndictionary = Dictionary(new_sentence_list)\ncorpus = [dictionary.doc2bow(dic) for dic in new_sentence_list]\nprint(corpus)\n\nfrom gensim import models\n\n# 4. vectorize the corpus using the dictionary built above\ntfidf = models.TfidfModel(corpus)\ncorpus_tfidf = tfidf[corpus]\n\nprint(tfidf)","sub_path":"105.189 - topicmodel/sentencelist_corpus.py","file_name":"sentencelist_corpus.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"311217545","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[26]:\n\n\nfrom random import seed\nfrom random import choice\ndef game_3(explorer):\n    print(\"Hey! You've made it this far into the story, congrats! You're doing fineeeeeeee. Are you ready for the next challenge?\\nHint: You might want to grab a pen and paper for this ;)\\n\")\n\n    print(\"Here are some questions for you! I hope you're good at Riddles haha! Are you ready to begin?\")\n\n    option = input(\"Are you ready to begin? Key in Yes or No: \")\n    answer = option\n\n    seed(1)\n    while option != \"Yes\" and option != \"yes\":\n        x1 = \"Come on, you made it this far already, give it a try!\\nLet me ask you again\\n\"\n        x2 = \"Pleaseeeee... Don't be such a killjoy!\\n\"\n        x3 = \"Don't you wanna find out what happened to me?\\nChoose something else!\\n\"\n        responses = [x1,x2,x3]\n        selection = choice(responses)\n        print(selection)\n        option = input(\"Are you ready to begin? Key in Yes or No: \")\n\n    if option == \"Yes\" or option == \"yes\":\n        print(\"Yes! That's the spirit! Now, let us begin!\\n\")\n\n    response1 = input(\"A. Okay! B. I guess I'll try... C. Please give me something easy!! \")\n    responseback1 = response1\n\n    if responseback1 == \"A\":\n        print(\"I like your enthusiasm there! Let's proceed!\")\n    elif responseback1 in (\"B\", \"C\"):\n        print(\"I'm sure you'll be fine! Let's proceed!\")\n\n    print(\"Here's the first riddle!\\n\")\n\n    print('''This thing all things devours;\n    Birds, beasts, trees, flowers;\n    Gnaws iron, bites steel;\n    Grinds hard stones to meal;\n    Slays king, ruins town,\n    and beats mountains down.''')\n\n    response2 = input(\"Answer: \")\n    responseback2 = response2\n\n    a1 = \"Nice Try! But the answer's wrong sadly... How bout you give it another go?\"\n    a2 = \"Oof. Not quite there yet! Try again!\"\n    a3 = \"Not there yet! Here's a quick hint: This riddle is from a very famous movie!\"\n    a4 = \"Almost there! Here's another hint: The answer can be derived from looking at a watch or a clock.\"\n    a5 = \"Nope! Here's another hint: You encounter it daily.\"\n    fixed_answers = [a1,a2]\n    variable_answers = [a3,a4,a5]\n\n    seed(1)\n    count = 0\n    while count < 2:\n        if responseback2 != \"Time\" and responseback2 != \"time\":\n            final_response = choice(fixed_answers)\n            print(final_response)\n            count += 1\n            responseback2 = input(\"Answer: \")\n        else:\n            print(\"That's Correct! Tip: You may wish to take down this answer. It might come in handy later on!\")\n            break\n\n    while count >= 2:\n        if responseback2 != \"Time\" and responseback2 != \"time\":\n            final_response1 = choice(variable_answers)\n            print(final_response1)\n            responseback2 = input(\"Answer: \")\n        else:\n            print(\"That's Correct! Tip: You may wish to take down this answer. It might come in handy later on!\")\n            break\n\n\n# In[ ]:\n","sub_path":"Code_Files/game_3.py","file_name":"game_3.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"347549348","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 4 21:42:24 2017\n\n@author: Ting\n\"\"\"\nINF = 99999999\nNINF = -9999999\n\nclass Node:\n    def __init__(self, rule=0, successor=None, isLeaf=False, value=None):\n        if rule == 1:\n            self.rule = 'max'\n        else:\n            self.rule = 'min'\n        # use None instead of a mutable default argument so nodes never share one list\n        self.successor = successor if successor is not None else []\n        self.isLeaf = isLeaf\n        self.value = value\n        self.visited = False\n\ndef value(node, alpha, beta):\n    if node.rule == 'max':\n        return maxValue(node, alpha, beta)\n    if node.rule == 'min':\n        return minValue(node, alpha, beta)\n\ndef maxValue(node, alpha, beta):\n    if node.isLeaf:\n        node.visited = True\n        return node.value\n\n    v = NINF\n    for a in node.successor:\n        v = max(v,minValue(a,alpha,beta))\n        if v >= beta:\n            return v\n        alpha = max(alpha, v)\n    return v\n\n\ndef minValue(node, alpha, beta):\n    if node.isLeaf:\n        node.visited = True\n        return node.value\n\n    v = INF\n    for a in node.successor:\n        v = min(v,maxValue(a,alpha,beta))\n        if v <= alpha:\n            return v\n        beta = min(beta, v)\n    return v\n\n\ndef unvisited(node):\n    unvisit = []\n    if node.successor:\n        for successor in node.successor:\n            unvisit += unvisited(successor)\n    else:\n        if not node.visited:\n            unvisit.append(node.value)\n    return unvisit\n\n\ndef constructTree(n, tree, rule):\n    '''\n    construct a tree using given information, and return the root node\n    :param n: the height of tree\n    :param tree: the input tree described with list nested structure\n    :param rule: root node's type, 1 for max, 0 for min\n    :return: root node\n    '''\n    node = Node(rule=rule)\n    successors = []\n    if n == 1:\n        for t in tree:\n            successors.append(Node(rule=1-rule, isLeaf=True, value=t))\n    else:\n        for t in tree:\n            successors.append(constructTree(n-1, t, 1-rule))\n    node.successor = successors\n    return node\n\n\nwhile True:\n    try:\n        rule, n = map(int, input().strip().split())\n        tree = eval(input().strip())\n        root_node = constructTree(n-1, tree, rule)\n\n        # print(value(root_node, float(\"-inf\"), float(\"inf\"))) ## print out MINI-MAX value\n        print(value(root_node, NINF, INF)) ## print out MINI-MAX value\n        print(' '.join([str(node) for node in unvisited(root_node)])) ## print out unvisited nodes\n    except EOFError:\n        break\n","sub_path":"Lab2/abPruning.py","file_name":"abPruning.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"76198413","text":"import discord, random, json\nfrom discord.ext import commands\n\nclass HelpCommand:\n    def __init__(self, bot):\n        self.bot = bot\n\n    def get_type(self, command):\n        \"check if a command or a cog is passed\"\n\n        if self.bot.get_command(command):\n            return \"command\"\n\n        elif self.bot.get_cog(command):\n            return \"cog\"\n\n        else:\n            return None\n\n    async def command_not_found(self, ctx, command):\n        \"command not found error\"\n        emb = discord.Embed(description = f\"```\\ncommand \\\"{command}\\\" not found!\\n```\", colour = discord.Colour.red())\n        return await ctx.send(embed = emb)\n\n    async def cog_not_found(self, ctx, cog):\n        \"cog not found error\"\n        emb = discord.Embed(description = f\"```\\ncog \\\"{cog}\\\" not found!\\n```\", colour = discord.Colour.red())\n        return await ctx.send(embed = emb)\n\n    async def command_list(self, ctx):\n        \"return the commands list\"\n\n        with open(\"data/prefixes.json\", \"r\") as f:\n            l = json.load(f)\n\n        try:\n            prefix = l[str(ctx.guild.id)]\n\n        except KeyError:\n            prefix = \"e?\"\n\n        self.description = f\"\"\"Server Prefixes: `{prefix}`, `@{self.bot.user}`\n[Support Server](https://discord.gg/w8cbssP)\"\"\"\n\n        emb = discord.Embed(title = \"Help\", description = f\"{self.description}\\n\\n\", colour = self.bot.colour, timestamp = ctx.message.created_at)\n        emb.set_author(name = ctx.author, icon_url = str(ctx.author.avatar_url_as(static_format = \"png\")))\n\n        for cog in self.bot.cogs:\n            cog = self.bot.get_cog(cog)\n            cog_str = f\"**{cog.qualified_name.upper()}\\n**\"\n            commands = cog.get_commands()\n            commands = [cmd for cmd in commands if not cmd.hidden]\n\n            if len(commands) >= 1:\n                for command in commands:\n                    cog_str += f\"`{command.name}` \"\n\n                    try:\n                        for cmd in command.commands:\n                            cog_str += f\"`{cmd.parent} {cmd.name}` \"\n                    except:\n                        pass\n\n                emb.description += f\"{cog_str}\\n\\n\"\n\n        return await ctx.send(embed = emb)\n\n    async def command(self, ctx, command: commands.Command):\n        \"return single command help\"\n\n        if command.hidden:\n            return await self.command_not_found(ctx, command.name)\n\n        emb = discord.Embed(title = \"Help\", description = command.help, colour = self.bot.colour, timestamp = ctx.message.created_at)\n        emb.set_author(name = ctx.author, icon_url = str(ctx.author.avatar_url_as(static_format = \"png\")))\n\n        try:\n            parent = command.parent\n        except:\n            parent = None\n\n        if parent:\n            usage = f\"```{command.parent} {command.name}{command.signature}```\" if command.signature else f\"```{command.parent} {command.name}```\"\n            emb.add_field(name = \"Usage\", value = usage)\n\n            if command.aliases:\n                aliases = \"\\n\".join([f\"{cmd}\" for cmd in command.aliases])\n                emb.add_field(name = \"Aliases\", value = '```\\n{}\\n```'.format(aliases))\n\n            emb.add_field(name = \"Parent\", value = f\"```\\n{command.parent}\\n```\")\n\n            return await ctx.send(embed = emb)\n\n        else:\n            usage = f\"```{command.name} {command.signature}```\" if command.signature else f\"```{command.name}```\"\n            emb.add_field(name = \"Usage\", value = usage)\n\n            if command.aliases:\n                aliases = \"\\n\".join([f\"{cmd}\" for cmd in command.aliases])\n                emb.add_field(name = \"Aliases\", value = '```\\n{}\\n```'.format(aliases))\n\n            try:\n                if command.commands:\n                    subcommands = \"\"\n                    for cmd in [c for c in command.commands if not c.hidden]:\n                        subcommands += f\"{cmd.parent} {cmd.name} {cmd.signature}\\n\"\n\n                    emb.add_field(name = \"Subcommands\", value = f\"```\\n{subcommands}\\n```\")\n\n            except:\n                pass\n\n            return await ctx.send(embed = emb)\n\n    async def cog(self, ctx, cog: commands.Cog):\n        \"return cog commands\"\n\n        emb = discord.Embed(title = cog.qualified_name, description = \"\", colour = self.bot.colour, timestamp = ctx.message.created_at)\n        emb.set_author(name = ctx.author, icon_url = str(ctx.author.avatar_url_as(static_format = \"png\")))\n\n        commands = cog.get_commands()\n        commands = [cmd for cmd in commands if not cmd.hidden]\n\n        cog_str = f\"**{cog.qualified_name.upper()}**\\n\"\n\n        if 
len(commands) >= 1:\n for command in commands:\n cog_str += f\"`{command.name} {command.signature}` \" if command.signature else f\"`{command.name}` \" \n\n try:\n for cmd in command.commands:\n cog_str += f\"`{cmd.parent} {cmd.name} {cmd.signature}` \" if command.signature else f\"`{cmd.parent} {cmd.name}` \" \n except:\n pass\n\n emb.description = cog_str\n\n else:\n return await self.cog_not_found(ctx, cog.qualified_name)\n\n return await ctx.send(embed = emb)\n\nclass Help(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self._original_help_command = bot.help_command\n self.help = HelpCommand(bot)\n bot.help_command = None\n\n @commands.command(hidden = True)\n async def help(self, ctx, command = None):\n \"stop it, get some help\"\n\n if not command:\n await self.help.command_list(ctx)\n \n else:\n if self.help.get_type(command) == \"command\":\n await self.help.command(ctx, self.bot.get_command(command))\n\n elif self.help.get_type(command) == \"cog\":\n await self.help.cog(ctx, self.bot.get_cog(command))\n\n else:\n await self.help.command_not_found(ctx, command)\n\n def cog_unload(self):\n self.bot.help_command = self._original_help_command\n\ndef setup(bot):\n bot.add_cog(Help(bot))","sub_path":"cogs/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"517539590","text":"BASE_URL = 'http://www.collegehumor.com'\n\n####################################################################################################\ndef Start():\n\n\tObjectContainer.title1 = 'CollegeHumor'\n\tHTTP.CacheTime = CACHE_1HOUR\n\tHTTP.Headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36'\n\n####################################################################################################\n@handler('/video/collegehumor', 'CollegeHumor')\ndef MainMenu():\n\n\toc = ObjectContainer()\n\thtml = HTML.ElementFromURL(BASE_URL)\n\n\tfor show in html.xpath('//a[contains(@data-ga-label, \"Videos: \")]'):\n\n\t\t# Exclude certain items\n\t\ttitle = show.xpath('./span/text()')[0]\n\n\t\tif title.lower() in ['all originals']:\n\t\t\tcontinue\n\n\t\turl = show.get('href')\n\t\tthumb = ''\n\n\t\toc.add(DirectoryObject(\n\t\t\tkey = Callback(Show, url=url, title=title),\n\t\t\ttitle = title,\n\t\t\tthumb = Resource.ContentsOfURLWithFallback(url=thumb)\n\t\t))\n\n\treturn oc\n\n####################################################################################################\n@route('/video/collegehumor/show', page=int)\ndef Show(url, title, page=1):\n\n\toc = ObjectContainer(title2=title)\n\thtml = HTML.ElementFromURL('%s/page:%d' % (url, page))\n\n\tfor item in html.xpath('//div[@class=\"primary\"]//article[@data-node-id]/a'):\n\n\t\tvideo_url = item.get('href').split('?')[0]\n\n\t\tif not '/video/' in video_url:\n\t\t\tcontinue\n\n\t\tif not video_url.startswith(BASE_URL):\n\t\t\tvideo_url = '%s%s' % (BASE_URL, video_url)\n\n\t\tvideo_title = item.xpath('./img/@alt')[0]\n\t\tthumb = item.xpath('./img/@data-src')[0]\n\n\t\toc.add(VideoClipObject(\n\t\t\turl = video_url,\n\t\t\ttitle = video_title,\n\t\t\tthumb = Resource.ContentsOfURLWithFallback(url=thumb)\n\t\t))\n\n\tif len(html.xpath('//a[contains(@class, \"next\")]')) > 0:\n\t\toc.add(NextPageObject(\n\t\t\tkey = Callback(Show, url=url, title=title, page=page+1),\n\t\t\ttitle = 'More...'\n\t\t))\n\n\treturn 
oc\n","sub_path":"Contents/Code/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"433722454","text":"# Copyright 2013 OpenStack LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport fixtures\n\nfrom monitorclient import client\nfrom monitorclient import shell\nfrom tests import utils\nfrom tests.v2 import fakes\n\n\nclass ShellTest(utils.TestCase):\n\n    FAKE_ENV = {\n        'ENERGY_USERNAME': 'username',\n        'ENERGY_PASSWORD': 'password',\n        'ENERGY_PROJECT_ID': 'project_id',\n        'OS_VOLUME_API_VERSION': '2',\n        'ENERGY_URL': 'http://no.where',\n    }\n\n    # Patch os.environ to avoid required auth info.\n    def setUp(self):\n        \"\"\"Run before each test.\"\"\"\n        super(ShellTest, self).setUp()\n        for var in self.FAKE_ENV:\n            self.useFixture(fixtures.EnvironmentVariable(var,\n                                self.FAKE_ENV[var]))\n\n        self.shell = shell.OpenStackMonitorShell()\n\n        #HACK(bcwaldon): replace this when we start using stubs\n        self.old_get_client_class = client.get_client_class\n        client.get_client_class = lambda *_: fakes.FakeClient\n\n    def tearDown(self):\n        # For some methods like test_image_meta_bad_action we are\n        # testing a SystemExit to be thrown and object self.shell has\n        # no time to get instantiated which is OK in this case, so\n        # we make sure the method is there before launching it.\n        if hasattr(self.shell, 'cs'):\n            self.shell.cs.clear_callstack()\n\n        #HACK(bcwaldon): replace this when we start using stubs\n        client.get_client_class = self.old_get_client_class\n        super(ShellTest, self).tearDown()\n\n    def run_command(self, cmd):\n        self.shell.main(cmd.split())\n\n    def assert_called(self, method, url, body=None, **kwargs):\n        return self.shell.cs.assert_called(method, url, body, **kwargs)\n\n    def assert_called_anytime(self, method, url, body=None):\n        return self.shell.cs.assert_called_anytime(method, url, body)\n\n    def test_list(self):\n        self.run_command('list')\n        # NOTE(jdg): we default to detail currently\n        self.assert_called('GET', '/monitors/detail')\n\n    def test_list_filter_status(self):\n        self.run_command('list --status=available')\n        self.assert_called('GET', '/monitors/detail?status=available')\n\n    def test_list_filter_name(self):\n        self.run_command('list --name=1234')\n        self.assert_called('GET', '/monitors/detail?name=1234')\n\n    def test_list_all_tenants(self):\n        self.run_command('list --all-tenants=1')\n        self.assert_called('GET', '/monitors/detail?all_tenants=1')\n\n    def test_show(self):\n        self.run_command('show 1234')\n        self.assert_called('GET', '/monitors/1234')\n\n    def test_delete(self):\n        self.run_command('delete 1234')\n        self.assert_called('DELETE', '/monitors/1234')\n\n    def test_snapshot_list_filter_monitor_id(self):\n        self.run_command('snapshot-list --monitor-id=1234')\n        self.assert_called('GET', '/snapshots/detail?monitor_id=1234')\n\n    def test_snapshot_list_filter_status_and_monitor_id(self):\n        self.run_command('snapshot-list --status=available 
--monitor-id=1234')\n        self.assert_called('GET', '/snapshots/detail?'\n                           'status=available&monitor_id=1234')\n\n    def test_rename(self):\n        # basic rename with positional arguments\n        self.run_command('rename 1234 new-name')\n        expected = {'monitor': {'name': 'new-name'}}\n        self.assert_called('PUT', '/monitors/1234', body=expected)\n        # change description only\n        self.run_command('rename 1234 --description=new-description')\n        expected = {'monitor': {'description': 'new-description'}}\n        self.assert_called('PUT', '/monitors/1234', body=expected)\n        # rename and change description\n        self.run_command('rename 1234 new-name '\n                         '--description=new-description')\n        expected = {'monitor': {\n            'name': 'new-name',\n            'description': 'new-description',\n        }}\n        self.assert_called('PUT', '/monitors/1234', body=expected)\n        # noop, the only call will be the lookup\n        self.run_command('rename 1234')\n        self.assert_called('GET', '/monitors/1234')\n\n    def test_rename_snapshot(self):\n        # basic rename with positional arguments\n        self.run_command('snapshot-rename 1234 new-name')\n        expected = {'snapshot': {'name': 'new-name'}}\n        self.assert_called('PUT', '/snapshots/1234', body=expected)\n        # change description only\n        self.run_command('snapshot-rename 1234 '\n                         '--description=new-description')\n        expected = {'snapshot': {'description': 'new-description'}}\n        self.assert_called('PUT', '/snapshots/1234', body=expected)\n        # snapshot-rename and change description\n        self.run_command('snapshot-rename 1234 new-name '\n                         '--description=new-description')\n        expected = {'snapshot': {\n            'name': 'new-name',\n            'description': 'new-description',\n        }}\n        self.assert_called('PUT', '/snapshots/1234', body=expected)\n        # noop, the only call will be the lookup\n        self.run_command('snapshot-rename 1234')\n        self.assert_called('GET', '/snapshots/1234')\n\n    def test_set_metadata_set(self):\n        self.run_command('metadata 1234 set key1=val1 key2=val2')\n        self.assert_called('POST', '/monitors/1234/metadata',\n                           {'metadata': {'key1': 'val1', 'key2': 'val2'}})\n\n    def test_set_metadata_delete_dict(self):\n        self.run_command('metadata 1234 unset key1=val1 key2=val2')\n        self.assert_called('DELETE', '/monitors/1234/metadata/key1')\n        self.assert_called('DELETE', '/monitors/1234/metadata/key2', pos=-2)\n\n    def test_set_metadata_delete_keys(self):\n        self.run_command('metadata 1234 unset key1 key2')\n        self.assert_called('DELETE', '/monitors/1234/metadata/key1')\n        self.assert_called('DELETE', '/monitors/1234/metadata/key2', pos=-2)\n","sub_path":"monitor/python-monitorclient-1.1/tests/v2/test_shell.py","file_name":"test_shell.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"138105845","text":"class Application(object):\n\t\"Resistor color-code application class\"\n\n\tdef __init__(self):\n\t\tself.root = Tk()\n\t\tself.root.title('Code de couleur')\n\t\tself.dessineResistance()\n\t\tLabel(self.root, text = 'Entre la valeur de la résistance (en ohm) :').grid(row = 2, column =1, columnspan =3)\n\t\tButton(self.root, text = 'Montrer', command =self.changeCouleurs).grid(row =3, column =1)\n\t\tButton(self.root, text = 'Quitter', command =self.root.quit).grid(row = 3, column =3)\n\t\tself.entree = Entry(self.root, width = 14)\n\t\tself.entree.grid(row =3, column =2)\n\t\tself.cc = ['black','brown','red','orange','yellow','green','blue','purple','grey','white']\n\t\tself.root.mainloop()\n\n\tdef dessineResistance(self):\n\t\t\"\"\"Canvas with a minimalist resistor model\"\"\"\n\t\tself.can = Canvas(self.root, width =250, height =100, bg ='light blue')\n\t\tself.can.grid(row =1, column =1, columnspan =3, pady =5, padx =5)\n\t\tself.can.create_line(10, 50, 240, 50, width =3)\n\t\tself.can.create_rectangle(65, 30, 185, 70, fill ='beige', width =2)\n\t\tself.ligne = []\n\t\tfor x in range(80, 150, 24):\n\t\t\tself.ligne.append(self.can.create_rectangle(x, 30, x+15, 70, fill ='black', width =0))\n\n\tdef changeCouleurs(self):\n\t\t\"\"\"Display the colors\"\"\"\n\t\tself.v1ch = self.entree.get()\n\t\ttry:\n\t\t\tv = float(self.v1ch)\n\t\texcept:\n\t\t\terr =1\n\t\telse :\n\t\t\terr =0\n\t\tif err ==1 or v>1e11:\n\t\t\tself.signaleErreur()\n\t\telse:\n\t\t\tif v>10:\n\t\t\t\tli = [0]*3\n\t\t\t\tlogv = int(log10(v))\n\t\t\t\tordgr = 10**logv\n\t\t\t\tli[0] = int(v/ordgr)\n\t\t\t\tdecim = v/ordgr - li[0]\n\t\t\t\tli[1] = int(decim*10+0.5)\n\t\t\t\tli[2] = logv -1\n\t\t\t\tfor n in range(3):\n\t\t\t\t\tself.can.itemconfigure(self.ligne[n], fill =self.cc[li[n]])\n\t\t\telse :\n\t\t\t\tli = [0]*3\n\t\t\t\tlogv = int(v/10)\n\t\t\t\tordgr = 10*logv\n\t\t\t\tli[0] = 0\n\t\t\t\tli[1] = int(logv)\n\t\t\t\tli[2] = int(v -ordgr)\n\t\t\t\tfor n in range(3):\n\t\t\t\t\tself.can.itemconfigure(self.ligne[n], fill =self.cc[li[n]])\n\n\tdef signaleErreur(self):\n\t\tself.entree.configure(bg ='red')\n\t\tself.root.after(1000, self.videEntree)\n\n\tdef videEntree(self):\n\t\tself.entree.configure(bg ='white')\n\t\tself.entree.delete(0, len(self.v1ch))\n\n#################################\n\nif __name__ == '__main__':\n\tfrom tkinter import *\n\tfrom math import log10\n\tf = Application()\n","sub_path":"resistance.py","file_name":"resistance.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"334426806","text":"def coprime(a,b):\r\n    q = int(a/b)\r\n    r = a - q*b\r\n    while r != 0:\r\n        a = b\r\n        b = r\r\n        q = int(a/b)\r\n        r = a - q * b\r\n    if b == 1:\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef main():\r\n    n = int(input('输入n求得φ(n): '))\r\n    counter = 0\r\n    for i in range(1,n):\r\n        if coprime(i,n):\r\n            counter += 1\r\n    print('φ(n) = '+str(counter))\r\n    input(\"Press Enter to exit.\")\r\n\r\nmain()\r\n","sub_path":"第二章 同余/欧拉公式.py","file_name":"欧拉公式.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"343563199","text":"from nmtwizard.framework import Framework\nfrom nmtwizard.logger import get_logger\nfrom nmtwizard.serving import TranslationOutput\nimport requests\nimport os\nimport re\nimport json\nimport time\n\nlogger = get_logger(__name__)\n\nsupportedLangRe = re.compile(r\"^(en|de|fr|es|pt|it|nl|pl|ru)$\")\nentrypoint = \"https://api.deepl.com/v2/translate\"\n\nclass DeepLTranslateFramework(Framework):\n\n    def __init__(self):\n        super(DeepLTranslateFramework, self).__init__(stateless=True)\n        self._credentials = os.getenv('DEEPL_CREDENTIALS')\n        assert isinstance(self._credentials, str), \"missing credentials\"\n\n    def trans(self, config, model_path, input, output, gpuid=0):\n        assert supportedLangRe.match(config['source']), \"unsupported language: %s\" % config['source']\n        assert supportedLangRe.match(config['target']), \"unsupported language: %s\" % config['target']\n        with open(input, 'rb') as fi, open(output, 'wb') as fo:\n            lines = fi.readlines()\n            translations = translate_list(\n                self._credentials,\n                lines, source_language=config['source'], target_language=config['target'])\n            for translation in translations:\n                fo.write(translation.encode('utf-8') + '\\n')\n\n    def train(self, *args, **kwargs):\n        raise NotImplementedError(\"This framework can only be used for translation\")\n\n    def release(self, *arg, **kwargs):\n        raise NotImplementedError('This framework does not require a release step')\n\n    def serve(self, config, model_path, gpuid=0):\n        return None, {'source': config['source'], 'target': config['target']}\n\n    def forward_request(self, batch_inputs, info, timeout=None):\n        return [[TranslationOutput(translation)] for translation in translate_list(\n            self._credentials,\n            batch_inputs,\n            source_language=info['source'],\n            target_language=info['target'])]\n\n    def _preprocess_input(self, state, input, extra_config):\n        return input\n\n    def _postprocess_output(self, state, source, target, extra_config):\n        return target\n\n\ndef translate_list(credentials, texts, source_language, target_language):\n\n    i = 0\n    while i < len(texts):\n        nexti = i + 10\n        if nexti > len(texts):\n            nexti = len(texts)\n        logger.info('Translating range [%d:%d]', i, nexti)\n        params = {\n            \"text\": texts[i:nexti],\n            \"source_lang\": source_language.upper(),\n            \"target_lang\": target_language.upper(),\n            \"split_sentences\": 0,\n            \"auth_key\": credentials\n        }\n\n        retry = 0\n        while retry < 10:\n            r = requests.get(entrypoint, params=params)\n            if r.status_code == 429:\n                retry += 1\n                time.sleep(5)\n            else:\n                break\n\n        if r.status_code != 200 or 'translations' not in r.json():\n            raise RuntimeError('incorrect result from \\'translate\\' service: %s' % r.text)\n        for trans in r.json()['translations']:\n            yield trans['text']\n        i = nexti\n\nif __name__ == \"__main__\":\n    DeepLTranslateFramework().run()\n","sub_path":"frameworks/deepl_translate/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"13395974","text":"#https://github.com/Net4ky/PPsocket\n\nimport time\nfrom shelper import SocketHelper\nfrom comm_functions import startup\nfrom comm_functions import player1first\nfrom comm_functions import player2first\nfrom config import watchdog\n\nport1 = 10231\nport2 = 10236\nhost2 = \"127.0.0.1\"\nhost1 = \"localhost\"\n# host1 = \"192.168.1.182\"\n\nwhile True:\n    try:\n        #************************setup***********************************************************\n        socket1 = SocketHelper(host1,port1)\n        socket2 = SocketHelper(host2,port2)\n\n        # watchdog.start()\n\n        player1colour = startup(socket1, socket2)\n\n        # b needed because we can only send bytes objects in python; decode to get a string\n        #mesg = [b\"W001B050\", b\"!sur\"] #b\"W004B050\",b\"W005B050.9\",b\"W006B050\",b\"W007B070\",b\"W008B080\",b\"W009B090.9\",b\"W0010B010\",\n\n        if player1colour == b\"Blac\":\n            while True:\n                x = player1first(socket1, socket2)\n                if x == \"end\":\n                    break\n                elif x == \"FAIL\":\n                    print(\"Connection Failed\")\n                    break\n        elif player1colour == \"FAIL\":\n            print(\"Connection Failed\")\n        else:\n            while True:\n                x = player2first(socket1, socket2)\n                if x == \"end\":\n                    break\n                elif x == \"FAIL\":\n                    print(\"Connection Failed\")\n                    break\n    except Exception as e:\n        print(\"[main]\"+str(e))\n    finally:\n        try:\n            socket1.close_socket()\n            socket2.close_socket()\n            watchdog.stop()\n            time.sleep(1)\n        except:\n            pass\n\n","sub_path":"ProjectTests/python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"214430184","text":"# Compute the XOR of two numbers\n# Convert both numbers to binary and XOR them digit by digit\nnumbers = [int(i) for i in input('自然数をスペースで区切って2つ入力: ').rstrip().split()] # read the two numbers\ninputNumbers = [numbers[0], numbers[1]]\n\n# convert to binary: split into digits and store them in a dict\nnum2 = {}\nshoulder = [] # number of binary digits\nfor i in range(2):\n    num2[i] = []\n    shoulder.append(0)\n    while numbers[i] > 0:\n        # print('dividing ' + str(numbers[i]) + ' by 2')\n        q = numbers[i] % 2\n        # print('the remainder is ' + str(q))\n        num2[i].append(q)\n        shoulder[i] += 1\n        # print(shoulder[i])\n        numbers[i] = numbers[i] // 2\ndigitDifference = shoulder[0] - shoulder[1]\nif digitDifference > 0:\n    for d in range(shoulder[1], shoulder[1] + digitDifference):\n        num2[1].append(0)\nelif digitDifference < 0:\n    for d in range(shoulder[0], shoulder[0] - digitDifference):\n        num2[0].append(0)\n# print(num2)\n\n# add up the digits in each position\n# print(len(num2[0]))  # digit count of the larger number in binary\ndigit = 0\nxor = 0\nwhile digit < (len(num2[0])):\n    i = num2[0][digit] + num2[1][digit]\n    if i == 2:\n        i = 0\n    # print(i)  # the digit at this position, counted from the bottom\n    xor += i * (2 ** digit) # accumulate\n    digit += 1\n\n# display the XOR\nprint(str(inputNumbers[0]) + ' と ' + str(inputNumbers[1]) + ' のXORは ', end = \"\")\nprint(str(xor) + ' です')\neasyXor = inputNumbers[0] ^ inputNumbers[1]\nprint('普通に計算すると ' + str(inputNumbers[0]) + ' ^ ' + str(inputNumbers[1]) + ' = ' + str(easyXor) + ' なので...')\nif xor == easyXor:\n    print('\\n一致しました!やったね(っ´ω`c)')\nelse:\n    print('\\n間違ってるよ(´>ω<`)')\n","sub_path":"Others/calXor.py","file_name":"calXor.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"317035323","text":"from collections import OrderedDict\nfrom flask import redirect, request, g, jsonify, current_app, render_template, url_for\n\nfrom yublog.caches import cache_tool, global_cache_key\nfrom yublog.extensions import db\nfrom yublog.models import Post, Comment, Page, Category, Tag, Talk, SiteLink, LoveMe\nfrom yublog.views import main_bp\nfrom yublog.views.utils.comment_utils import CommentUtils\nfrom yublog.views.utils.model_cache_utils import get_model_cache\n\n\n@main_bp.route('/')\n@main_bp.route('/index')\ndef index():\n    _page = request.args.get('page', 1, type=int)\n    per_page = current_app.config['POSTS_PER_PAGE']\n\n    _posts = Post.query.filter_by(draft=False).order_by(Post.timestamp.desc())\n    counts = _posts.count()\n    max_page = counts // per_page + 1 if counts % per_page != 0 else counts // per_page\n    post_list = _posts.limit(per_page).offset((_page - 1) * per_page).all()\n    posts = []\n    for p in post_list:\n        cache_key = '_'.join(map(str, ['post', p.year, p.month, p.url_name]))\n        # print(f'key: {cache_key}')\n        posts.append(get_model_cache(cache_key))\n    return render_template('main/index.html', title='首页',\n                           posts=posts, page=_page, max_page=max_page,\n                           pagination=range(1, max_page + 1))\n\n\n@main_bp.route('/<int:year>/<int:month>/<post_url>/')\ndef post(year, month, post_url):\n    cache_key = '_'.join(map(str, ['post', year, month, post_url]))\n    _post = get_model_cache(cache_key)\n\n    page_cnt = request.args.get('page', 1, type=int)\n    if page_cnt == -1:\n        counts = _post.get('comment_count', 0)\n        page_cnt = (counts - 1) // current_app.config['COMMENTS_PER_PAGE'] + 1\n\n    pagination = Comment.query.filter_by(post_id=_post['id'], disabled=True, replied_id=None) \\\n        .order_by(Comment.timestamp.desc()) \\\n        .paginate(page_cnt, per_page=current_app.config['COMMENTS_PER_PAGE'], error_out=False)\n    comments = pagination.items\n\n    return render_template('main/post.html', post=_post, title=_post['title'],\n                           pagination=pagination, comments=comments,\n                           counts=_post.get('comment_count', 0), meta_tags=','.join(_post['tags']))\n\n\n@main_bp.route('/page/<page_url>/')\ndef page(page_url):\n    _page = Page.query.filter_by(url_name=page_url).first()\n    p = request.args.get('page', 1, type=int)\n    if p == -1:\n        counts = _page.comments.count()\n        p = (counts - 1) // current_app.config['COMMENTS_PER_PAGE'] + 1\n    pagination = Comment.query.filter_by(page_id=_page.id, disabled=True, replied_id=None).order_by(\n        Comment.timestamp.desc()).paginate(\n        p, per_page=current_app.config['COMMENTS_PER_PAGE'],\n        error_out=False)\n    comments = pagination.items\n\n    return render_template('main/page.html', page=_page, title=_page.title,\n                           pagination=pagination, comments=pagination.items,\n                           counts=_page.comments.filter_by(disabled=True).count())\n\n\n@main_bp.route('/tag/<tag_name>/')\ndef tag(tag_name):\n    Tag.query.filter_by(tag=tag_name).first()\n\n    all_posts = Post.query.filter_by(draft=False).order_by(Post.timestamp.desc()).all()\n    posts = (p for p in all_posts if p.tag_in_post(tag_name))\n\n    return render_template('main/tag.html', tag=tag_name,\n                           posts=posts, title='标签:{}'.format(tag_name))\n\n\n@main_bp.route('/category/<category_name>/')\ndef category(category_name):\n    _category = Category.query.filter_by(category=category_name, is_show=True).first()\n\n    posts = Post.query.filter_by(category=_category,\n                                 draft=False).order_by(Post.timestamp.desc()).all()\n    return render_template('main/category.html', category=_category,\n                           posts=posts, title='分类:{}'.format(_category.category))\n\n\n@main_bp.route('/archives/')\ndef archives():\n    count = Post.query.filter_by(draft=False).count()\n    page_cnt = request.args.get('page', 1, type=int)\n    pagination = Post.query.filter_by(draft=False) \\\n        .order_by(Post.timestamp.desc()) \\\n        .paginate(page_cnt, error_out=False,\n                  per_page=current_app.config['ARCHIVES_POSTS_PER_PAGE'])\n    posts = pagination.items\n    data = OrderedDict()\n    for p in posts:\n        data.setdefault(p.year, []).append(p)\n\n    return render_template('main/archives.html', title='归档', posts=posts,\n                           data=data, count=count, pagination=pagination)\n\n\n@main_bp.route('/search/', methods=['POST'])\ndef search():\n    if g.search_form.validate_on_submit():\n        query = g.search_form.search.data\n        return redirect(url_for('main.search_result', keywords=query))\n\n    elif g.search_form2.validate_on_submit():\n        query = g.search_form2.search.data\n        return redirect(url_for('main.search_result', keywords=query))\n\n\n# /search-result?keywords=query\n@main_bp.route('/search-result')\ndef search_result():\n    query = request.args.get('keywords')\n    page_cnt = request.args.get('page', 1, type=int)\n    pagination = Post.query.whooshee_search(query) \\\n        .order_by(Post.id.desc()) \\\n        .paginate(page_cnt, error_out=False,\n                  per_page=current_app.config['SEARCH_POSTS_PER_PAGE'])\n    results = (p for p in pagination.items if p.draft is False)\n\n    return render_template('main/results.html', results=results,\n                           query=query, pagination=pagination,\n                           title='{}的搜索结果'.format(query))\n\n\n# sidebar \"love me\" widget\n@main_bp.route('/loveme', methods=['POST'])\ndef love_me():\n    data = request.get_json()\n    if data.get('i_am_handsome', '') == 'yes':\n        # update the cache\n        global_cache = cache_tool.get(cache_tool.GLOBAL_KEY)\n        global_cache[global_cache_key.LOVE_COUNT] += 1\n        cache_tool.set(cache_tool.GLOBAL_KEY, global_cache)\n        love_me_counts = 
LoveMe.query.first()\n love_me_counts.love_count += 1\n\n db.session.add(love_me_counts)\n db.session.commit()\n return jsonify(counts=love_me_counts.love_count)\n return jsonify(you_are_sb='yes')\n\n\n@main_bp.route('///comment', methods=['POST'])\ndef comment(target_type, target_id):\n form = request.get_json()\n data = CommentUtils(target_type, form).save_comment(target_id)\n\n # todo\n return jsonify(nickname=data['nickname'], email=data['email'],\n website=data['website'], body=data['body'])\n\n\n@main_bp.route('/talk')\ndef talk():\n talks = Talk.query.order_by(Talk.timestamp.desc()).all()\n data = OrderedDict()\n for t in talks:\n data.setdefault(t.year, []).append(t)\n\n return render_template('main/talk.html', title='说说', data=data)\n\n\n# friend link page\n@main_bp.route('/friends')\ndef friends():\n friend_links = SiteLink.query.filter_by(is_friend=True).order_by(SiteLink.id.desc()).all()\n great_links = (link for link in friend_links if link.is_great is True)\n bad_links = (link for link in friend_links if link.is_great is False)\n\n return render_template('main/friends.html', title=\"朋友\",\n great_links=great_links, bad_links=bad_links)\n","sub_path":"yublog/views/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"484842588","text":"env = DefaultEnvironment()\nenv.Append(\n CXXFLAGS=[\"-O2\", \"-Wall\", \"-std=c++11\", \"-fdiagnostics-color=auto\"],\n LINKFLAGS=\"-Wl,--unresolved-symbols=ignore-in-shared-libs -Wl,--as-needed\",\n CPPPATH=['/usr/local/include', '../include', '/usr/include/eigen3', '.'],\n LIBPATH=['/usr/local/lib', '/usr/local/lib64'],\n LIBS=['glog'],\n)\n\nProgram(\"samples/goicp\", Glob(\"*.cc\")+Glob(\"samples/*.cc\"))\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"367121941","text":"# https://leetcode.com/problems/valid-sudoku/\n\n# board is a 9x9 list, per problem statement, we only handle a 9x9 board\nfrom collections import Set\n\n\ndef validate_board(board):\n dimension = 9 # later we can expand to make this configurable\n sqrt = 3 # for the 3x3 number validation\n\n for x in range(0, dimension): # 0 to 8\n is_valid = validate_numbers(board[x]) # horizontal\n if not is_valid:\n return False\n\n nums = [board[y][x] for y in range(0, dimension)] # vertical\n is_valid = validate_numbers(nums)\n if not is_valid:\n return False\n\n # 3x3 squares\n index = [x for x in range(0, dimension)]\n step_index = index[0: dimension: sqrt]\n for x in step_index:\n for y in step_index:\n nums = [board[i][j] for i in range(x, x+sqrt) for j in range(y, y+sqrt)]\n print(nums)\n is_valid = validate_numbers(nums)\n if not is_valid:\n return False\n\n return True\n\n\n# given a list of upto 9 numbers, returns true/false indicating whether they\n# constitutes valid number for sudoku\n# numbers are valid if they are 1 <= x <= 9\n# numbers cannot be repeated\ndef validate_numbers(row):\n dimension = 9\n assert row.__len__() == dimension, str(row) + ' does not have ' + str(dimension) + ' items'\n numbers = [int(x) for x in row if x != \".\"] # filter \"blanks\"\n num_set = {x for x in numbers if 1 <= x <= dimension} # filter out duplicates\n return num_set.__len__() == 
len(numbers)\n\n","sub_path":"python/local/valid_sudoku.py","file_name":"valid_sudoku.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"167256646","text":"from flask import Flask, request\nimport os\napp = Flask(__name__)\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\n@app.route('/')\ndef hello_world():\n    return 'Vulnerable App'\n\n@app.route('/ls/<filename>')\ndef ls(filename):\n
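    # deliberately unsafe: filename is spliced straight into a shell command,\n    # so a value like ';id' also runs id (this app is a vulnerability demo)\n    output=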
\".join(os.popen('ls ' + filename).readlines())\n return \"\"\"\n \"\"\" + output + \"\"\"\"\n \"\"\"\n\nif __name__ == '__main__':\n app.run(debug=True,host='0.0.0.0')\n","sub_path":"KMS-Apps/AppFlask/App/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"522114499","text":"# coding: utf-8\n\n'''\n双向循环链表\n@Author Ivanli\n@Time 2017.12.19\n'''\n\n'''\n节点类\n'''\nclass Node(object):\n\n def __init__(self, data):\n self.data = data\n self._pre = None # 前驱\n self._next = None # 后继\n\n def __repr__(self):\n return str(self.data)\n\n'''\n双向循环链表类\n'''\nclass TwoWayCircleChainTable(object):\n\n # 初始化链表\n def __init__(self):\n self.head = None\n self.rear = None\n\n # 添加元素到链表尾部\n def append(self, data):\n\n # 生成节点\n node = Node(data)\n\n # 若列表为空,则节点赋给head, rear\n if self.head is None:\n self.head = node\n self.rear = node\n self.head._pre = self.rear # head.pre -> rear\n self.head._next = self.rear # head.next -> rear\n self.rear._pre = self.head # rear.pre -> head\n self.rear._next = self.head # rear.next -> head\n else:\n pointer = self.head # pointer指向rear节点\n while pointer != self.rear:\n pointer = pointer._next # 遍历到rear节点\n pointer._next = node # 遍历到最后1个节点,将新节点append上,pointer为倒数第2个节点\n self.rear = pointer._next # 更新rear到最后1个节点\n self.rear._pre = pointer # 更新rear.pre -> pointer\n self.rear._next = self.head # 更新rear.next -> head\n\n # 插入节点到链表\n def insert(self, index, data):\n\n if index < 0 or index > self.getLength():\n return None\n\n # 生成节点\n node = Node(data)\n\n # 节点插入头部\n if index == 0:\n tmp_head = self.head # 备份原头节点\n self.head = node\n self.head._pre = self.rear # 更新head.pre -> rear\n self.head._next = tmp_head # 更新head.next -> second\n self.rear._next = self.head # 更新rear.next -> head\n elif index == self.getLength():\n self.append(data) # 节点插入尾部\n else:\n pointer = self.head\n i = 0\n while i != index-1: # 移动指针到index-1位置,找到要插入节点的上一个节点\n pointer = pointer._next\n i += 1\n ori_indexNode = pointer._next # 备份原先位置的节点,pointer为要插入位置的上1个节点\n pointer._next = node # 新节点顶替原先节点的位置,作为上一个节点的后继\n node._pre = pointer # 新节点的pre -> 上1个节点\n node._next = ori_indexNode # 新节点的next -> 原先节点\n ori_indexNode._pre = node # 原先节点的pre -> 新节点\n\n\n # 删除节点\n def delete(self, index):\n\n if index < 0 or index > self.getLength():\n return None\n\n # 删除head节点\n if index == 0:\n self.head = self.head._next\n self.head._pre = self.rear # 更新新的head.pre -> rear\n self.rear._next = self.head # 更新rear的后继为新的head\n elif index == self.getLength()-1: # 删除tail节点\n pointer = self.head\n i = 0\n while i != self.getLength()-1: # 遍历至rear节点的上一个节点\n pre = pointer\n pointer = pointer._next\n i += 1\n self.rear = pre # 更新rear节点为上一个节点\n self.rear._next = self.head # 更新新rear节点的后继节点为head\n self.head._pre = self.rear # 更新head.pre -> 新rear\n else:\n pointer = self.head\n i = 0\n while i != index-1:\n pointer = pointer._next # 移动指针到index-1位置,要删除节点的上一个节点\n i += 1\n pointer._next = pointer._next._next # del节点的上一个节点的next -> del节点的下一个节点\n pointer._next._pre = pointer # del节点的下一个节点的pre -> del节点的上一个节点\n\n # 更新链表\n def update(self, index, data):\n\n if index < 0 or index > self.getLength():\n return None\n pointer = self.head\n i = 0\n while i != index:\n pointer = pointer._next\n i += 1\n pointer.data = data\n\n # 根据index查询节点\n def getItem(self, index):\n\n if index < 0 or index > self.getLength():\n return None\n\n pointer = self.head\n i = 0\n while i != index: # 移动指针到index位置\n pointer = pointer._next\n i += 1\n return 
pointer.data\n\n # 根据节点数据查询index\n def getIndex(self, data):\n\n pointer = self.head\n index = 0\n while pointer.data != data:\n pointer = pointer._next\n index += 1\n return index\n\n # 获取链表长度\n def getLength(self):\n '''\n 1. 从head一直遍历到rear,统计节点数目\n 2. 最后把rear节点也算上\n :return:\n '''\n length = 0\n pointer = self.head\n while pointer != self.rear: # 遍历至rear\n length += 1\n pointer = pointer._next\n return length + 1 # 还要将rear节点算上\n\n\n # 清空链表\n def clear(self):\n self.head = None\n self.rear = None\n\n'''\n操作链表类\n'''\nclass OperateChain(object):\n\n def __init__(self):\n pass\n\n # 合并循环链表\n def merge(self, CirChain_1, CirChain_2):\n\n merge_chain = TwoWayCircleChainTable()\n\n head_1 = CirChain_1.head\n rear_1 = CirChain_1.rear\n head_2 = CirChain_2.head\n rear_2 = CirChain_2.rear\n rear_1._next = head_2\n head_2._pre = rear_1\n rear_2._next = head_1\n head_1._pre = rear_2\n\n merge_chain.head = head_1\n merge_chain.rear = rear_2\n return merge_chain\n\n","sub_path":"chain/main/TwoWayCircleChainTable.py","file_name":"TwoWayCircleChainTable.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"230532885","text":"#!/usr/bin/env python3\nfrom itertools import combinations\n\nm = 10**9 + 7\nn = int(input())\nS = list(range(1, n + 1))\nsoln = 0\n\nfor x in range(n + 1):\n\tfor comb in combinations(S, x):\n\t\tif sum(comb) % 10 == 7:\n\t\t\tsoln += 1\nprint(soln % m)\n","sub_path":"2023/problems/sammaeining/submissions/partially_accepted/dagur_n2n_20.py","file_name":"dagur_n2n_20.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"370963658","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 27 11:36:10 2018\n\n@author: Benson\n\"\"\"\n\n\n# list\na = [0.1, 0.5, 0.25, 0.8, 0.9]\nfor i in a: # i 為每個元素\n print( i*100 ) \n \n\t\n# tuple\na = (0.1, 0.5, 0.25, 0.8, 0.9)\nfor i in a: # i 為每個元素\n print( i*100 )\n \n\n# string\na = \"ABCDEFG\"\nfor i in a: # i 為每個字母\n print( i )\n \n\n# dictionary\na = { \"id\": \"1\", \"name\": \"benson\" }\nfor i in a:\n print( i ) # i 為鍵(key),a[i] 為值\n print( a[i] )\n\n\t\n\n# Common use of for loop in data processing\na = [0.1, 0.5, 0.25, 0.8, 0.9] # 待轉換成百分比的數值\npercent = [] # 定義空 list\nfor i in a: # 把 a 內的數值逐一乘上100,然後儲存到 percent\n percent.append( i * 100 )\n\t\n\t\n\n# for loop example\nfor i in range(5):\n print( i )\n \n\t\n\t\n# check the sequence generated by range() \nlist( range(5) )\nlist( range(0, 5, 2) ) \n\n\n\n# break\nfor i in range(5):\n if i == 2:\n break\n print( i )\n\n\n# continue\nfor i in range(5):\n if i == 2:\n continue\n print( i )\n\n\n\n\n# enumerate(),同時在for loop 裡獲得元數的index以及內容。\nfor i, j in enumerate([\"a\",\"b\",\"c\"]):\n print( i )\n print( j )\n\n\t\n\t\n# another way that equavalent to enumerate()\na = [\"a\",\"b\",\"c\"]\nfor i in range(0, len(a)):\n print( i )\n print( j )\n\t\n\t\n\t\n\t\n# list of tuples\na = [ (0, \"a\"), (1, \"b\"), (2, \"c\") ]\nfor i in a: \n print( i ) # for 用於 list of tuples 時, i 會等於每個 Tuple\n \n\t\n\t\n\t\n# list of tuples - getting elements inside tuple\na = [ (0, \"a\"), (1, \"b\"), (2, \"c\") ]\nfor i in a: \n print( i[0] ) \n print( i[1] ) \n\t# i[0] 會等於 0, 1, 2,型別是整數,i[1] 會等於 \"a\", \"b\", \"c\",型別是字串。\n\t# 但由於整數無法和字串直接相加,因此我們要用 str() 函數把 i[0] 轉換為字串,\n\t# 才能夠把 i[0] 、 \", \" 、 i[1] 三個資料相加打印出屏幕。\n\n\n\t\n\t\n# list of tuples - 2 variables\na = [ (0, \"a\"), (1, \"b\"), (2, \"c\") 
]\nfor i, j in a: # 原來我們可以用兩個變數儲存 Tuple 內每個元素!\n print( i )\n print( j ) \n #for 用於 list of tuples 時, i, j 會順序等於 Tuple 內每個元素\n\n\t\n\t\n# list of tuples - 3 variables\na = [ (0, \"a\", \"a1\"), (1, \"b\", \"b1\"), (2, \"c\", \"c1\") ]\nfor i, j, k in a: # 用三個變數儲存 Tuple 內每個元素\n print( i )\n print( j ) \n print( k ) \n # for 用於 list of tuples 時, i, j, k 會順序等於 Tuple 內每個元素\n\n\n\n \n\n# exercise\n\n \n \n \n \n ","sub_path":"b教材-程式檔案/03 Python 語法3/1.11 flow control - for.py","file_name":"1.11 flow control - for.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"210580896","text":"text = input(\"DO IT: \")\ntext = str.lower(text)\nstartWord = 0\nendWord = 0\nsentence = []\n#This puts the sentence into a list of words\nfor var in range(len(text)):\n letterList = text[var]\n spacePlace = letterList.count(' ')\n if spacePlace == 0:\n endWord += 1\n else:\n sentence.append(text[startWord:endWord])\n startWord = endWord + 1\n endWord += 1\nsentence.append(text[startWord:endWord])\n\n#This detects the first word\nkeyword = sentence[0]\nif keyword == 'travel':\n prefix = \"You are traveling to\"\n \nelif keyword == 'attack':\n prefix = \"You are attacking \"\n\n#This detects for other words\notherWord = sentence[1:]\nif otherWord == ['the', 'courtyard'] or otherWord == ['to', 'the', 'courtyard']:\n suffix = \" to the courtyard, good luck!\"\nelif otherWord == ['the', 'old', 'man']:\n suffix = \" the old man, RIP\"\n\nprint(prefix + suffix)\n","sub_path":"src/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"392192836","text":"from app import db, ma, OptionalButNotEmpty, datetime\nfrom wtforms import Form, StringField, FloatField, IntegerField, BooleanField\nfrom wtforms.validators import *\nfrom marshmallow import fields\n\n\nclass Blacklist(db.Model):\n __tablename__ = 'BlackList'\n id = db.Column(\"id\", db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(\"name\", db.String)\n organization = db.Column(\"organization\", db.String)\n description = db.Column(\"description\", db.String)\n sum = db.Column(\"sum\", db.String)\n reiting = db.Column(\"reiting\", db.String)\n\n \nclass BlacklistJsonSchema(ma.Schema):\n id = fields.Int()\n name = fields.Str()\n organization = fields.Str()\n description = fields.Str()\n sum = fields.Str()\n reiting = fields.Str()\n\n\nclass BlacklistAddForm(Form):\n name = StringField('Name', [InputRequired()])\n organization = StringField('Organization', [InputRequired()])\n description = StringField('Description', [InputRequired()])\n sum = StringField('Sum', [InputRequired()])\n reiting = StringField('Reiting', [InputRequired()])\n\n\nclass BlacklistEditForm(Form):\n name = StringField('Name', [OptionalButNotEmpty()])\n organization = StringField('Organization', [OptionalButNotEmpty()])\n description = StringField('Description', [OptionalButNotEmpty()])\n sum = StringField('Sum', [OptionalButNotEmpty()])\n reiting = StringField('Reiting', [OptionalButNotEmpty()])\n","sub_path":"python-flask-api/app/models/blacklist.py","file_name":"blacklist.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"152524893","text":"# Move a distribution (in x) by a given amount (an integer).\n# 06_a_move_distribution\n# Claus Brenner, 26 NOV 2012\nfrom pylab 
import plot, show, ylim\nfrom distribution import *\nimport numpy as np\n\n\ndef move(distribution, delta):\n    \"\"\"Returns a Distribution that has been moved (x-axis) by the amount of\n    delta.\"\"\"\n\n    # --->>> Insert your code here.\n    distribution.offset += delta\n    return distribution\n\n\nif __name__ == '__main__':\n    # List of movements: move 3 times by 20.\n    moves = [20, 20, 20]\n\n    # Start with a known position: probability 1.0 at position 10.\n    position = Distribution.triangle(10, 2)\n    plot(*position.plotlists(0, 100), drawstyle='steps')\n\n    # Now move and plot.\n    for m in moves:\n        position = move(position, m)\n        plot(*position.plotlists(0, 100), drawstyle='steps')\n\n\n    ylim(0.0, 1.1)\n    show()\n","sub_path":"Unit_C/slam_06_a_move_distribution_question.py","file_name":"slam_06_a_move_distribution_question.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"156989776","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom . import views\n# import re\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),\n    url(r'create$',views.create, name='create'),\n    url(r'^(?P<id>\\d+)/edit$',views.update, name='edit'),\n    url(r'^(?P<id>\\d+)/delete$',views.delete, name='delete'),\n    url(r'^(?P<id>\\d+)$',views.showCourse, name='show'),\n]\n","sub_path":"apps/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"262538844","text":"import numpy as np\nimport theano\nfrom theano import tensor\n\nfrom .. import init\nfrom .. import nonlinearities\n\nfrom .dense import DenseLayer\nfrom .noise import DropoutLayer, GaussianNoiseLayer\nfrom .normalization import batch_norm\nfrom .shape import ReshapeLayer, PadLayer\nfrom .special import NonlinearityLayer\nfrom .merge import ElemwiseSumLayer, ShortcutLayer\n\n__all__ = [\n    \"residual_dense\",\n    \"residual_conv2d\"\n]\n\ndef residual_dense(incoming, num_units,\n                   W=init.GlorotUniform(), b=init.Constant(0.),\n                   nonlinearity=nonlinearities.rectify,\n                   noise=None, dropout=None, rescale=True,\n                   batch_normalization=True, **kwargs):\n    '''\n    Create a set of fully connected residual layers.\n\n    Parameters\n    ----------\n    incoming : a :class:`Layer` instance or a tuple\n        The layer feeding into this layer, or the expected input shape\n\n    num_units : int, list(int)\n        The number of units of the layer\n\n    W : Theano shared variable, expression, numpy array or callable\n        Initial value, expression or initializer for the weights.\n        These should be a matrix with shape ``(num_inputs, num_units)``.\n        See :func:`lasagne.utils.create_param` for more information.\n\n    b : Theano shared variable, expression, numpy array, callable or ``None``\n        Initial value, expression or initializer for the biases. If set to\n        ``None``, the layer will have no biases. Otherwise, biases should be\n        a 1D array with shape ``(num_units,)``.\n        See :func:`lasagne.utils.create_param` for more information.\n\n    nonlinearity : callable or None\n        The nonlinearity that is applied to the layer activations. 
If None\n is provided, the layer will be linear.\n\n noise : float or tensor scalar\n (GaussianNoise) Standard deviation of added Gaussian noise\n\n dropout : float or scalar tensor\n (Dropout) The probability of setting a value to zero\n\n rescale : bool\n (Dropout) If true the input is rescaled with input / (1-p) when\n deterministic is False.\n\n batch_norm : bool\n Apply batch normalization for each layer\n\n Examples\n --------\n >>> from lasagne.layers import InputLayer, ResidualDenseLayer\n >>> l_in = InputLayer((100, 20))\n >>> l1 = ResidualDenseLayer(l_in, num_units=[50, 20,40])\n\n Reference\n ---------\n Deep Residual Learning for Image Recognition (http://arxiv.org/abs/1512.03385)\n\n Notes\n -----\n Output shape from this layer is always the same as input shape.\n If the input to this layer has more than two axes, it will flatten the\n trailing axes.\n '''\n if not hasattr(num_units, '__len__'):\n num_units = [num_units]\n\n # create intermediate layer\n l = incoming\n for i in num_units[:-1]:\n l = DenseLayer(l, num_units=i, W=W, b=b, nonlinearity=nonlinearity, **kwargs)\n if batch_normalization:\n l = batch_norm(l)\n if noise is not None:\n l = GaussianNoiseLayer(l, sigma=noise)\n if dropout is not None:\n l = DropoutLayer(l, p=dropout, rescale=rescale)\n\n # create output layer\n l = DenseLayer(l, num_units=num_units[-1], W=W, b=b, nonlinearity=None, **kwargs)\n if batch_normalization:\n l = batch_norm(l)\n return NonlinearityLayer(ShortcutLayer((l, incoming)), nonlinearity=nonlinearity)\n\ndef residual_conv2d(l, increase_dim=False, projection=False):\n ''' Add a set of convoluation 2D layer and make residual layer on\n top of them.\n\n Parameters\n ----------\n l : lasagne.layer.Layer\n input layer\n increase_dim : bool\n pass\n projection : bool\n Option (B) in paper, y = F(x,W{i}) + W{j}*x\n\n Returns\n -------\n return : lasagne.layer.Layer\n output layer\n\n Reference\n ---------\n Deep Residual Learning for Image Recognition\n (http://arxiv.org/abs/1512.03385)\n\n Notes\n -----\n Batch Norm is performed on output layer before ElemwiseSum with shortcut\n connection.\n '''\n try:\n from .dnn import Conv2DDNNLayer as Conv2DLayer\n from .dnn import Pool2DDNNLayer as Pool2DLayer\n except:\n from .conv import Conv2DLayer\n from .pool import Pool2DLayer\n\n input_num_filters = l.output_shape[1]\n if increase_dim:\n first_stride = (2, 2)\n out_num_filters = input_num_filters * 2\n else:\n first_stride = (1, 1)\n out_num_filters = input_num_filters\n\n stack_1 = batch_norm(\n Conv2DLayer(l, num_filters=out_num_filters, filter_size=(3, 3),\n stride=first_stride, nonlinearity=nonlinearities.rectify,\n pad='same', W=init.HeNormal(gain='relu'))\n )\n stack_2 = batch_norm(\n Conv2DLayer(stack_1, num_filters=out_num_filters, filter_size=(3, 3),\n stride=(1, 1), nonlinearity=None,\n pad='same', W=init.HeNormal(gain='relu'))\n )\n\n # add shortcut connections\n if increase_dim:\n if projection:\n # projection shortcut, as option B in paper\n projection = batch_norm(\n Conv2DLayer(l, num_filters=out_num_filters, filter_size=(1, 1),\n stride=(2, 2), nonlinearity=None, pad='same', b=None))\n block = NonlinearityLayer(\n ElemwiseSumLayer([stack_2, projection]), nonlinearity=nonlinearities.rectify)\n else:\n # identity shortcut, as option A in paper\n # we use a pooling layer to get identity with strides, since identity layers with stride don't exist in Lasagne\n identity = Pool2DLayer(l, pool_size=1, stride=(2, 2), mode='average_exc_pad')\n padding = PadLayer(identity, [out_num_filters // 
4, 0, 0], batch_ndim=1)\n block = NonlinearityLayer(\n ElemwiseSumLayer([stack_2, padding]), nonlinearity=nonlinearities.rectify)\n else:\n block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]), nonlinearity=nonlinearities.rectify)\n\n return block\n","sub_path":"lasagne/layers/residual.py","file_name":"residual.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"324627433","text":"class Solution(object):\n def partitionLabels(self, S):\n \"\"\"\n :type S: str\n :rtype: List[int]\n \"\"\"\n\n if len(S) == 0:\n return\n\n ret = []\n char_list = []\n last_index = 1\n current_index = 0\n while current_index < last_index:\n char = S[current_index]\n\n if char in char_list:\n continue\n\n index = S.rfind(char) + 1\n if index > last_index:\n last_index = index\n current_index += 1\n\n ret.append(last_index)\n rest = self.partitionLabels(S[last_index:])\n if rest is not None:\n ret.extend(rest)\n return ret\n\n\ns = Solution()\nprint(s.partitionLabels(\"a\"))\nprint(s.partitionLabels(\"ababcbacadefegdehijhklij\"))\n","sub_path":"LeetCode/763. Partition Labels.py","file_name":"763. Partition Labels.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"281055650","text":"\"\"\"\nDownload MACA downscaled and bias corrected meterological time series and calculate ASCE standardized reference ET in Python. \n\"\"\"\n\n__name__ = 'macaetr'\n__author__ = 'John Volk'\n__version__ = '0.0.2'\n\n\nfrom macaetr.maca import MACA\n","sub_path":"macaetr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"526554670","text":"\"\"\"\nWatchit simple and useful general purpose gateway framework\n\nAlways remember to comply with the specifications of\neach resolver for the correct functioning of the gateway.\n\nDefine your resolvers modules below.\nEx: Each resolver must implement 2 fundamental methods.\n\nclass Dummy:\n def __str__(self) -> str:\n return 'Test'\n\n def __call__(self, *args, **kwargs) -> iter:\n yield data\n\n\"\"\"\nimport inspect\nimport pkgutil\n\n__title__ = 'watchit'\n__version__ = '0.1.0'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2020-2021 ZorrillosDev'\n\n\ndef load(only_str=False):\n \"\"\"\n Find modules in `resolvers` path\n \"\"\"\n for loader, name, is_pkg in pkgutil.walk_packages(__path__):\n _module = loader.find_module(name).load_module(name)\n for _, obj in inspect.getmembers(_module):\n if inspect.isclass(obj) and is_pkg:\n yield obj if not only_str else name\n\n\n__all__ = ['load']\n","sub_path":"resolvers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"571086646","text":"import requests\nfrom bs4 import BeautifulSoup\n\n'''\nPython爬取网络图片\n本示例地址:https://www.ivsky.com/tupian/gaoguai_laoren_v55393/pic_865178.html\n'''\n\n'''\n第一步:取得网页数据\n'''\nurl = 'https://www.ivsky.com/tupian/gaoguai_laoren_v55393/pic_865178.html'\nwp = requests.get(url)\n# print(wp.text)\n\n'''\n第二步:提取特征,拿数据\n'''\nsoup = BeautifulSoup(wp.text, 'lxml')\npic_url = 'https:' + soup.find('img').get('src')\n# print(pic_url)\n\n'''\n第三步:保存数据\n'''\npicture = requests.get(pic_url).content\nfile = open('D:\\\\pic.jpg', 
'wb')\nfile.write(picture)\nfile.flush()\nfile.close()\n","sub_path":"PictureSpider.py","file_name":"PictureSpider.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"436046868","text":"# Date Created: 02/11/2022\n# Author: Ryan James (WSWC)\n# Purpose: To create CA agg organization use information and populate a dataframe for WaDE_QA 2.0.\n# Notes: 1) No input csv to read, all values are more easily hardcoded into a list here and then exported to CSV.\n\n\n# Needed Libraries\n############################################################################\nimport os\nimport numpy as np\nimport pandas as pd\n\n\n# Inputs\n############################################################################\nprint(\"Reading inputs...\")\nworkingDir = \"G:/Shared drives/WaDE Data/California/AggregatedAmounts\"\nos.chdir(workingDir)\n\n# Needed WaDE columns\ncolumnslist = [\n \"OrganizationUUID\",\n \"OrganizationContactEmail\",\n \"OrganizationContactName\",\n \"OrganizationDataMappingURL\",\n \"OrganizationName\",\n \"OrganizationPhoneNumber\",\n \"OrganizationPurview\",\n \"OrganizationWebsite\",\n \"State\"]\n\n\n# Creating output dataframe (outdf)\n############################################################################\nprint(\"Populating dataframe...\")\n\noutdf = pd.DataFrame(columns=columnslist)\noutdf = outdf.append(pd.Series(), ignore_index = True) # This approach requires a blank row to be appended into the outbound dataframe.\n\noutdf.OrganizationUUID = \"CAag_O1\"\n\noutdf.OrganizationContactEmail = \"Jennifer.Stricklin@water.ca.gov\"\n\noutdf.OrganizationContactName = \"Jennifer Stricklin\"\n\noutdf.OrganizationDataMappingURL = \"https://github.com/WSWCWaterDataExchange/MappingStatesDataToWaDE2.0/tree/master/California\"\n\noutdf.OrganizationName = \"California Department of Water Resources\"\n\noutdf.OrganizationPhoneNumber = \"303-866-3581\"\n\noutdf.OrganizationPurview = \"Department of Water Resources California Water Plan program computes applied, net, and depletion water balances for California.\"\n\noutdf.OrganizationWebsite = \"https://water.ca.gov/\"\n\noutdf.State = \"CA\"\n\n\n# Check required fields are not null\n############################################################################\nprint(\"Checking required is not null...\") # Check all 'required' (not NA) columns have value (not empty).\noutdf = outdf.replace('', np.nan) # Replace blank strings by NaN, if there are any.\noutdf_nullMand = outdf.loc[(outdf[\"OrganizationUUID\"].isnull()) | (outdf[\"OrganizationUUID\"] == '') |\n (outdf[\"OrganizationContactEmail\"].isnull()) | (outdf[\"OrganizationContactEmail\"] == '') |\n (outdf[\"OrganizationContactName\"].isnull()) | (outdf[\"OrganizationContactName\"] == '') |\n (outdf[\"OrganizationDataMappingURL\"].isnull()) | (outdf[\"OrganizationDataMappingURL\"] == '') |\n (outdf[\"OrganizationName\"].isnull()) | (outdf[\"OrganizationName\"] == '') |\n (outdf[\"OrganizationPhoneNumber\"].isnull()) | (outdf[\"OrganizationPhoneNumber\"] == '') |\n (outdf[\"OrganizationWebsite\"].isnull()) | (outdf[\"OrganizationWebsite\"] == '') |\n (outdf[\"State\"].isnull()) | (outdf[\"State\"] == '')]\n\n\n# Export to new csv\n############################################################################\nprint(\"Exporting dataframe to csv...\")\n\n# The working output DataFrame for WaDE 2.0 input.\noutdf.to_csv('ProcessedInputData/organizations.csv', index=False)\n\n# Report purged 
values.\nif(len(outdf_nullMand.index) > 0):\n outdf_nullMand.to_csv('organizations_mandatoryFieldMissing.csv', index=False)\n\nprint(\"Done.\")\n","sub_path":"California/AggregatedAmounts/3_CAag_Organizations.py","file_name":"3_CAag_Organizations.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"35916399","text":"from django import forms\n\nfrom .models import Publication, VoteComment, VotePublication, Comment\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass VoteForm(forms.Form):\n VOTE_OBJ_COMMENT = 'comment'\n VOTE_OBJ_PUBLICATION = 'publication'\n VOTE_OBJ_CHOICES = [\n (VOTE_OBJ_COMMENT, 'comment'),\n (VOTE_OBJ_PUBLICATION, 'publication'),\n ]\n\n vote_type = forms.BooleanField(required=False)\n obj_type = forms.ChoiceField(choices=VOTE_OBJ_CHOICES)\n obj_id = forms.IntegerField()\n\n def clean(self):\n cleaned_data = super(VoteForm, self).clean()\n obj_type = self.cleaned_data.get('obj_type')\n obj_id = self.cleaned_data.get('obj_id')\n\n if obj_type == VoteForm.VOTE_OBJ_PUBLICATION:\n try:\n cleaned_data['obj_id'] = Publication.objects.get(id=obj_id)\n except Publication.DoesNotExist:\n self.add_error('obj_id', 'Object does not exist')\n else:\n try:\n cleaned_data['obj_id'] = Comment.objects.get(id=obj_id)\n except Comment.DoesNotExist:\n self.add_error('obj_id', 'Object does not exist')\n\n return cleaned_data\n\n def save(self, commit=True):\n vote_type = self.cleaned_data.get('vote_type')\n obj_type = self.cleaned_data.get('obj_type')\n obj_id = self.cleaned_data.get('obj_id')\n\n if obj_type == VoteForm.VOTE_OBJ_PUBLICATION:\n vote = VotePublication()\n vote.publication = obj_id\n\n else:\n vote = VoteComment()\n vote.comment = obj_id\n\n if vote_type:\n vote.is_up = True\n else:\n vote.is_up = False\n\n if commit:\n vote.save()\n\n return vote\n","sub_path":"pages/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"122126462","text":"#\n# @lc app=leetcode.cn id=189 lang=python\n#\n# [189] 旋转数组\n#\n# https://leetcode-cn.com/problems/rotate-array/description/\n#\n# algorithms\n# Easy (40.18%)\n# Likes: 478\n# Dislikes: 0\n# Total Accepted: 94.2K\n# Total Submissions: 234.2K\n# Testcase Example: '[1,2,3,4,5,6,7]\\n3'\n#\n# 给定一个数组,将数组中的元素向右移动 k 个位置,其中 k 是非负数。\n# \n# 示例 1:\n# \n# 输入: [1,2,3,4,5,6,7] 和 k = 3\n# 输出: [5,6,7,1,2,3,4]\n# 解释:\n# 向右旋转 1 步: [7,1,2,3,4,5,6]\n# 向右旋转 2 步: [6,7,1,2,3,4,5]\n# 向右旋转 3 步: [5,6,7,1,2,3,4]\n# \n# \n# 示例 2:\n# \n# 输入: [-1,-100,3,99] 和 k = 2\n# 输出: [3,99,-1,-100]\n# 解释: \n# 向右旋转 1 步: [99,-1,-100,3]\n# 向右旋转 2 步: [3,99,-1,-100]\n# \n# 说明:\n# \n# \n# 尽可能想出更多的解决方案,至少有三种不同的方法可以解决这个问题。\n# 要求使用空间复杂度为 O(1) 的 原地 算法。\n# \n# \n#\n\n# @lc code=start\nclass Solution(object):\n\n # Mutable sequences can be grafted, excised, \n # and otherwise modified in place using slice\n def rotate2(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n N = len(nums)\n k %= N\n if N == 1 or k % N <= 0:\n return\n \n # replace N-k and k items\n nums[:N-k], nums[-k:] = nums[-k:], nums[:N-k]\n return\n\n def rotate1(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n N = len(nums)\n k %= N\n if N == 1 or k % N <= 0:\n return\n 
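        # worked example: nums = [1,2,3,4,5,6,7], k = 3, N = 7\n        #   reverse first N-k: [4,3,2,1,5,6,7]\n        #   reverse last k:    [4,3,2,1,7,6,5]\n        #   reverse all:       [5,6,7,1,2,3,4]  (rotated right by 3)\n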
\n\n        # KEY: move k positions\n        # The 1st reverse applies to the first N-k items\n        # The 2nd reverse applies to the k items to be moved\n        # Therefore, the sequence is: reverse the first N-k, then the last k\n\n        self.reverse(0, N-k-1, nums)\n        self.reverse(N-k, N-1, nums)\n        self.reverse(0, N-1, nums)\n        return\n\n    def reverse(self, start, end, nums):\n        if start >= end:\n            return\n        while start < end:\n            nums[start], nums[end] = nums[end], nums[start]\n            start += 1\n            end -= 1\n        return\n\n    # brute-force\n    def rotate(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: None Do not return anything, modify nums in-place instead.\n        \"\"\"\n        N = len(nums)\n        k %= N\n        if N == 1 or k <= 0:\n            return\n\n        for i in xrange(k):\n            self.moveOneStep(nums)\n\n        return\n\n    # move every element of nums forward one step\n    # nums[i] = nums[i-1]\n\n    def moveOneStep(self, nums):\n        N = len(nums)\n        if N <= 1:\n            return\n        tmp = nums[-1]\n        for i in xrange(N-1,-1,-1):\n            nums[i] = nums[i-1]\n        nums[0] = tmp\n        return\n\n# @lc code=end\n\n","sub_path":"Week_01/G20190343010139/leetcode_189_0139.py","file_name":"leetcode_189_0139.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"451290821","text":"import numpy as np\n\n\nclass AdalineGD(object):\n    \"\"\"\n    Parameters\n    --------------\n    eta0 : learning rate\n    n_iter : iterations over the data set\n\n    Attributes\n    ---------------\n    w_ : 1d weight array\n    error_ : error list\n    \"\"\"\n\n    def __init__(self,eta0,n_iter):\n        self.eta0=eta0\n        self.n_iter=n_iter\n        self.w_ = []\n\n\n    def fit(self,data_matrix,class_labels_vector):\n        \"\"\"\n        Define attributes\n        \"\"\"\n        self.w_ = np.zeros(1 + data_matrix.shape[1])\n        self.error_ = []\n        self.cost_vector = list()\n        self.threshold_vector = list()\n\n        for i in range(self.n_iter):\n            result_vector = self.matrix_to_weight_vector_dot_product(data_matrix)\n            error_vector = class_labels_vector - result_vector\n\n            self.w_[1:] += self.eta0 * (np.dot(data_matrix.T,error_vector))\n            self.w_[0] += self.eta0 * error_vector.sum()\n            self.cost_vector.append(sum(np.square(error_vector))/2)\n            self.threshold_vector.append(self.w_[0])\n\n\n    def matrix_to_weight_vector_dot_product(self,data_matrix):\n        return np.dot(data_matrix,self.w_[1:])+self.w_[0]\n\n    def activation(self,test_data_matrix):\n        return self.matrix_to_weight_vector_dot_product(test_data_matrix)\n\n    def predict(self,test_data_matrix):\n        return np.where(self.activation(test_data_matrix) >=0.0,1,-1)\n\n    def plot(self):\n        print(self.w_.shape)\n        import matplotlib.pyplot as plt\n        import seaborn as sbn\n        sbn.set()\n        fig,f_index=plt.subplots(nrows=1,ncols=2,figsize=(8,4))\n        f_index[0].set_xlabel(\"Iterations\")\n        f_index[0].set_ylabel(\"Cost\")\n        f_index[0].scatter(range(1,len(self.cost_vector)+1),self.cost_vector,marker='o',color=\"b\")\n        f_index[1].set_xlabel(\"Iterations\")\n        f_index[1].set_ylabel(\"Threshold\")\n        f_index[1].scatter(range(1,len(self.threshold_vector)+1),self.threshold_vector,marker='o',color=\"r\")\n        plt.show()\n\n\nclass PreProcessing(object):\n\n    def __init__(self):\n\n        pass\n\n    def set_data(self,data,target):\n        self.data=data\n        self.target=target\n        return self\n\n    def get_train_test_splits(self):\n        import sklearn.model_selection as ms\n        return ms.train_test_split(self.data,self.target,random_state=1,shuffle=True,train_size=0.7)\n\n\ndef main():\n    import sklearn.datasets as data\n    iris = data.load_iris()\n\n    p = PreProcessing()\n
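    # note: the 70/30 split below is not used by the demo fit; fit() is trained on iris\n    # classes 0 and 2 with their raw labels (0 and 2), while predict() emits +1/-1,\n    # a quirk kept from the original demo\n    training_data,test_data,training_target,test_target = 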
p.set_data(iris.data,iris.target).get_train_test_splits()\n # print(test_data)\n # print(test_target)\n model = AdalineGD(0.001,200)\n # print(iris.data[:10,:])\n # print(np.array(iris.target[0:10])[np.newaxis])\n data =np.concatenate((iris.data[:6,:],iris.data[145:,:]))\n target = np.concatenate((iris.target[0:6],iris.target[145:]))\n\n print(data.shape)\n print(target.shape)\n model.fit(data,target)\n model.plot()\n\nif __name__ == '__main__':\n main()","sub_path":"ML/AdalineGD.py","file_name":"AdalineGD.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"82962281","text":"import numpy as np\nimport h5py\nfrom PIL import Image\nimport os\nfrom matplotlib import pyplot as plt\nimport pickle\n\n\nDATA_PREFIX = '/Users/yee/Downloads/NYUv2pics'\nOUT_PREFIX = '/Users/yee/Desktop/NYUv2'\n\nfor i in range(1449):\n depth = np.array(\n Image.open(\n os.path.join(DATA_PREFIX, 'nyu_depths', f'{i}.png')\n )\n )\n img = np.array(\n Image.open(\n os.path.join(DATA_PREFIX, 'nyu_images', f'{i}.jpg')\n )\n )\n # channels_first\n img = np.transpose(img, [2, 0, 1])\n depth = np.expand_dims(depth, 0)\n with open(os.path.join(OUT_PREFIX, f'{i}.pickle'), 'wb') as f:\n pickle.dump(np.concatenate([img, depth]), f)\n\n# depth = np.array(Image.open(\n# os.path.join(DATA_PREFIX, 'nyu_depths', '0.png')\n# ))\n\n# img = np.array(Image.open(\n# os.path.join(DATA_PREFIX, 'nyu_images', '0.jpg')\n# ))\n# img = np.transpose(img, [2, 0, 1])\n# depth = np.expand_dims(depth, 0)\n# _, height, width = depth.shape\n# print(np.concatenate([img, depth]).shape)\n\n# drgb = np.concatenate([img, depth])\n\n# # with open('test.pickle', 'wb') as f:\n# # pickle.dump(drgb, f)\n\n# with open('test.pickle', 'rb') as f:\n# drgb = pickle.load(f)\n\n# # Image.fromarray(np.transpose(drgb, [1, 2, 0])).save('test.png')\n# plt.figure()\n# plt.imshow(np.transpose(drgb[:3], [1, 2, 0]))\n# plt.figure()\n# plt.imshow(np.reshape(drgb[3], [height, width]))\n# plt.show()\n\n\n\n\n","sub_path":"super_resolution/code/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"624167612","text":"# 函数传参:测试用例传参需要用装饰器@pytest.mark.parametrize,里面写两个参数\n\nimport pytest\n\n# 测试登录数据\ntest_login_data = [(\"admin\", \"111111\"), (\"admin\", \"\")]\n\ndef login(user, psw):\n \"\"\"普通登录函数\"\"\"\n print(\"登录账号:%s\" % user)\n print(\"登录密码:%s\" % psw)\n if psw:\n return True\n else:\n return False\n\n@pytest.mark.parametrize(\"user, psw\", test_login_data)\ndef test_login(user, psw):\n \"\"\"登录用例\"\"\"\n result = login(user, psw)\n assert result == True, \"失败原因:密码为空\"\n\nif __name__ == '__main__':\n pytest.main([\"-s\", \"test_01.py\"])","sub_path":"pytest14/test_01.py","file_name":"test_01.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"375365796","text":"# -*- coding: utf-8 -*-\nfrom arc.http import session_read, session_write\nfrom arc.http.session.jwe import _create_jwe, _parse_jwe\n\n\ndef test_jwe_read_write():\n payload = {\"foo\": {\"bar\": 123}, \"yak\": None}\n token = _create_jwe(payload)\n parsed = _parse_jwe(token)\n del parsed[\"iat\"] # delete issued at timestamp\n assert parsed == payload\n\n\ndef test_jwe_cookies(monkeypatch):\n monkeypatch.setenv(\"SESSION_TABLE_NAME\", \"jwe\")\n cookie = session_write({\"count\": 0})\n mock = 
{\n \"headers\": {\n \"cookie\": cookie,\n },\n }\n\n session = session_read(mock)\n assert \"count\" in session\n assert session[\"count\"] == 0\n","sub_path":"tests/test_http_sessions.py","file_name":"test_http_sessions.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"23571870","text":"import os, uuid\n\nBOT_NAME = 'hcicrawler'\nBOT_VERSION = '0.1'\n\nSPIDER_MODULES = ['hcicrawler.spiders']\nNEWSPIDER_MODULE = 'hcicrawler.spiders'\nUSER_AGENT = '%s/%s' % (BOT_NAME, BOT_VERSION)\n\nITEM_PIPELINES = [\n 'hcicrawler.pipelines.OutputFeeds',\n 'hcicrawler.pipelines.OutputStore',\n 'hcicrawler.pipelines.RemoveBody',\n 'hcicrawler.pipelines.OutputQueue',\n]\n\n#OUTPUT_DIR = 'output'\n\nDOWNLOADER_HTTPCLIENTFACTORY = 'hcicrawler.webclient.LimitSizeHTTPClientFactory'\n\nMONGO_HOST = 'localhost'\nMONGO_DB = 'hci'\nMONGO_QUEUE_COL = 'crawler.queue'\nMONGO_PAGESTORE_COL = 'crawler.pages'\n\nif 'SCRAPY_JOB' in os.environ:\n JOBID = os.environ['SCRAPY_JOB']\nelse:\n JOBID = str(uuid.uuid4())\n","sub_path":"hcicrawler/hcicrawler/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"620140989","text":"#\n# Copyright (c) 2016, Prometheus Research, LLC\n#\n\nfrom rex.action import typing\nfrom rex.widget import Field\n\nfrom .assessment_base import AssessmentAction\n\n\n__all__ = (\n 'ViewAssessmentAction',\n)\n\n\nclass ViewAssessmentAction(AssessmentAction):\n name = 'assessment-view'\n js_type = 'rex-acquire-actions', 'ViewAssessment'\n\n entity = Field(\n typing.RowTypeVal(),\n doc='The record containing the Assessment.',\n )\n\n def context(self):\n return (\n self.domain.record(self.entity),\n self.domain.record(),\n )\n\n","sub_path":"src/rex.acquire_actions/src/rex/acquire_actions/view_assessment.py","file_name":"view_assessment.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"362397527","text":"#!/usr/bin/env python3\n# -*-coding:utf-8-*-\n#小说爬虫\n#目前针对网站(#www.biqukan.com)\n#vision 0.1\n#mpage_url : 小说目录主页地址\n#cpage_url : 小说各章地址\n#chapters : 章节列表\n#cname : 章节名称\n\nfrom bs4 import BeautifulSoup\nfrom urllib import request\nimport sys\nimport re\nimport requests\n\nfileName=input(\"请输入小���名:\")\nfile = open(fileName+\".txt\",\"w\",encoding=\"utf-8\")\n#选定小说目录主页,确定头文件和客户运行环境\nmpage_url=input(\"请输入小说章节页的地址:\")\nhead = {}\nhead[\"User-Agent\"] = \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36\"\n#抓取小说主页源文件内容,并转码成中文 第一种逻辑方法\n#mpage_req = urllib.request.Request(url = mpage_url , headers = head)\n#mpage_res = urllib.request.urlopen(mpage_req)\n#mpage_html = mpage_res.read().decode(\"gbk\", \"ignore\")\n#第二种逻辑方法更简单直白,输出的文本要带.text才行\nmpage_res = requests.get(url = mpage_url , headers = head)\n#将爬下的主页源文件用Beautiful拓展\n#用chrome的开发工具,查看主页可以发现,章节及链接内容所在\nsoup = BeautifulSoup(mpage_res.text, \"lxml\")\n#获取章节列表\nchapters = soup.select(\"div.listmain dl a\")\nlist1 = []\nlist2 = []\nss = 1\nfor line in chapters:\n chapters_url = \"http://www.biqukan.com\" + line.get('href')\n chapters_name = line.string\n mpage = chapters_name + \":\" + chapters_url\n list1.append(chapters_name)\n list2.append(chapters_url)\nchapters_number = []\nfor ll in list1:\n kkk = ll[:3]\n chapters_number.append(kkk)\nif \"第一章\" in chapters_number:\n 
i_1 = chapters_number.index(\"第一章\")\nif \"第1章\" in chapters_number:\n i_1 = chapters_number.index(\"第1章\")\nif \"第00\" in chapters_number:\n i_1 = chapters_number.index(\"第00\")\nchapters_list = list2[i_1:]\nnumber = len(chapters_list)\nfor i in chapters_list:\n txt_res = requests.get(url = i, headers = head)\n txt_soup = BeautifulSoup(txt_res.text, \"lxml\")\n title = txt_soup.select(\"#wrapper .book .content h1\")[0].text.replace(\"/x0a\",\"\")\n txt = txt_soup.select(\"#wrapper .book .content .showtxt\")[0].text.replace(\"/x0a\",\"\")\n file.write(title + \"\\n\\n\")\n file.write(txt + \"\\n\\n\")\n sys.stdout.write(\"下载进度:%.1f%%\" % float(ss / number*100) + \"\\r\")\n sys.stdout.flush()\n ss+=1\nfile.close()\n","sub_path":"爬小说第一波0.1.py","file_name":"爬小说第一波0.1.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"131974434","text":"__author__ = 'Morten'\nimport csv\n\ncount_b = 0\nwith open('C:/Users/Morten/Downloads/dsenames2out.csv', 'w', newline='') as out:\n r = csv.reader(open('C:/Users/Morten/Downloads/dsenames.csv'), delimiter=',')\n iter_r = iter(r)\n next(iter_r)\n for i in iter_r:\n count_b += 1\n line = []\n\n # According to WRDS this is an numeric value. However, sometimes there will be letter.\n #for b in [7, 24, 40, 42]:\n # if i[b].isalpha():\n # i[b] = '\\\\N'\n\n line.append(count_b)\n for x in i:\n if not x:\n line.append('\\\\N')\n else:\n line.append(x)\n\n csv_out = csv.writer(out)\n csv_out.writerow(line)\n","sub_path":"db_import/csv2dsenames.py","file_name":"csv2dsenames.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"398804274","text":"#!/usr/bin/env python3\n# coding: utf-8\n# File: question_classifier.py\n# Author: lhy\n# Date: 18-10-4\n\n# 否定词\nimport os\nimport ahocorasick\n\nclass QuestionClassifier:\n def __init__(self):\n cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])\n # 特征词路径\n self.zuopin_path = os.path.join(cur_dir, 'dict/zuopin.txt')\n self.person_path = os.path.join(cur_dir, 'dict/person.txt')\n self.job_path = os.path.join(cur_dir, 'dict/job.txt')\n self.type_path = os.path.join(cur_dir, 'dict/z_type.txt')\n\n self.date_path = os.path.join(cur_dir, 'dict/date.txt')\n self.week_path = os.path.join(cur_dir, 'dict/week.txt')\n self.producer_path = os.path.join(cur_dir, 'dict/producer.txt')\n self.chara_path = os.path.join(cur_dir, 'dict/character.txt')\n self.tag_path = os.path.join(cur_dir, 'dict/tags.txt')\n self.deny_path = os.path.join(cur_dir, 'dict/deny.txt')\n # 加载特征词\n self.zuopin_wds= [i.strip() for i in open(self.zuopin_path) if i.strip()]\n self.person_wds= [i.strip() for i in open(self.person_path) if i.strip()]\n self.job_wds= [i.strip() for i in open(self.job_path) if i.strip()]\n self.type_wds= [i.strip() for i in open(self.type_path) if i.strip()]\n self.tag_wds= [i.strip() for i in open(self.tag_path) if i.strip()]\n # self.date_wds= [i.strip() for i in open(self.date_path) if i.strip()]\n # self.week_wds= [i.strip() for i in open(self.week_path) if i.strip()]\n self.producer_wds = [i.strip() for i in open(self.producer_path) if i.strip()]\n self.chara_wds= [i.strip() for i in open(self.chara_path) if i.strip()]\n # 同义词\n self.cv_swds = ['配音', '配的', 'cv','配过']\n self.msc_swds = ['配乐', 'ost', 'bgm', '曲', '歌', '背景音乐', '音乐']\n self.drct_swds = ['监督','导的']\n self.job_wds= self.job_wds+ self.cv_swds + self.msc_swds + 
self.drct_swds\n\n self.region_words = set(self.zuopin_wds + self.person_wds + self.job_wds + self.type_wds + self.tag_wds + self.producer_wds + self.chara_wds)\n self.deny_wds = [i.strip() for i in open(self.deny_path) if i.strip()]\n # 构造领域actree\n self.region_tree = self.build_actree(list(self.region_words))\n # 构建词典\n self.wdtype_dict = self.build_wdtype_dict()\n # 问句疑问词\n self.person_qwds = ['谁']\n self.chara_qwds = ['主角', '主演', '角色']\n self.rec_qwds = ['想看', '有没有', '类似', '推荐', '差不多', '同类型','相似']\n self.search_qwds = ['有哪些','有什么','讲什么']\n self.job_qwds = ['负责什么','担任什么','做什么']\n\n print('model init finished ......')\n\n return\n\n '''分类主函数'''\n def classify(self, question):\n data = {}\n medical_dict = self.check_anime(question)\n data['args'] = medical_dict\n #收集问句当中所涉及到的实体类型\n types = []\n for type_ in medical_dict.values():\n types += type_\n question_type = 'others'\n\n question_types = []\n\n # 查找作品制作人员\n if self.check_words(self.person_qwds, question) and ('zuopin' in types) and ('job' in types):\n question_type = 'zuopin_person'\n question_types.append(question_type)\n\n # 查找角色声优是谁\n if self.check_words(self.person_qwds, question) and ('character' in types) and ('job' in types):\n question_type = 'chara_person'\n question_types.append(question_type)\n\n\n # 推荐番剧\n if self.check_words(self.rec_qwds, question) and 'zuopin' in types:\n question_type = 'similar_rec'\n if 'tag' in types:\n question_type += '_plus'\n question_types.append(question_type)\n if self.check_words(self.rec_qwds, question) and 'producer' in types:\n question_type = 'pd_rec'\n if 'tag' in types:\n question_type += '_plus'\n question_types.append(question_type)\n if self.check_words(self.rec_qwds, question) and 'person' in types:\n question_type = 'person_rec'\n if 'tag' in types:\n question_type += '_plus'\n question_types.append(question_type)\n if self.check_words(self.rec_qwds, question) and (types==[]):\n question_type = 'random_rec'\n question_types.append(question_type)\n\n # 查询番剧\n if self.check_words(self.search_qwds,question) and 'person' in types:\n question_type = 'person_srch'\n if 'tag' in types:\n question_type += '_plus'\n question_types.append(question_type)\n\n if self.check_words(self.search_qwds,question) and 'producer' in types:\n question_type = 'pd_srch'\n if 'tag' in types:\n question_type += '_plus'\n question_types.append(question_type)\n\n if self.check_words(self.job_qwds,question) and 'person' in types:\n question_type = 'person_job'\n question_types.append(question_type)\n\n\n \n # 若没有查到相关的外部查询信息,那么则返回基本信息\n if question_types == [] and 'zuopin' in types:\n question_types = ['zuopin_summary']\n if question_types == [] and 'character' in types:\n question_types = ['character_zuopin_cv']\n if question_types == [] and 'person' in types:\n question_types = ['person_job']\n if question_types == [] and 'tag' in types:\n question_types = ['tag_rec']\n\n if (not medical_dict) and (not question_types):\n return {}\n # 将多个分类结果进行合并处理,组装成一个字典\n data['question_types'] = question_types\n\n return data\n# set(self.zuopin_wds + self.person_wds + self.job_wds + self.type_wds + self.tag_wds + self.producer_wds + self.chara_wds)\n \n '''构造词对应的类型'''\n def build_wdtype_dict(self):\n wd_dict = dict()\n for wd in self.region_words:\n wd_dict[wd] = []\n if wd in self.zuopin_wds:\n wd_dict[wd].append('zuopin')\n if wd in self.person_wds:\n wd_dict[wd].append('person')\n if wd in self.job_wds:\n wd_dict[wd].append('job')\n if wd in self.type_wds:\n wd_dict[wd].append('type')\n if wd in self.producer_wds:\n 
wd_dict[wd].append('producer')\n if wd in self.chara_wds:\n wd_dict[wd].append('character')\n if wd in self.deny_wds:\n wd_dict[wd].append('deny')\n if (wd in self.tag_wds) and len(wd_dict[wd])==0:\n wd_dict[wd].append('tag')\n\n\n return wd_dict\n\n '''构造actree,加速过滤'''\n def build_actree(self, wordlist):\n actree = ahocorasick.Automaton()\n for index, word in enumerate(wordlist):\n actree.add_word(word, (index, word))\n actree.make_automaton()\n return actree\n\n '''问句过滤'''\n def check_anime(self, question):\n region_wds = []\n for i in self.region_tree.iter(question):\n wd = i[1][1]\n region_wds.append(wd)\n stop_wds = []\n for wd1 in region_wds:\n for wd2 in region_wds:\n if wd1 in wd2 and wd1 != wd2:\n stop_wds.append(wd1)\n pre_wds = [i for i in region_wds if i not in stop_wds]\n final_wds=[]\n for i in pre_wds:\n if i in self.cv_swds:\n i = \"声优\"\n if i in self.msc_swds:\n i = \"音乐人\"\n if i in self.drct_swds:\n i = \"导演\"\n final_wds.append(i)\n final_dict = {i:self.wdtype_dict.get(i) for i in final_wds}\n\n return final_dict\n\n '''基于特征词进行分类'''\n def check_words(self, wds, sent):\n for wd in wds:\n if wd in sent:\n return True\n return False\n\n\nif __name__ == '__main__':\n handler = QuestionClassifier()\n while 1:\n question = input('input an question:')\n data = handler.classify(question)\n print(data)","sub_path":"question_classifier.py","file_name":"question_classifier.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"236040435","text":"class Solution(object):\n def isStrobogrammatic(self, num):\n \"\"\"\n :type num: str\n :rtype: bool\n \"\"\"\n\n xform = { '0' : '0', '1' : '1', '6' : '9', '8' : '8', '9' : '6' }\n size = len( num )\n\n for i in xrange( ( size + 1 ) /2 ):\n j = ( size - 1 ) - i\n if xform.get( num[ i ] ) != num[ j ]:\n return False\n\n return True\n","sub_path":"246.py","file_name":"246.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"119365632","text":"import re\nfrom diff_result import DiffResult\nfrom difflib import SequenceMatcher\n\n# *Goal*\n# Parse a diff files in the most efficient way possible.\n# Keep these in mind, speed, maintainability, evolvability, etc....\n# Compute the following\n# - List of files in the diffs\n# - number of regions\n# - number of lines added\n# - number of lines deleted\n# - list of function calls seen in the diffs and their number of calls\n\n\nclass DiffParser:\n\n def __init__(self):\n print(\"DiffParser created\")\n\n def parse(self, file):\n # Regex Patterns\n filelist_rgx = r'^diff --[^\\s]* (.*)'\n region_rgx = r'^@@ -\\d+(,\\d+)? \\+\\d+(,\\d+)? 
@@.*'\n        added_rgx = r'^(\\+).*'\n        deleted_rgx = r'^(\\-).*'\n        fnlist_rgx = r'(?<=(?:\\s|\\.))([\\w]+)(?=\\()'\n\n        # Object holding results\n        diff_res = DiffResult()\n\n        lines = file.readlines()\n        # Lines such as\n        # +++ <filename>\n        # --- <filename>\n        # are caught in the regex for added lines;\n        # keeping a \"bubble\" after a region starts allows us to manually filter those out.\n        area_start = 0\n        for line in lines:\n            if re.search(filelist_rgx, line):\n                path_a = re.search(filelist_rgx, line).group(1).split(\" \")[0]\n                path_b = re.search(filelist_rgx, line).group(1).split(\" \")[1]\n                if len(path_a) == 0 or len(path_b) == 0:\n                    raise ValueError\n                match = SequenceMatcher(None, path_a, path_b).find_longest_match(0, len(path_a), 0, len(path_b))\n                diff_res.files.append(path_a[match.a: match.a + match.size])\n\n                area_start = 4\n            if re.search(region_rgx, line):\n                diff_res.regions += 1\n            if re.search(added_rgx, line) and area_start < 0:\n                diff_res.lineAdded += 1\n            if re.search(deleted_rgx, line) and area_start < 0:\n                diff_res.lineDeleted += 1\n            if re.search(fnlist_rgx, line):\n                diff_res.functionCalls[re.search(fnlist_rgx, line).group(1)] += 1\n            area_start -= 1\n        return diff_res\n\n","sub_path":"diff_parser.py","file_name":"diff_parser.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"339867800","text":"# Functions in Python\n# def <name>(<parameters>):\n#     <body>\n\ndef matrix_mul(a, b):\n    return [[sum(i * j for i, j in zip(r, c)) for c in zip(*b)] for r in a]\n\n\na = [[1, 2], [3, 4]]\nb = [[5, 1], [2, 1]]\n\nprint(matrix_mul(a, b))\n\n\n# Scope in function\n# vars in a function are in the local scope\n# vars outside of functions are in the global scope\n# vars in the local scope are NOT available in the global scope, but globals ARE available in the local scope\ndef my_function():\n    test = 1\n    print(test)\n\n\ntest = 0\nmy_function()\nprint(test)\n\n\n# The scopes can be 'nested', following the same rules in each scope\ndef outer():\n    test = 1  # Try commenting out this line to see that test would be global value 0\n\n    def inner():\n        test = 2  # Commenting this would see test = 1, because it would take the NEAREST enclosing scope (which is test=1)\n        print('inner: ', test)\n\n    inner()\n    print('outer: ', test)\n\n\ntest = 0\nouter()\nprint('global: ', test)\n\n\n# Local and nonlocal\n# We can define vars in each namespace, but cannot actually modify them outside of the scope\n# Nonlocal\ndef outer2():\n    # nonlocal test  # This would not work, nonlocal only works on enclosing scopes, not on global\n    test = 1\n\n    def inner2():\n        # nonlocal test  # bind test to the nearest enclosing scope, so test = 1 in outer gets overwritten\n        test = 2\n        print('inner: ', test)\n\n    inner2()\n    print('outer: ', test)\n\n\ntest = 0\nouter2()\nprint('global: ', test)\n\n\n# Global\n\ndef outer3():\n    test = 1\n\n    def inner3():\n        global test  # The same idea as nonlocal, but binds directly to the global scope, so test in global gets overwritten\n        test = 2\n        print('inner: ', test)\n\n    inner3()\n    print('outer: ', test)\n\n\ntest = 0\nouter3()\nprint('global: ', test)\n\n\n# Input parameters\n# assigns an object to a local variable name\ndef func(y):\n    # y is a local scope var that points to the SAME object as x outside the func\n    print(id(y))\n    print(y)\n\n\nx = 3\nprint(id(x))\nfunc(x)\n\n\n# assignments to argument names do not affect the caller\n\ndef func2(x):\n    x = 7  # define a local var, not changing the global one\n\n\nfunc2(x)\nprint(x)\n\n# changing a mutable DOES affect the caller\nx = [1, 2, 
3]\nprint(x)\n\n\ndef func3(x):\n x[1] = 4567 # this WILL affect the caller\n\n\nfunc3(x)\nprint(x)\n\n\n# keyword arguments\ndef keyword(a, b, c):\n print(a, b, c)\n\n\nkeyword(a=1, c=2, b=3) # Here the order does not matter, they are matched by key\n\n\n# default values\ndef default(a, b=4, c=88):\n print(a, b, c)\n\n\ndefault(1)\ndefault(b=5, a=7, c=9)\ndefault(42, c=9)\n\n\n# Variable positional arguments\n# You can define a parameter is not beeing of a fixed number of arguments\ndef minimum(*n):\n print(n) # n is a Tuple\n\n if n:\n mn = n[0]\n for value in n[1:]:\n if value < mn:\n mn = value\n print(mn)\n\n\nminimum(1, 3, -7, 9)\nminimum(33, 2456, -1, 5676567, 56)\nminimum()\n\n\n# Using * in a function calls means you unpack the values of the tuple, and are effectively\n# calling the function with the elements themselves (so unpacking(a, b, c, d))\ndef unpacking(*args):\n print(args)\n\n\nvalues = (1, 3, -7, 9)\n\nunpacking(values)\nunpacking(*values)\n\n\n# Variable keyword arguments\n# By adding ** in front of a dictionary as argument in function, the arguments is a dictionary wth keywords instread of a tuple\ndef dictionary(**kwargs):\n print(kwargs)\n\n\ndictionary(a=1, b=42)\ndictionary(**dict(a=1, b=42))\ndictionary(**{'a': 1, 'b': 42})\n\n\n# What is the value of being able to add variable amount of parameters?\n# for example : a database connection function\ndef connect(**options):\n conn_params = {\n 'host': options.get('host', '127.0.0.1'),\n 'port': options.get('port', 5432),\n 'user': options.get('user', ''),\n 'pwd': options.get('pwd', ''),\n }\n\n print(conn_params)\n\n\nconnect()\nconnect(host='127.0.0.42', port=5433)\nconnect(port=5431, user='test', pwd='gandalf')\n\n\n# Keyword only arguments\n# The keyword part MUST be called\n# Not much used.\ndef kwo(*a, c):\n print(a, c)\n\n\nkwo(1, 2, 3, c=7)\nkwo(c=4)\n\n\n# Watch out for mutable defaults!!!!\n# every operation on the defaults will be kept on subsequent function calls!!!\ndef mutable(a=[], b={}):\n print(a)\n print(b)\n print('#' * 12)\n a.append(len(a)) # This will affect default value of a!\n b[len(a)] = len(a) # This will affect default value of b!\n\n\nmutable()\nmutable()\n# this in interesting : introduce one parameter that doesn't use a default\nmutable(a=[1, 2, 3], b={'B': 1})\nmutable()\n\n\n# Return values\n# In Python you can return almost anything you want.\n# Since you can return a tuple, you are not restricted to 1 return value\n# By default, a function returns None\n\ndef returns():\n pass\n\n\nreturns() # The return value is not collected and is lost\na = returns()\nprint(a)\n\n\ndef factorial(n):\n if n in (0, 1):\n return 1\n result = n\n for k in range(2, n):\n result *= k\n return result\n\n\nprint(factorial(5))\n\n\n# Multiple return values using tuples\ndef moddiv(a, b):\n return a // b, a % b\n\n\nprint(moddiv(20, 7))\n\n\n# Final considerations on functions:\n# - They should do one thing (Single Responsibility)\n# - They should be small\n# - They should not contain too many input parameters\n# - They should be consistent in their return values\n# - They shouldn't have side effects\n\n\n# SPecial type : Recursive functions\ndef factorial_recursive(n):\n if n in (0, 1):\n return 1\n return factorial_recursive(n - 1) * n\n\n\nprint(factorial_recursive(5))\n\n\n# Special type : Anonymous functions\n# are called Lambda in Python\n\ndef get_multiples_of_five(n):\n return list(filter(lambda k: not k % 5, range(n)))\n\n\nprint(get_multiples_of_five(30))\n\nadder_lambda = lambda a, b: a + 
b\nprint(adder_lambda('test', 'ing'))\n\nto_upper_lambda = lambda s: s.upper()\nprint(to_upper_lambda('testing'))\n\n# Importing objects (functions)\n# You can import functions and modules using\n# - import <module>\n# - from <module> import <name>\n","sub_path":"work/basics/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"312846205","text":"from LoopsHelpers import Decomposer\nfrom LoopsHelpers import Experiment\nimport sys\nimport os\nfrom multiprocessing import Pool\nfrom argparse import ArgumentParser\nimport json\nimport copy\nimport itertools\nfrom LoopsHelpers.deepsetting import setDeep\nfrom LoopsHelpers import SettingsCollectionParser\n\nclass DecomposerRunner:\n    def __init__(self, extraArgs={}):\n        self.extraArgs = extraArgs\n    def __call__(self, settingsObjDict):\n        dec = Decomposer(str(Experiment().logsFolder() / 'runs.log'))\n        dec.loadSettingsFromObj(settingsObjDict)\n        dec.overwriteExisting = settingsObjDict['overwriteExisting']\n        dec.decompose(self.extraArgs)\n\ndef runDecomposer(settingsObj,extraArgs):\n    dec = Decomposer(str(Experiment().logsFolder() / 'runs.log'))\n    dec.loadSettingsFromObj(settingsObj)\n    dec.overwriteExisting = settingsObj['overwriteExisting']\n    dec.decompose(extraArgs)\n\nif __name__ == \"__main__\":\n    # CLI arguments\n    parser = ArgumentParser(description='Decompose a field with multiple settings, using Python multiprocessing')\n    parser.add_argument('settingsText', type=str, help='.txt file containing lines of .json files to run, or a directory with .json files when running in folder mode')\n    parser.add_argument('poolSize', type=int, help='Size of the pool to use. Specify <= 1 for non-mp sequential run')\n    parser.add_argument('-c','--checkexistence', help='Checks existence of run, continues if incomplete',action='store_true')\n    parser.add_argument('-w','--writesettings', help='Write the settings to the result folder of the run',action='store_true')\n    parser.add_argument('-o','--overwrite', help='Overwrite previous results',action='store_true')\n    parser.add_argument('-f','--parsefolder', help='Parse folder?',action='store_true')\n    parser.add_argument('-d','--deduplicate',help='Deduplicate results in folder based on mtime', action='store_true')\n
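    # e.g. -s 3,2 loads the stored result of iteration 3, step 2 and resumes with step 3\n    parser.add_argument('-s','--startFrom',help='Start from the given iteration and step, specified as <it>,<step>. 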
+{"seq_id":"312846205","text":"from LoopsHelpers import Decomposer\nfrom LoopsHelpers import Experiment\nimport sys\nimport os\nfrom multiprocessing import Pool\nfrom argparse import ArgumentParser\nimport json\nimport copy\nimport itertools\nfrom LoopsHelpers.deepsetting import setDeep\nfrom LoopsHelpers import SettingsCollectionParser\n\nclass DecomposerRunner:\n    def __init__(self, extraArgs={}):\n        self.extraArgs = extraArgs\n    def __call__(self, settingsObjDict):\n        dec = Decomposer(str(Experiment().logsFolder() / 'runs.log'))\n        dec.loadSettingsFromObj(settingsObjDict)\n        dec.overwriteExisting = settingsObjDict['overwriteExisting']\n        dec.decompose(self.extraArgs)\n\ndef runDecomposer(settingsObj,extraArgs):\n    dec = Decomposer(str(Experiment().logsFolder() / 'runs.log'))\n    dec.loadSettingsFromObj(settingsObj)\n    dec.overwriteExisting = settingsObj['overwriteExisting']\n    dec.decompose(extraArgs)\n\nif __name__ == \"__main__\":\n    # CLI arguments\n    parser = ArgumentParser(description='Decompose a field with multiple settings, using Python multiprocessing')\n    parser.add_argument('settingsText', type=str, help='.txt file containing lines of .json files to run, or a directory with .json files when running in folder mode')\n    parser.add_argument('poolSize', type=int, help='Size of the pool to use. Specify <= 1 for non-mp sequential run')\n    parser.add_argument('-c','--checkexistence', help='Checks existence of run, continues if incomplete',action='store_true')\n    parser.add_argument('-w','--writesettings', help='Write the settings to the result folder of the run',action='store_true')\n    parser.add_argument('-o','--overwrite', help='Overwrite previous results',action='store_true')\n    parser.add_argument('-f','--parsefolder', help='Parse folder?',action='store_true')\n    parser.add_argument('-d','--deduplicate',help='Deduplicate results in folder based on mtime', action='store_true')\n    parser.add_argument('-s','--startFrom',help='Start from the given iteration and step, specified as <it>,<step>. Loads result of (it,step) and starts computing (it,step+1) etc',type=str)\n\n    args = parser.parse_args()\n\n    r = SettingsCollectionParser(args.overwrite)\n    dr = DecomposerRunner()\n    if args.parsefolder:\n        r.parseFolder(args.settingsText)\n    else:\n        r.parseSettingsTxt(Experiment().settingsFolder() / args.settingsText)\n    if args.startFrom is not None:\n        parts = list(map(int, args.startFrom.split(',')))\n        dr.extraArgs = {'redoPartial':{'i':parts[0],'j':parts[1]}}\n    if args.checkexistence:\n        r.checkExistence()\n    elif args.deduplicate:\n        for so in r.getSettingObjects():\n            d = Decomposer('')\n            d.setData(so)\n            d.resultsFolder.deduplicateResults()\n    elif args.writesettings:\n        r.writeSettingFiles('dump')\n    else:\n        settingObjs = r.getSettingObjects()\n        print('[decompose-mp.py] Running {} tasks on pool of size {}'.format(len(settingObjs), args.poolSize))\n        if args.poolSize <= 1:\n            print('[decompose-mp.py] Running sequential')\n            # Run sequential\n            for so in settingObjs:\n                dr(so)\n        else:\n            with Pool(min(args.poolSize,len(settingObjs))) as pool:\n                pool.map(dr, settingObjs)\n\n    print('[decompose-mp.py] Done processing')\n","sub_path":"experiment-env/scripts/decompose-mp.py","file_name":"decompose-mp.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
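# Editor's sketch (illustrative only, not part of the repo above): the
# multiprocessing pattern decompose-mp.py relies on -- a picklable callable
# class instance mapped over the work items, with the Pool managed as a
# context manager so it is always closed and joined.
from multiprocessing import Pool


class Runner:
    def __init__(self, scale):
        self.scale = scale          # per-run state, set up once in the parent

    def __call__(self, item):       # instances pickle cleanly; lambdas do not
        return item * self.scale


if __name__ == '__main__':
    with Pool(2) as pool:
        print(pool.map(Runner(10), [1, 2, 3]))  # -> [10, 20, 30]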
+{"seq_id":"417907818","text":"import logging\nimport time\nimport random\n\n# must start with the main logger name\nlogger = logging.getLogger('mylib.mymath')\n\ndef multi (num_1, num_2):\n    \"\"\" multiply num_1 by num_2 \"\"\"\n    logger.debug('Start Multi function for {} and {}'.format(num_1,num_2))\n    res = num_1 * num_2\n    time.sleep(0.8)\n    if not random.randint(0,5) :\n        raise ValueError ('an internal error occurs...')\n    logger.debug('End of Multi function. return value is {}'.format(res))\n    return (res)\n\n\nif __name__ == '__main__':\n    import random\n\n    x = random.randint(1,100)\n    y = random.randint(1,100)\n    logging.info('Mul : {} * {} = {}'.format(x,y,multi(x,y)))\n\n","sub_path":"learn/logging/mymath.py","file_name":"mymath.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"136892397","text":"import scrapy\n\nproxy_list = []\n\nclass ProxySpider(scrapy.Spider):\n    name = 'ProxySpider'\n    download_delay = 10\n    \n    def start_requests(self):\n        yield scrapy.Request(\n            url='https://free-proxy-list.net/',\n            callback = self.parse_proxy\n        )\n    \n    def parse_proxy(self, response):\n        #print (response.xpath('//tbody/tr').extract())\n        \n        for proxy in response.xpath('//table[@id=\"proxylisttable\"]/tbody/tr'):\n            ip = proxy.xpath('./td/text()')[0].extract()\n            port = proxy.xpath('./td/text()')[1].extract()\n            https = proxy.xpath('./td/text()')[6].extract()\n            if https == 'no':\n                proxy_list.append('http://{0}:{1}'.format(ip, port)) # keep a copy in memory so the count below is meaningful\n                with open('proxy_list.txt', 'a') as f:\n                    f.write('http://{0}:{1}\\n'.format(ip, port))\n            elif https == 'yes':\n                proxy_list.append('https://{0}:{1}'.format(ip, port))\n                with open('proxy_list.txt', 'a') as f:\n                    f.write('https://{0}:{1}\\n'.format(ip, port))\n        \n        print(len(proxy_list))\n    ","sub_path":"YouTube_test/spiders/proxy_spider.py","file_name":"proxy_spider.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"7082660","text":"import udt\nimport util\nimport config\nimport time\nimport collections\nimport threading\nimport _thread\n\n\n# Go-Back-N reliable transport protocol.\nclass GoBackN:\n    _N = 8\n    # \"msg_handler\" is used to deliver messages to application layer\n    # when it's ready.\n    def __init__(self, local_ip, local_port,\n                 remote_ip, remote_port, msg_handler):\n        self.network_layer = udt.NetworkLayer(local_ip, local_port,\n                                              remote_ip, remote_port, self)\n        self.msg_handler = msg_handler\n        self.next_seq_number = 1\n        self.base_seq_number = 1\n        self.last_seq_number_to_be_sent = None\n        self.base_seq_number_lock = threading.Lock()\n        self.next_seq_number_lock = threading.Lock()\n        self.last_ack_number = 0\n        self.last_ack_number_lock = threading.Lock()\n        self.timer = None\n        self.timer_lock = threading.Lock()\n\n    # \"send\" is called by application. 
Return true on success, false\n # otherwise.\n def send(self, msg):\n # TODO: impl protocol to send packet from application layer.\n # call self.network_layer.send() to send to network layer.\n msg_size = len(msg)\n bytes_sent = 0\n start = 0\n end = 0\n send_pkts = {}\n try:\n while True:\n with self.next_seq_number_lock:\n \n with self.base_seq_number_lock:\n base_seq_number = self.base_seq_number\n \n if self.next_seq_number < base_seq_number + self._N:\n if self.next_seq_number not in send_pkts:\n if bytes_sent < msg_size:\n if msg_size - bytes_sent < config.MAX_MESSAGE_SIZE:\n end = msg_size\n else:\n end += config.MAX_MESSAGE_SIZE\n\n pkt = util.make_pkt(config.MSG_TYPE_DATA, self.next_seq_number, msg[start:end])\n print(\"Saving pkt with seq_number: \", self.next_seq_number)\n send_pkts[self.next_seq_number] = pkt\n bytes_sent += (end - start)\n start = end\n if end == msg_size:\n # we have reached end of message, track last seq number here\n self.last_seq_number_to_be_sent = self.next_seq_number\n else:\n if self.last_seq_number_to_be_sent is None:\n # return once we have received the ack for the last seq number\n return True\n\n if self.next_seq_number in send_pkts:\n self.network_layer.send(send_pkts[self.next_seq_number])\n \n with self.base_seq_number_lock:\n base_seq_number = self.base_seq_number\n \n if self.next_seq_number == base_seq_number:\n # start timer\n with self.timer_lock:\n if self.timer:\n # if timer already exists, cancel and restart\n self.timer.cancel()\n self.timer = threading.Timer(config.TIMEOUT_MSEC/1000.0, self.reset_next_seq_num)\n self.timer.start()\n print(\"Started timer for pkt with seq_number: \", self.next_seq_number)\n\n self.next_seq_number += 1\n except:\n raise\n # return False\n return True\n\n def reset_next_seq_num(self):\n with self.base_seq_number_lock:\n base_seq_number = self.base_seq_number\n \n with self.next_seq_number_lock:\n self.next_seq_number = base_seq_number\n\n # \"handler\" to be called by network layer when packet is ready.\n def handle_arrival_msg(self):\n # TODO: impl protocol to handle arrived packet from network layer.\n # call self.msg_handler() to deliver to application layer.\n msg = self.network_layer.recv()\n \n if util.is_corrupt_pkt(msg):\n return\n\n seq_number = util.pkt_seq_number(msg)\n if util.is_ack_pkt(msg):\n with self.base_seq_number_lock:\n if seq_number >= self.base_seq_number and seq_number < self.base_seq_number + self._N:\n # slide the window by 1\n self.base_seq_number = seq_number + 1\n if seq_number == self.last_seq_number_to_be_sent:\n # if this is the last seq number of the stream, set it to None\n self.last_seq_number_to_be_sent = None\n \n # restart timer\n with self.timer_lock:\n if self.timer:\n self.timer.cancel()\n self.timer = threading.Timer(config.TIMEOUT_MSEC/1000.0, self.reset_next_seq_num)\n self.timer.start()\n else:\n received_msg = False\n with self.last_ack_number_lock:\n if seq_number == self.last_ack_number + 1:\n # if we receive a data pkt with next seq number, accept. 
otherwise, discard and send ack with last ack number\n                    received_msg = True\n                    self.last_ack_number = seq_number\n                else:\n                    seq_number = self.last_ack_number\n            \n            if received_msg:\n                self.msg_handler(util.pkt_data(msg))\n            \n            # send ack pkt for accepted/discarded pkts with last ack number\n            ack_pkt = util.make_pkt(config.MSG_TYPE_ACK, seq_number)\n            self.network_layer.send(ack_pkt)\n\n    # Cleanup resources.\n    def shutdown(self):\n        # TODO: cleanup anything else you may have when implementing this\n        # class.\n        self.network_layer.shutdown()\n","sub_path":"project03/gbn.py","file_name":"gbn.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
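# Editor's sketch (hypothetical, not from the project above): the Go-Back-N
# sender invariant gbn.py enforces -- at most N unacked packets in flight,
# with a cumulative ACK for sequence k sliding the window base to k + 1.
N = 8
base, next_seq = 1, 1          # mirrors base_seq_number / next_seq_number

def window_open(base, next_seq):
    # a new packet may be sent only while it falls inside the send window
    return next_seq < base + N

while window_open(base, next_seq):
    next_seq += 1              # "transmit" until the window fills

print(window_open(base, next_seq))   # False: N packets are in flight
base = 5                             # cumulative ACK for seq 4 arrives
print(window_open(base, next_seq))   # True: the window slid forward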
+{"seq_id":"195943454","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 18 19:31:46 2018\n\n@author: Gadcet\n\"\"\"\n\n\"\"\"\nThe best way to go about this is to leave this module as it is; if we need one of these functions inside another function, we simply copy it into the new one, so we shouldn't be calling 'dummy_data_funcs' ever again\n\"\"\"\nimport random\nimport pandas as pd\nfrom faker import Faker\nimport names\nimport numpy as np\nfrom ast import literal_eval\nimport nltk\nimport spacy\nnlp = spacy.load('en_core_web_lg') \nimport enchant\nimport pycountry\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Comment\nimport urllib.request\nimport pickle\nimport re\n\nd = enchant.Dict('en_UK')\n\n# Get the bloverse categories\ndef get_bloverse_categories():\n    mappings_df = pd.read_csv(r'/Users/Gadcet/Documents/Ani extraction/Google_Bloverse Category Mappings.csv', encoding = 'latin1')\n    google = list(mappings_df['Google name'])\n    bloverse = list(mappings_df['Bloverse name'])\n    \n    category_dict = dict(zip(google, bloverse))\n    \n    categories = list(set(bloverse))\n    \n    categories = [categories for categories in categories if str(categories) != 'nan']\n    return categories\n\n# Fix age group problem\ndef get_age_bracket(age):\n    bracket = 0\n    if 18 <= age < 25:\n        bracket = '18 to 24'\n    elif 25 <= age < 35:\n        bracket = '25 to 34'\n    elif 35 <= age < 45:\n        bracket = '35 to 44'\n    elif 45 <= age < 55:\n        bracket = '45 to 54'\n    elif age > 54:\n        bracket = '55+'\n    return bracket\n\n# Convert the user age into an age bracket\ndef get_age_groups(df):\n    user_age_group_list = []\n    for i in range(len(df)):\n        temp_age = df.iloc[i]['User Age']\n        user_age_bracket = get_age_bracket(temp_age)\n        user_age_group_list.append(user_age_bracket)\n    return user_age_group_list\n\n\n# Assign a unique reference to each article - look to improve on this later, e.g. a unique hexadecimal reference\ndef assign_article_refs(news_df):\n    article_ref = []\n    for x in range(len(news_df)):\n        temp = '#%s' % random.randint(100000000,999999999)\n        while temp in article_ref:\n            # regenerate until the reference is actually unique\n            temp = '#%s' % random.randint(100000000,999999999)\n        article_ref.append(temp)\n    \n    total_refs = list(set(article_ref))\n#    print(len(news_df))\n#    print(len(total_refs))\n    return total_refs\n\ndef get_user_intro(user_id, df):\n    user_id_list = list(df['User IDs'])\n    i = user_id_list.index(user_id)\n    temp_user_id = df.iloc[i]['User IDs']\n    temp_name = df.iloc[i]['User Name']\n    temp_age = df.iloc[i]['User Age']\n    temp_gender = df.iloc[i]['User Gender']\n    temp_home_country = df.iloc[i]['User Home Country']\n    temp_countries = literal_eval(df.iloc[i]['User Countries'])\n    temp_countries2 = ', '.join(temp_countries)\n    temp_categories = literal_eval(df.iloc[i]['User Categories'])\n    temp_categories2 = ', '.join(temp_categories)\n    temp_articles_read = literal_eval(df.iloc[i]['User Num Articles Read'])\n    temp_art_avg = int(np.average(temp_articles_read))\n    if temp_gender == 'male':\n        text = 'This is %s, he is %s years old, from %s and typically consumes content from %s. The types of content he typically engages with are %s. On average he reads %s articles a day' % (temp_name,temp_age, temp_home_country,temp_countries2,temp_categories2,temp_art_avg)\n    else:\n        text = 'This is %s, she is %s years old, from %s and typically consumes content from %s. The types of content she typically engages with are %s. On average she reads %s articles a day' % (temp_name,temp_age,temp_home_country,temp_countries2,temp_categories2,temp_art_avg)\n    print('***********************************')\n    print(text)\n    print('***********************************')\n\ndef flatten_list(stacked_list):\n    articles_list = []\n    \n    for sublist in stacked_list:\n        for item in sublist:\n            articles_list.append(item)\n    return(articles_list)\n\ndef return_shorter_flags(news_df, articles_to_today, long_list):\n    shorter_refs = list(news_df['Article Ref'])\n    short_list = []\n    # Now reduce his news index to only include the news articles in the smaller sample\n    for t in range(len(articles_to_today)):\n        if articles_to_today[t] in shorter_refs:\n            short_list.append(long_list[t])\n    return short_list\n
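# Editor's note (an addition, not original code): flatten_list above is the
# hand-rolled form of itertools.chain.from_iterable; an equivalent version
# is sketched here for comparison, with the same input/output contract.
from itertools import chain

def flatten_list_v2(stacked_list):
    # identical result to flatten_list, with the nested loop delegated
    return list(chain.from_iterable(stacked_list))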
\ndef get_total_points(user_id, day_cutoff, df, news_df, cat_flag):\n    \n    # Get a list of all the user ids and then get the location of the current one\n    user_id_list = list(df['User IDs'])\n    i = user_id_list.index(user_id)\n    temp_daily_articles = literal_eval(df.iloc[i]['User Daily Articles'])\n    temp_daily_comments_flag = literal_eval(df.iloc[i]['Daily Articles Comments Flag'])\n    temp_daily_comments_likes = literal_eval(df.iloc[i]['Daily Articles Comments Likes'])\n    temp_daily_comments_sentiment = literal_eval(df.iloc[i]['Daily Articles Comments Sentiment'])\n    temp_daily_shares_flag = literal_eval(df.iloc[i]['Daily Articles Shares Flag'])\n    temp_daily_likes_flag = literal_eval(df.iloc[i]['Daily Articles Likes Flag'])\n    \n    # pick the articles activity up until the cutoff date\n    articles_to_today = temp_daily_articles[0:day_cutoff]\n    articles_comments_flag_to_today = temp_daily_comments_flag[0:day_cutoff]\n    articles_comments_likes_to_today = temp_daily_comments_likes[0:day_cutoff]\n    articles_comments_sentiment_to_today = temp_daily_comments_sentiment[0:day_cutoff]\n    articles_shares_flag_to_today = temp_daily_shares_flag[0:day_cutoff]\n    articles_likes_flag_to_today = temp_daily_likes_flag[0:day_cutoff]\n    \n    articles_to_today = flatten_list(articles_to_today)\n    articles_comments_flag_to_today = flatten_list(articles_comments_flag_to_today)\n    articles_comments_likes_to_today = flatten_list(articles_comments_likes_to_today)\n    articles_comments_sentiment_to_today = flatten_list(articles_comments_sentiment_to_today)\n    articles_shares_flag_to_today = flatten_list(articles_shares_flag_to_today)\n    articles_likes_flag_to_today = flatten_list(articles_likes_flag_to_today)\n    \n    if cat_flag == 1: # flag added for cases where we are feeding in a subset of the news data (e.g. when getting ranking by category)\n        articles_comments_flag_to_today = return_shorter_flags(news_df, articles_to_today, articles_comments_flag_to_today)\n        articles_comments_likes_to_today = return_shorter_flags(news_df, articles_to_today, articles_comments_likes_to_today)\n        articles_comments_sentiment_to_today = return_shorter_flags(news_df, articles_to_today, articles_comments_sentiment_to_today)\n        articles_shares_flag_to_today = return_shorter_flags(news_df, articles_to_today, articles_shares_flag_to_today)\n        articles_likes_flag_to_today = return_shorter_flags(news_df, articles_to_today, articles_likes_flag_to_today)\n    \n    # Get a df of all the articles where they have commented\n    comment_indices = [i for i, x in enumerate(articles_comments_flag_to_today) if x == 1]\n    commented_article_refs = [articles_to_today[i] for i in comment_indices]\n    commented_article_likes = [articles_comments_likes_to_today[i] for i in comment_indices]\n    commented_article_sentiment = [articles_comments_sentiment_to_today[i] for i in comment_indices]\n    \n    comment_news = news_df[news_df['Article Ref'].isin(commented_article_refs)]\n    #print(len(comment_news))\n    \n    comment_points = sum(commented_article_likes) * 1\n    share_points = sum(articles_shares_flag_to_today) * 50\n    like_points = sum(articles_likes_flag_to_today) * 25\n    \n    total_points = comment_points + share_points + like_points\n    return total_points\n\n\n# Build a function to get the ranking for our current user on that day\ndef get_user_rank(user_id, day_cutoff, df, news_df, cat_flag):\n    user_id_list = list(df['User IDs'])\n    user_index = user_id_list.index(user_id)\n    user_points_tally = []\n    count_flag = []\n    user_refs = []\n    for i in range(len(df)):\n        temp_user_id = df.iloc[i]['User IDs'] # loop-local name so the user_id argument is not clobbered before the rank lookup below\n        points = get_total_points(temp_user_id, day_cutoff, df, news_df, cat_flag)\n        if points > 0:\n            count_flag.append(1)\n            user_refs.append(temp_user_id)\n        else:\n            count_flag.append(0)\n        user_points_tally.append(points)\n    \n    # Get the index of the sorted point tallies and then use that to sort the\n    # user_ids and then get the location of your user_id in the sorted lists\n    sorted_ind = sorted(range(len(user_points_tally)), key=lambda k: user_points_tally[k], reverse=True)\n    sorted_user_id_list = [user_id_list[i] for i in sorted_ind]\n    sorted_points_list = [user_points_tally[i] for i in sorted_ind]\n    user_rank = sorted_user_id_list.index(user_id)\n    user_points = sorted_points_list[user_rank]\n    \n    return user_rank+1, len(sorted_ind), user_points, sum(count_flag), user_refs\n\n# This gets the rank on a global level - ** come back to this function later and return the rank, points etc instead of just printing them alone\ndef get_demo_score(temp_user_id, temp_name, day_cutoff, users_df, user_news, flag, flag2):\n    output = get_user_rank(temp_user_id, day_cutoff, users_df, user_news, flag)\n    #print(output[0]) # prints the user rank\n    #print(output[1]) # prints the total number of users\n    #print(output[2]) # prints the points the user has accrued over the time period\n    #print(output[3]) # prints the number of users (across the number of users) who have read at least 1 of the articles in the news data\n    #print(output[4]) # prints the user IDs of the users that have also read\n    if flag2 == 1:\n        print('%s has a global rank of %s/%s and has accrued %s points in the last %s days ' % (temp_name, output[0], output[3], output[2], day_cutoff))\n    elif flag2 == 2:\n        print('%s has a country rank of %s/%s in the last %s days ' % (temp_name, output[0], output[3], day_cutoff))\n    elif flag2 == 3:\n        print('%s has a gender rank of %s/%s in the country in the last %s days ' % (temp_name, output[0], output[3], day_cutoff))\n    \n#************* Look into multithreading later to speed up your code!!!!!!!!\n# Get the rank based on the category globally **** there's an issue here and you need to do some stuff to make the function work\ndef get_category_scores(temp_name, 
temp_categories, news_data, temp_user_id, day_cutoff, users_df, user_news_cat, flag):\n print('We are now getting the global rank of %s for the different content types' % temp_name)\n for cats in temp_categories:\n print(cats)\n cat_indices = []\n for ii in range(len(news_data)):\n cat_list = news_data['Article Category'].iloc[ii]\n try:\n if cats in cat_list:\n cat_indices.append(ii)\n except:\n continue\n user_news_cat = news_data.iloc[cat_indices]\n print(len(user_news_cat))\n output = get_user_rank(temp_user_id, day_cutoff, users_df, user_news_cat, flag)\n print('%s/%s for articles in the %s category over the last %s days ' % (output[0], output[3], cats, day_cutoff))\n\n# Get points based on the index of the paragraph\ndef get_entity_points(index):\n points = 0\n if 0 <= index < 5:\n points = 10\n elif 5<= index < 10:\n points = 5\n elif 10 <= index < 20:\n points = 2\n elif index > 20:\n points = 1\n return points\n\n# Get the total points for the entity across the whole article\ndef get_entity_total_points(entity, entity_type, headline, text):\n sentences = nltk.sent_tokenize(text)\n points = 0\n entity2 = entity.split()\n if entity in headline:\n points += 20\n elif len(entity2) > 1:\n if entity_type == 'PERSON': # add organisation to this list\n secondary_entity = entity2[-1] # gets the last name of the entity\n if secondary_entity in headline:\n points += 20\n for i in range(len(sentences)):\n temp = sentences[i]\n if entity in temp:\n pnt = get_entity_points(i)\n points += pnt\n elif len(entity2) > 1:\n if entity_type == 'PERSON':\n secondary_entity = entity2[-1] # gets the last name of the entity\n if secondary_entity in temp:\n pnt = get_entity_points(i)\n points += pnt\n return points\n\n# Get the total points for the entity across the whole article\ndef get_keyword_total_points(keyword, headline, text):\n sentences = nltk.sent_tokenize(text)\n points = 0\n if keyword in headline:\n points += 20\n for i in range(len(sentences)):\n temp = sentences[i]\n if keyword in temp:\n pnt = get_entity_points(i)\n points += pnt\n return points\n\n\ndef get_entity_points_and_top_5(news_df):\n entity_points_list_full = []\n top_5_entity_list_full = []\n \n for i in range(len(news_df)):\n print(i)\n headline = news_df.iloc[i]['Title']\n text = news_df.iloc[i]['Text']\n try:\n entity_list = literal_eval(news_df.iloc[i]['Article Entities'])\n entity_types_list = literal_eval(news_df.iloc[i]['Article Entity Types'])\n except:\n entity_list = news_df.iloc[i]['Article Entities']\n entity_types_list = news_df.iloc[i]['Article Entity Types']\n \n entity_points_list = []\n top_5_entity_list = []\n for entity,entity_type in zip(entity_list, entity_types_list):\n points = get_entity_total_points(entity, entity_type, headline, text )\n entity_points_list.append(points)\n sorted_ind = sorted(range(len(entity_points_list)), key=lambda k: entity_points_list[k], reverse=True)\n sorted_entity_list = [entity_list[i] for i in sorted_ind]\n top_5_entities = sorted_entity_list[0:5]\n for ent in top_5_entities:\n top_5_entity_list.append(ent)\n entity_points_list_full.append(entity_points_list)\n top_5_entity_list_full.append(top_5_entity_list)\n \n news_df['Article Entity Points'] = entity_points_list_full\n news_df['Top 5 Entities'] = top_5_entity_list_full\n return news_df\n\ndef get_keyword_points_and_top_5(news_df):\n keyword_points_list_full = []\n top_5_keyword_list_full = []\n \n for i in range(len(news_df)):\n print(i)\n headline = news_df.iloc[i]['Title']\n text = news_df.iloc[i]['Text']\n try:\n 
keyword_list = literal_eval(news_df.iloc[i]['Article Keywords'])\n except:\n keyword_list = news_df.iloc[i]['Article Keywords']\n \n keyword_points_list = []\n top_5_keyword_list = []\n for keyword in keyword_list:\n points = get_keyword_total_points(keyword, headline, text )\n keyword_points_list.append(points)\n sorted_ind = sorted(range(len(keyword_points_list)), key=lambda k: keyword_points_list[k], reverse=True)\n sorted_keyword_list = [keyword_list[i] for i in sorted_ind]\n top_5_keywords = sorted_keyword_list[0:5]\n for keywords in top_5_keywords:\n top_5_keyword_list.append(keywords)\n keyword_points_list_full.append(keyword_points_list)\n top_5_keyword_list_full.append(top_5_keyword_list)\n \n news_df['Article Keyword Points'] = keyword_points_list_full\n news_df['Top 5 Keywords'] = top_5_keyword_list_full\n return news_df\n\ndef get_user_entity_points(user_id, users_df, entity, news_data, day_cutoff):\n # Get the user and their index\n user_id_list = list(users_df['User IDs'])\n user_index = user_id_list.index(user_id)\n \n temp_daily_articles = literal_eval(users_df.iloc[user_index]['User Daily Articles'])\n temp_daily_comments_likes = literal_eval(users_df.iloc[user_index]['Daily Articles Comments Likes'])\n temp_daily_comments_sentiment = literal_eval(users_df.iloc[user_index]['Daily Articles Comments Sentiment'])\n \n # pick the articles activity up until the cutoff date\n articles_to_today = temp_daily_articles[0:day_cutoff]\n articles_comments_likes_to_today = temp_daily_comments_likes[0:day_cutoff]\n articles_comments_sentiment_to_today = temp_daily_comments_sentiment[0:day_cutoff]\n \n # Get the list of article refs for articles that have been read up until the current day (it will be a list of lists)\n articles_to_today = temp_daily_articles[0:day_cutoff+1]\n articles_comments_likes_to_today = temp_daily_comments_likes[0:day_cutoff+1]\n articles_comments_sentiment_to_today = temp_daily_comments_sentiment[0:day_cutoff+1]\n \n # Flatten the lists so you have a '1 x n' list instead of an 'M x N' list\n articles_to_today = flatten_list(articles_to_today)\n articles_comments_likes_to_today = flatten_list(articles_comments_likes_to_today)\n articles_comments_sentiment_to_today = flatten_list(articles_comments_sentiment_to_today)\n\n # select all the articles that he has read up to that day. 
\n user_news = news_data[news_data['Article Ref'].isin(articles_to_today)]\n \n ent_indices = []\n for ii in range(len(user_news)):\n ent_list = user_news['Article Entities'].iloc[ii]\n try:\n if entity in ent_list:\n ent_indices.append(ii)\n except:\n continue\n ent_news = user_news.iloc[ent_indices] \n \n entity_point_list = []\n entity_sent_point_list = []\n \n for i in range(len(ent_news)):\n # Get the news article and details about it\n example_news = ent_news.iloc[i]\n headline = example_news['Title']\n text = example_news['Text']\n article_ref = example_news['Article Ref']\n entities = literal_eval(example_news['Article Entities'])\n entity_type = literal_eval(example_news['Article Entity Types'])\n entity_points = literal_eval(example_news['Entity Points'])\n \n # Get the points for that entity in the current article\n try:\n ent_ind = entities.index(entity)\n except:\n for i in range(len(entities)):\n if entity in entities[i]:\n ent_ind = i\n curr_ent_points = entity_points[ent_ind]\n \n\n # Now get the index of the current article that were looking at\n article_index = articles_to_today.index(article_ref)\n article_comment_likes = articles_comments_likes_to_today[article_index]\n article_comment_sentiment = articles_comments_sentiment_to_today[article_index]\n total_ent_points = (curr_ent_points * article_comment_likes)\n total_ent_sent_points = (curr_ent_points * article_comment_likes * article_comment_sentiment)\n entity_point_list.append(total_ent_points)\n entity_sent_point_list.append(total_ent_sent_points)\n total_points = sum(entity_point_list)\n total_sent_points = sum(entity_sent_point_list)\n \n return total_points, total_sent_points, len(ent_news)\n\ndef get_user_entity_rank(user_id, entity, news_data, day_cutoff, users_df):\n user_id_list = list(users_df['User IDs'])\n user_index = user_id_list.index(user_id)\n user_points_tally = []\n user_sent_points_tally = []\n count_flag = []\n for i in range(len(users_df)):\n temp_user_id = users_df.iloc[i]['User IDs']\n points = get_user_entity_points(temp_user_id, users_df, entity, news_data, day_cutoff)\n if points[2] > 0:\n count_flag.append(1)\n else:\n count_flag.append(0)\n user_points_tally.append(points[0])\n user_sent_points_tally.append(points[1])\n \n # Get the index of the sorted point tallies and then use that to sort the\n # user_ids and then get the location of your temp_user_id in the sorted lists\n sorted_ind = sorted(range(len(user_points_tally)), key=lambda k: user_points_tally[k], reverse=True)\n sorted_user_id_list = [user_id_list[i] for i in sorted_ind]\n sorted_points_list = [user_points_tally[i] for i in sorted_ind]\n sorted_sent_points_list = [user_sent_points_tally[i] for i in sorted_ind]\n user_rank = sorted_user_id_list.index(user_id)\n user_points = sorted_points_list[user_rank]\n user_sent_points = sorted_sent_points_list[user_rank]\n \n return min(user_rank+1,sum(count_flag)), sum(count_flag), user_points, user_sent_points\n\ndef jaccard_similarity(list1, list2):\n intersection = len(list(set(list1).intersection(list2)))\n #print(list(set(list1).intersection(list2)))\n union = (len(list1) + len(list2)) - intersection\n return float(intersection / union)\n\n#******************************************************************************************** \n\"\"\"\nThe functions here are related to article similarity\n\"\"\"\n \n# Ok so the first function takes any two texts and spits out their similarity\ndef get_text_similarity(text1, text2):\n text1 = nlp(text1)\n text2 = nlp(text2)\n sim = 
text1.similarity(text2)\n return sim\n\ndef check_for_similar_articles(art_ref, headline_sim, text_sim, ent_key_sim, news):\n# \"\"\"\n# This function checks if the current article is similar to articles that have\n# been published previously.\n# \n# art_ref - the article reference for the current article\n# news - the list of news articles that the current article is being compared to\n# for example you can compare to all articles published in the last week,\n# or articles that have been published for that category today. \n# *** later you may need to build a function that does the similarity based solely on the\n# article headline\n# \"\"\"\n article_ref_list = list(news['Article Ref'])\n art_ind = article_ref_list.index(art_ref)\n sim_art_ref = []\n try:\n curr_headline = nlp(news.iloc[art_ind]['Title'])\n curr_text = nlp(news.iloc[art_ind]['Text'])\n try:\n curr_entities = literal_eval(news.iloc[art_ind]['Top 5 Entities'])\n curr_keywords = literal_eval(news.iloc[art_ind]['Top 5 Entities'])\n except:\n curr_entities = news.iloc[art_ind]['Top 5 Entities']\n curr_keywords = news.iloc[art_ind]['Top 5 Entities'] \n except:\n if type(news.iloc[art_ind]['Title']) == str:\n curr_headline = nlp(get_english_translation(news.iloc[art_ind]['Title']))\n curr_text = nlp(get_english_translation(news.iloc[art_ind]['Text']))\n try:\n curr_entities = literal_eval(news.iloc[art_ind]['Top 5 Entities'])\n curr_keywords = literal_eval(news.iloc[art_ind]['Top 5 Entities'])\n except:\n curr_entities = news.iloc[art_ind]['Top 5 Entities']\n curr_keywords = news.iloc[art_ind]['Top 5 Entities'] \n \n for i in range(len(news)):\n #print('Now going through artile %s out of %s' % (i+1, len(news)))\n if i != art_ind:\n if type(news.iloc[i]['Title']) == str:\n try:\n try:\n headline = nlp(news.iloc[i]['Title'])\n #print(headline)\n except: \n headline = nlp(get_english_translation(news.iloc[i]['Title']))\n #print(headline)\n sim1 = curr_headline.similarity(headline)\n #print(sim1)\n if sim1 > headline_sim:\n #print('now starting to do the bidness')\n try:\n entities = literal_eval(news.iloc[i]['Top 5 Entities'])\n keywords = literal_eval(news.iloc[i]['Top 5 Entities'])\n except:\n entities = news.iloc[i]['Top 5 Entities']\n keywords = news.iloc[i]['Top 5 Entities']\n text = nlp(news.iloc[i]['Text'])\n #print('we were able to do the text part')\n sim2 = curr_text.similarity(text)\n #print('we got the similarity')\n if sim2 > text_sim:\n #print('we looking at jaccards')\n try:\n ent_jacc = jaccard_similarity(curr_entities, entities)\n #print('we got the entity jaccards')\n key_jacc = jaccard_similarity(curr_keywords, keywords)\n ent_key_sum = ent_jacc+key_jacc \n #print('we got through the jaccards')\n except:\n continue\n \n if ent_key_sum > ent_key_sim:\n sim_art_ref.append(news.iloc[i]['Article Ref'])\n\n except:\n print('An exception has been raised')\n continue\n \n sim_articles = news[news['Article Ref'].isin(sim_art_ref)]\n\n if len(sim_art_ref) > 0:\n similar = 1\n else:\n similar = 0\n return similar, sim_articles, sim_art_ref\n\n\ndef check_for_similar_headlines(art_ref, headline_sim, news):\n# \"\"\"\n# This function checks if the current article is similar to articles that have\n# been published previously.\n# \n# art_ref - the article reference for the current article\n# news - the list of news articles that the current article is being compared to\n# for example you can compare to all articles published in the last week,\n# or articles that have been published for that category today. 
\n# *** later you may need to build a function that does the similarity based solely on the\n# article headline\n# \"\"\"\n article_ref_list = list(news['Article Ref'])\n art_ind = article_ref_list.index(art_ref)\n sim_art_ref = []\n try:\n curr_headline = nlp(news.iloc[art_ind]['Title'])\n except:\n if type(news.iloc[art_ind]['Title']) == str:\n print('We were unable to get a headline for this article, now trying a translation')\n curr_headline = nlp(get_english_translation(news.iloc[art_ind]['Title']))\n\n \n for i in range(len(news)):\n if i != art_ind:\n if i%100 == 0:\n print(i)\n try:\n \n headline = nlp(news.iloc[i]['Title'])\n sim1 = curr_headline.similarity(headline)\n if sim1 > headline_sim:\n sim_art_ref.append(news.iloc[i]['Article Ref'])\n\n except:\n continue\n \n sim_articles = news[news['Article Ref'].isin(sim_art_ref)]\n\n if len(sim_art_ref) > 0:\n similar = 1\n else:\n similar = 0\n return similar, sim_articles, sim_art_ref\n\ndef check_for_plagiarism(art_ref, headline_sim, text_sim, ent_key_sim, news): \n sim = check_for_similar_articles(art_ref, headline_sim, text_sim, ent_key_sim, news)\n articles = sim[1]\n art_refs = sim[2]\n return articles, art_refs\n\ndef check_for_plagiarism_by_headlines(art_ref, headline_sim, news): \n sim = check_for_similar_headlines(art_ref, headline_sim, news)\n articles = sim[1]\n art_refs = sim[2]\n return articles, art_refs\n\ndef clean_news_data(news_df):\n # goes through a news dataframe and removes the articles that are too similar\n \n art_del_list = []\n \n for i in range(len(news_df)):\n print(i)\n curr_art_ref = news_df.iloc[i]['Article Ref']\n if curr_art_ref not in art_del_list:\n sim_arts = check_for_plagiarism(curr_art_ref, 0.85, 0.8, 0.1, news_df)\n del_refs = sim_arts[1]\n if len(del_refs) > 0:\n for arts in del_refs:\n if arts not in art_del_list:\n art_del_list.append(arts)\n else:\n print('The current article is in the delete list so were not working on it')\n \n article_ref_list = list(news_df['Article Ref'])\n article_ref_list_cleaned = []\n for arts in article_ref_list:\n if arts not in art_del_list:\n article_ref_list_cleaned.append(arts)\n print('Total number of articles is %s' % len(article_ref_list))\n #print(len(art_del_list))\n print('Articles to be deleted: %s' % len(list(set(art_del_list))))\n print('Articles left: %s' % len(article_ref_list_cleaned))\n \n cleaned_news_df = news_df[news_df['Article Ref'].isin(article_ref_list_cleaned)]\n print(len(cleaned_news_df))\n return cleaned_news_df\n\ndef get_similar_articles(art_ref, news, num_arts, sim_threshold = 1.5): # you probably want this to be a process that runs once an hour every hour and updates the articles if need be\n# \"\"\"\n# This function gets the 5 most similar articles to the subject article.\n# \n# art_ref - the article reference for the current article\n# news - the list of news articles that the current article is being compared to\n# for example you can compare to all articles published in the last week,\n# or articles that have been published for that category today. 
\n \n# \"\"\"\n print('Now getting similar articles')\n nlp = spacy.load('en_core_web_lg') \n article_ref_list = list(news['Article Ref'])\n art_ind = article_ref_list.index(art_ref)\n tot_sim_list = []\n sim_art_ref_list = []\n try:\n curr_headline = nlp(news.iloc[art_ind]['Title'])\n curr_text = nlp(news.iloc[art_ind]['Text'])\n curr_entities = literal_eval(news.iloc[art_ind]['Article Entities'])\n curr_keywords = literal_eval(news.iloc[art_ind]['Article Keywords'])\n except:\n if type(news.iloc[art_ind]['Title']) == str:\n print('We were unable to get a headline for this article, now trying a translation')\n curr_headline = nlp(get_english_translation(news.iloc[art_ind]['Title']))\n curr_text = nlp(get_english_translation(news.iloc[art_ind]['Text']))\n curr_entities = literal_eval(news.iloc[art_ind]['Article Entities'])\n curr_keywords = literal_eval(news.iloc[art_ind]['Article Keywords'])\n \n for i in range(len(news)):\n if i != art_ind:\n #if i%100 == 0:\n #print(i)\n try: \n tot_sim = 0\n headline = nlp(news.iloc[i]['Title'])\n sim1 = curr_headline.similarity(headline)\n tot_sim += sim1\n try:\n entities = news.iloc[i]['Article Entities']\n keywords = news.iloc[i]['Article Keywords']\n except:\n print('the initial one didnt work')\n entities = literal_eval(news.iloc[i]['Article Entities'])\n keywords = literal_eval(news.iloc[i]['Article Keywords'])\n text = nlp(news.iloc[i]['Text'])\n sim2 = curr_text.similarity(text)\n tot_sim += sim2\n \n ent_jacc = jaccard_similarity(curr_entities, entities)\n tot_sim += ent_jacc\n key_jacc = jaccard_similarity(curr_keywords, keywords)\n tot_sim += key_jacc\n ent_key_sum = ent_jacc+key_jacc \n \n if tot_sim > sim_threshold:\n tot_sim_list.append(tot_sim)\n sim_art_ref_list.append(news.iloc[i]['Article Ref']) #work on this a lil bit more later\n except:\n continue\n #print(len(tot_sim_list))\n #print(len(sim_art_ref_list))\n if len(tot_sim_list) > num_arts:\n sorted_ind = sorted(range(len(tot_sim_list)), key=lambda k: tot_sim_list[k], reverse=True)\n sorted_art_ref_list = [sim_art_ref_list[i] for i in sorted_ind]\n article_recommendations = sorted_art_ref_list[0:min(len(news),num_arts)]\n article_recommendations_1 = news[news['Article Ref'].isin(article_recommendations)]\n return article_recommendations, article_recommendations_1\n else:\n print('Reduce number of recommendations asked for, we can only produce %s' % len(tot_sim_list))\n return None\n \n\ndef clean_news_data_by_headlines(news_df):\n # goes through a news dataframe and removes the articles that are too similar\n \n art_del_list = []\n print('Number of articles at the start is %s' % len(news_df))\n for i in range(len(news_df)):\n curr_art_ref = news_df.iloc[i]['Article Ref']\n if curr_art_ref not in art_del_list:\n sim_arts = check_for_plagiarism_by_headlines(curr_art_ref, 0.8, news_df)\n del_refs = sim_arts[1]\n if len(del_refs) > 0:\n for arts in del_refs:\n if arts not in art_del_list:\n art_del_list.append(arts)\n else:\n print('The current article is in the delete list so were not working on it')\n \n article_ref_list = list(news_df['Article Ref'])\n article_ref_list_cleaned = []\n for arts in article_ref_list:\n if arts not in art_del_list:\n article_ref_list_cleaned.append(arts)\n print('Number of articles to be deleted is %s' % len(list(art_del_list)))\n print('Number of articles left is %s' % len(article_ref_list_cleaned))\n \n cleaned_news_df = news_df[news_df['Article Ref'].isin(article_ref_list_cleaned)]\n print(len(cleaned_news_df))\n return cleaned_news_df\n\ndef 
reduce_articles_by_headlines(art_ref_list, news_df): # this function takes a bunch of article refs as input and then produces a smaller list of article refs\n group_news_df = news_df[news_df['Article Ref'].isin(art_ref_list)]\n group_news_df2 = clean_news_data_by_headlines(group_news_df)\n new_articles_refs = list(group_news_df2['Article Ref'])\n return new_articles_refs\n\ndef print_grouped_articles(news_df):\n unique_news = news_df[news_df['Group Indicator'] == 1]\n if len(unique_news) > 0:\n for i in range(len(unique_news)):\n print('******************************')\n print('The current article is')\n title = unique_news.iloc[i]['Title']\n print('_________________________')\n print(title)\n print('_________________________')\n try:\n sub_art_refs = unique_news.iloc[i]['Group List']\n group_news_df = news_df[news_df['Article Ref'].isin(sub_art_refs)]\n except:\n sub_art_refs = literal_eval(unique_news.iloc[i]['Group List'])\n group_news_df = news_df[news_df['Article Ref'].isin(sub_art_refs)]\n print(len(group_news_df))\n if len(group_news_df) > 0:\n print('Now printing the grouped articles')\n for i in range(len(group_news_df)):\n title = group_news_df.iloc[i]['Title']\n print('---------------------------')\n print(title)\n print('---------------------------')\n print('******************************')\n else:\n print('There seems to be a slight error here, no articles found')\n\n\ndef group_related_articles(news_df):\n# this function reduces the articles to a list of unique articles and then puts related articles under it \n #** add extr feature to order the articles within groups by similarity to the head article\n\n article_ref_list = list(news_df['Article Ref'])\n art_del_list = []\n article_group_list = []\n group_indicator = []\n \n for i in range(len(news_df)):\n print('Article %s out of %s' % (i, len(news_df)))\n article_ref_list_temp = []\n for arts in article_ref_list:\n if arts not in art_del_list:\n article_ref_list_temp.append(arts)\n temp_df = news_df[news_df['Article Ref'].isin(article_ref_list_temp)]\n curr_art_ref = news_df.iloc[i]['Article Ref']\n if curr_art_ref not in art_del_list:\n sim_arts = check_for_plagiarism(curr_art_ref, 0.75, 0.75, 0.3, temp_df)\n group_refs = sim_arts[1]\n for arts in group_refs:\n if arts not in art_del_list:\n art_del_list.append(arts)\n if len(group_refs) > 0:\n group_refs2 = reduce_articles_by_headlines(group_refs, temp_df)\n article_group_list.append(group_refs2)\n group_indicator.append(1)\n \n else:\n group_indicator.append(0)\n article_group_list.append('NA')\n else:\n group_indicator.append(0)\n article_group_list.append('NA')\n \n \n article_ref_list_cleaned = []\n for arts in article_ref_list:\n if arts not in art_del_list:\n article_ref_list_cleaned.append(arts)\n print(len(article_ref_list))\n print(len(art_del_list))\n print(len(list(set(art_del_list))))\n print(len(article_ref_list_cleaned))\n news_df['Group Indicator'] = group_indicator\n news_df['Group List'] = article_group_list\n \n return news_df\n\ndef group_related_articles_by_headlines(news_df):\n# this function reduces the articles to a list of unique articles and then puts related articles under it \n #** add extr feature to order the articles within groups by similarity to the head article\n\n article_ref_list = list(news_df['Article Ref'])\n art_del_list = []\n article_group_list = []\n group_indicator = []\n \n for i in range(len(news_df)):\n print(i)\n article_ref_list_temp = []\n for arts in article_ref_list:\n if arts not in art_del_list:\n 
article_ref_list_temp.append(arts)\n temp_df = news_df[news_df['Article Ref'].isin(article_ref_list_temp)]\n curr_art_ref = news_df.iloc[i]['Article Ref']\n if curr_art_ref not in art_del_list:\n sim_arts = check_for_plagiarism_by_headlines(curr_art_ref, 0.9, temp_df)\n group_refs = sim_arts[1]\n for arts in group_refs:\n if arts not in art_del_list:\n art_del_list.append(arts)\n if len(group_refs) > 0:\n group_refs2 = reduce_articles_by_headlines(group_refs, temp_df)\n article_group_list.append(group_refs2)\n group_indicator.append(1)\n \n else:\n group_indicator.append(0)\n article_group_list.append('NA')\n else:\n group_indicator.append(0)\n article_group_list.append('NA')\n \n \n article_ref_list_cleaned = []\n for arts in article_ref_list:\n if arts not in art_del_list:\n article_ref_list_cleaned.append(arts)\n print(len(article_ref_list))\n print(len(art_del_list))\n print(len(list(set(art_del_list))))\n print(len(article_ref_list_cleaned))\n news_df['Group Indicator'] = group_indicator\n news_df['Group List'] = article_group_list\n \n return news_df\n#******************************************************************************************** \n \n#******************************************************************************************** \n#********************************************************************************************\n## The functions below are all related to 'get_twitter_topics_from_webhose.py'\n \n#******************************************************************************************** \n# These four functions make up 'convert_twitter_topics_to_search_term' function\ndef extract_largest_english_word_and_delete_it(text):\n text2 = []\n for txt in text:\n for t in txt:\n text2.append(t)\n if len(text2) > 4:\n word_list = []\n word_list_i = []\n word_list_j = []\n l = len(text)\n for i in range(l):\n for j in range(i+3, l+1):\n if d.check(text[i:j]):\n word_list.append(text[i:j].strip())\n word_list_i.append(i)\n word_list_j.append(j)\n if len(word_list) > 0:\n # Get the word lengths\n word_lens = []\n for words in word_list:\n word_lens.append(len(words))\n \n max_ind = word_lens.index(max(word_lens))\n try:\n max_word = word_list[max_ind]\n except:\n max_word = word_list[max_ind[0]]\n del text2[word_list_i[max_ind]:word_list_j[max_ind]]\n text3 = ''.join(text2)\n else:\n max_word = text\n text3 = text\n else:\n #print('No more words to extract')\n max_word = text\n text3 = text\n return max_word, text3\n\ndef find_str(s, char):\n index = 0\n\n if char in s:\n c = char[0]\n for ch in s:\n if ch == c:\n if s[index:index+len(char)] == char:\n return index\n\n index += 1\n\n return -1\n\n\ndef clean_up_text(text):\n word_list = []\n output = extract_largest_english_word_and_delete_it(text) \n word_list.append(output[0])\n if len(output[1]) > 0:\n if output[1] not in word_list:\n output2 = extract_largest_english_word_and_delete_it(output[1]) \n word_list.append(output2[0])\n if len(output2[1]) > 0:\n if output2[1] not in word_list:\n output3 = extract_largest_english_word_and_delete_it(output2[1]) \n word_list.append(output3[0])\n if len(output3[1]) > 0:\n if output3[1] not in word_list:\n output4 = extract_largest_english_word_and_delete_it(output3[1]) \n word_list.append(output4[0])\n if len(output4[1]) > 0:\n if output4[1] not in word_list:\n output5 = extract_largest_english_word_and_delete_it(output4[1]) \n word_list.append(output5[0])\n \n \n word_list_index = []\n for words in word_list:\n index = find_str(text, words)\n word_list_index.append(index)\n \n 
sorted_ind = sorted(range(len(word_list_index)), key=lambda k: word_list_index[k], reverse=False)\n sorted_word_list = [word_list[i].capitalize() for i in sorted_ind]\n text_output = ''.join(sorted_word_list)\n #print(text_output)\n if text_output.lower() != text.lower():\n text_output2 = text\n text_output = text\n else:\n text_output2 = ' '.join(sorted_word_list)\n text_output = '#%s' % text_output\n return text_output, text_output2\n\ndef clean_up_text2(text):\n words = text.split()\n word_list = []\n for word in words:\n word_list.append(word.capitalize())\n text_output2 = ' '.join(word_list)\n text_output = ''.join(word_list)\n text_output = '#%s' % text_output\n return text_output, text_output2\n\n\ndef clean_twitter_topic_output(tweet_topics):\n\n topic_hashtag = []\n topic_search_term = []\n \n for topics in tweet_topics:\n if len(topics.split()) == 1:\n if d.check(topics):\n topic_search_term.append(topics)\n topics = '#%s' % topics\n topic_hashtag.append(topics)\n else:\n output = clean_up_text(topics)\n topic_search_term.append(output[1])\n topic_hashtag.append(output[0])\n else:\n #print('Another approach has to be used')\n output = clean_up_text2(topics)\n topic_search_term.append(output[1])\n topic_hashtag.append(output[0])\n \n return topic_hashtag, topic_search_term\n\n\n#******************************************************************************************** \n\n#******************************************************************************************** \n# this function takes the twitter topics, cleans them and then restructures them into a search string\n# that can be fed into webhose\n\ndef convert_twitter_topics_to_search_term(tweet_topics, country):\n output = clean_twitter_topic_output(tweet_topics)\n topic_hashtags = output[0]\n topic_search_terms = output[1]\n search_list = []\n \n for i in range(len(topic_search_terms)):\n \n text = topic_search_terms[i]\n output = 'text: \\\"%s\\\"' % text\n search_list.append(output)\n search_list = ' OR '.join(search_list)\n search_list = '(%s)' % search_list\n \n \n prefix = \"language:english thread.country:%s\" % country\n search_term = \"%s %s\" % (prefix, search_list)\n return search_term, topic_search_terms, topic_hashtags\n\n\n#******************************************************************************************** \n\n#******************************************************************************************** \n# This function gets articles from webhose based on the search string and the \n# number of days in the past that we want to look \ndef get_webhouse_articles(webhouse_search_term, num_days): \n # this function gets the articles based on the search terms we have\n # obtained from twitter\n import time\n import requests\n import webhoseio\n import pandas as pd\n\n search_time_seconds = time.time() - 86400*num_days\n search_time_webhose = int(search_time_seconds * 1000)\n\n webhoseio.config(token=\"320ae62b-850f-4d1a-a09c-484da9b4a003\")\n query_params = {\n \t\"q\": webhouse_search_term,\n \t\"ts\": search_time_webhose,\n \t\"sort\": \"domain_rank\"\n }\n output = webhoseio.query(\"filterWebContent\", query_params)\n print(output['totalResults'])\n \n vals = output['totalResults']\n no_vals = vals//100\n print(no_vals)\n \n content = output\n \n sources = []\n authors = []\n titles = []\n post_bodies = []\n urls = []\n image_urls = []\n published_dates = []\n news_type = []\n news_country = []\n entities = []\n \n for i in range(len(content['posts'])):\n #print(i)\n try:\n 
sources.append(content['posts'][i]['thread']['site'])\n except:\n sources.append('NA')\n try:\n authors.append(content['posts'][i]['author'])\n except:\n authors.append('NA')\n try:\n titles.append(content['posts'][i]['title'])\n except:\n titles.append('NA')\n try:\n post_bodies.append(content['posts'][i]['text'])\n except:\n post_bodies.append('NA')\n try:\n urls.append(content['posts'][i]['url'])\n except:\n urls.append('NA')\n try:\n image_urls.append(content['posts'][i]['thread']['main_image'])\n except:\n image_urls.append('NA')\n try:\n published_dates.append(content['posts'][i]['published'])\n except:\n published_dates.append('NA')\n try:\n news_type.append(content['posts'][i]['thread']['site_categories'][0])\n except:\n news_type.append('NA')\n try:\n news_country.append(content['posts'][i]['thread']['country'])\n except:\n news_country.append('NA')\n try:\n entities.append(content['posts'][i]['entities'])\n except:\n entities.append('NA')\n \n # Get the next batch of posts\n \n for ii in range(no_vals):\n print(ii)\n output = webhoseio.get_next()\n print('we have gotten the next batch of articles')\n content = output\n #print(len(content))\n \n for i in range(len(content['posts'])):\n try:\n sources.append(content['posts'][i]['thread']['site'])\n except:\n sources.append('NA')\n try:\n authors.append(content['posts'][i]['author'])\n except:\n authors.append('NA')\n try:\n titles.append(content['posts'][i]['title'])\n except:\n titles.append('NA')\n try:\n post_bodies.append(content['posts'][i]['text'])\n except:\n post_bodies.append('NA')\n try:\n urls.append(content['posts'][i]['url'])\n except:\n urls.append('NA')\n try:\n image_urls.append(content['posts'][i]['thread']['main_image'])\n except:\n image_urls.append('NA')\n try:\n published_dates.append(content['posts'][i]['published'])\n except:\n published_dates.append('NA')\n try:\n news_type.append(content['posts'][i]['thread']['site_categories'][0])\n except:\n news_type.append('NA')\n try:\n news_country.append(content['posts'][i]['thread']['country'])\n except:\n news_country.append('NA')\n try:\n entities.append(content['posts'][i]['entities'])\n except:\n entities.append('NA')\n \n \n df = pd.DataFrame(\n \n {'source':sources,\n 'author':authors,\n 'title':titles,\n 'post_body':post_bodies,\n 'post_url':urls,\n 'image_url':image_urls,\n 'published_date':published_dates,\n 'category':news_type,\n 'country':news_country,\n 'entities':entities,})\n \n print(len(df))\n \n return df\n#******************************************************************************************** \n \n#******************************************************************************************** \n# Get entities and classify articles\ndef get_article_entities_and_classification(news_df):\n from google.cloud import language\n from google.oauth2 import service_account\n import pandas as pd\n \n creds = service_account.Credentials.from_service_account_file(\n '/Users/Gadcet/Documents/Ani extraction/webhose-tester-003736b2b834.json')\n \n client = language.LanguageServiceClient(\n credentials=creds,\n )\n \n news_df = news_df.dropna(how='any',axis=0) \n \n enums = ['UNKNOWN','PERSON','LOCATION','ORGANIZATION','EVENT','WORK_OF_ART','CONSUMER_GOOD','OTHER']\n \n # important to make sure the file for mappings_df is in a central location somewhere that is available to this program\n mappings_df = pd.read_csv(r'/Users/Gadcet/Documents/Ani extraction/Google_Bloverse Category Mappings.csv', encoding = 'latin1')\n google = list(mappings_df['Google name'])\n 
bloverse = list(mappings_df['Bloverse name'])\n \n category_dict = dict(zip(google, bloverse))\n \n #news_df.rename(index=str, columns={\"title\": \"Title\", \"post_body\": \"Text\"}, inplace = True)\n \n article_named_entities = []\n article_named_entity_types = []\n article_named_entity_metadata = []\n article_named_entity_metadata_wiki = []\n article_categories = []\n \n for i in range(len(news_df)):\n print(i)\n temp = news_df['Text'].iloc[i]\n temp2 = news_df['Title'].iloc[i]\n #print(temp2)\n \n #print('Getting article analysis from GCP')\n document = language.types.Document(\n content=temp,\n language='en',\n type='PLAIN_TEXT',\n )\n \n response = client.analyze_entities(\n document=document,\n encoding_type='UTF32',\n )\n \n app_ent_list = ['PERSON', 'ORGANIZATION']\n \n entity_names = []\n entity_type = []\n article_category = []\n #print('finished getting article analysis and now storing')\n \n for entity in response.entities:\n temp = entity.metadata\n temp2 = temp['wikipedia_url']\n if ((enums[entity.type] in app_ent_list) and (len(temp2) > 0)): #this is a hack that were doing for now where we are only looking for named entities that \n entity_names.append(entity.name)\n entity_type.append(enums[entity.type]) \n \n article_category = []\n article_category_conf = []\n try:\n response2 = client.classify_text(\n document=document,\n )\n \n temp = response2.categories\n for cats in temp:\n article_category.append(cats.name)\n article_category_conf.append(cats.confidence)\n from heapq import nlargest\n max_inds = nlargest(3, article_category_conf)\n temp_list = []\n for inds in max_inds:\n max_index = article_category_conf.index(inds)\n temp_list.append(max_index)\n art_cat = []\n for inds in temp_list:\n temp = article_category[inds]\n art_cat.append(category_dict[temp])\n # if len(response2.categories) > 0:\n # article_category.append(response2.categories)\n # else:\n # article_category.append('NA')\n except:\n art_cat = 'NA'\n \n article_categories.append(art_cat)\n article_named_entities.append(entity_names)\n article_named_entity_types.append(entity_type)\n \n \n news_df['Article Category'] = article_categories\n news_df['Article Entities'] = article_named_entities\n news_df['Article Entity Types'] = article_named_entity_types\n \n return news_df\n#******************************************************************************************** \n \n#******************************************************************************************** \ndef get_article_keywords(news_df): \n from gensim.summarization import keywords\n article_keywords = []\n \n for i in range(len(news_df)):\n print(i)\n temp = news_df['Text'].iloc[i]\n try:\n keys = keywords(temp).split('\\n')\n except:\n keys = 'NA'\n article_keywords.append(keys)\n\n news_df['Article Keywords'] = article_keywords\n \n return news_df\n\n#******************************************************************************************** \n \n#******************************************************************************************** \n \ndef get_grouped_articles_by_category(news_df):\n mappings_df = pd.read_csv(r'/Users/Gadcet/Documents/Ani extraction/Google_Bloverse Category Mappings.csv', encoding = 'latin1')\n google = list(mappings_df['Google name'])\n bloverse = list(mappings_df['Bloverse name'])\n \n category_list = list(set(bloverse))\n \n for cats in category_list:\n print('||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||')\n print(cats)\n cat_indices = []\n for ii in range(len(news_df)):\n cat_list = 
news_df['Article Category'].iloc[ii]\n            try:\n                if cats in cat_list:\n                    cat_indices.append(ii)\n            except:\n                continue\n        user_news_cat = news_df.iloc[cat_indices]\n        if len(user_news_cat) > 0:\n            print('Now printing out grouped articles (if any)')\n            print_grouped_articles(user_news_cat)\n\n#******************************************************************************************** \n \n#******************************************************************************************** \n \ndef assign_topic_hashtags(webhose_topics, webhose_hashtags, news_df):\n    topic_hashtags = []\n    for i in range(len(news_df)):\n        text = news_df['Text'].iloc[i]\n        title = news_df['Title'].iloc[i]\n        hashtag_list = []\n        for ii in range(len(webhose_topics)):\n            if webhose_topics[ii] in text:\n                hashtag_list.append(webhose_hashtags[ii])\n            elif webhose_topics[ii] in title: # was `topics`, an undefined name that raised NameError\n                hashtag_list.append(webhose_hashtags[ii])\n        topic_hashtags.append(hashtag_list)\n    news_df['Topic Hashtags'] = topic_hashtags\n    return news_df\n\n#******************************************************************************************** \n \n#******************************************************************************************** \n \ndef get_english_translation(text):\n    from google.cloud import translate\n    from google.cloud import language\n    from google.oauth2 import service_account\n    # point this code to the JSON file where the creds are kept\n    creds = service_account.Credentials.from_service_account_file(\n        '/Users/Gadcet/Documents/Ani extraction/webhose-tester-003736b2b834.json')\n\n    \n    client = translate.Client(credentials=creds)\n    output = client.translate(text)\n    return output['translatedText']\n\n#******************************************************************************************** \n \n#******************************************************************************************** \n\ndef translate_df_titles(news_df):\n    non_eng_country_list = ['IT', 'FR', 'DE', 'AE', 'PH', 'HK', 'PK', 'MX', 'SG', 'NL']\n    from google.cloud import translate\n    from google.cloud import language\n    from google.oauth2 import service_account\n    import time\n    # point this code to the JSON file where the creds are kept\n    creds = service_account.Credentials.from_service_account_file(\n        '/Users/Gadcet/Documents/Ani extraction/webhose-tester-003736b2b834.json')\n    client = translate.Client(credentials=creds)\n    \n    \n    english_titles = []\n    english_text = []\n    print('Now going through articles to detect those that are not English')\n    for i in range(len(news_df)):\n        print('Articles %s of %s' % (i,len(news_df)))\n        news_country = news_df.iloc[i]['country']\n        title = news_df.iloc[i]['title']\n        text = news_df.iloc[i]['post_body']\n        if news_country in non_eng_country_list:\n            try:\n                trans = client.translate(title)\n                trans2 = client.translate(text)\n            except:\n                print('Rate limit exceeded, sleeping for 2 minutes')\n                time.sleep(120) # waits for 2 minutes\n                trans = client.translate(title)\n                trans2 = client.translate(text)\n            #print(trans['translatedText'])\n            english_titles.append(trans['translatedText'])\n            english_text.append(trans2['translatedText'])\n        else:\n            english_titles.append(title)\n            english_text.append(text)\n    news_df['English Title'] = english_titles\n    news_df['post_body'] = english_text\n    news_df.rename(index=str, columns={\"English Title\": \"Title\", \"post_body\": \"Text\"}, inplace = True)\n    return news_df\n\n#def translate_df_titles(news_df):\n#    non_eng_country_list = 
['IT', 'FR', 'DE', 'AE', 'PH', 'HK', 'PK', 'MX', 'SG', 'NL']\n# from google.cloud import translate\n# from google.cloud import language\n# from google.cloud import translate\n# from google.oauth2 import service_account\n# import time\n# # point this code to the JSON file where the creds are kept\n# creds = service_account.Credentials.from_service_account_file(\n# '/Users/Gadcet/Documents/Ani extraction/My First Project-8521ee738526.json')\n# client = translate.Client(credentials=creds)\n# \n# \n# english_titles = []\n# english_text = []\n# print('Now going through articles to detect those that are not english')\n# for i in range(len(news_df)):\n# print('Articles %s of %s' % (i,len(news_df)))\n# news_country = news_df.iloc[i]['country']\n# title = news_df.iloc[i]['title']\n# text = news_df.iloc[i]['post_body']\n# if news_country in non_eng_country_list:\n# try:\n# lang = client.detect_language(title) \n# except:\n# print('Rate limit exceeded, sleeping for 2 minutes')\n# time.sleep(120) # waits for 2 minutes\n# \n# if lang['language'] != 'en':\n# #print('The original title was not in english')\n# try:\n# trans = client.translate(title)\n# trans2 = client.translate(text)\n# except:\n# print('Rate limit exceeded, sleeping for 2 minutes')\n# time.sleep(120) # waits for 2 minutes\n#\n# trans = client.translate(title)\n# trans2 = client.translate(text)\n# #print(trans['translatedText'])\n# english_titles.append(trans['translatedText'])\n# english_text.append(trans2['translatedText'])\n# else:\n# english_titles.append(title)\n# english_text.append(text)\n# else:\n# english_titles.append(title)\n# english_text.append(text)\n# news_df['English Title'] = english_titles\n# news_df['post_body'] = english_text\n# news_df.rename(index=str, columns={\"English Title\": \"Title\", \"post_body\": \"Text\"}, inplace = True)\n# return news_df\n\n#******************************************************************************************** \n \n#******************************************************************************************** \n \n\"\"\"\nSide-bar recommendation functions\n\"\"\"\n\ndef get_top_n_most_read(news_df, num_arts, curr_art_date):\n # Get articles in the last 7 days\n recommender_articles = get_recommender_news_subset(news_df, curr_art_date, timeframe = 1) \n views = list(recommender_articles['Article Views'])\n article_refs = list(recommender_articles['Article Ref'])\n sorted_ind = sorted(range(len(views)), key=lambda k: views[k], reverse=True)\n sorted_art_ref_list = [article_refs[i] for i in sorted_ind]\n article_recommendations = sorted_art_ref_list[0:num_arts]\n article_recommendations_df = recommender_articles[recommender_articles['Article Ref'].isin(article_recommendations)]\n return article_recommendations_df\n \n#******************************************************************************************** \n \n#******************************************************************************************** \n \n# Function to get a subset of news articles \ndef get_recommender_news_subset(news_df, date, timeframe = 7, country = None, category = None, country_flag = 'N', category_flag = 'N', country_cat_flag = 'N'):\n unique_dates = list(news_df['Date'].unique())\n date_index = unique_dates.index(date)\n \n # check if we have enough data for the timeframe\n if date_index < timeframe:\n date_list = unique_dates[0:date_index+1]\n else:\n date_list = unique_dates[date_index-timeframe:date_index+1]\n\n timeframe_articles = news_df[news_df['Date'].isin(date_list)]\n\n if country_flag == 'Y':\n 
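# Illustrative usage of the flags above, e.g. get_recommender_news_subset(news_df, curr_art_date, country='US', country_flag='Y')\n        # keeps only that country's articles from the date window before any ranking is done\n        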
recommender_articles = timeframe_articles[timeframe_articles['country'] == country]\n        return recommender_articles\n    elif category_flag == 'Y':\n        cat_indices = []\n        # filter within the date window (timeframe_articles), not the full news_df, so the timeframe is respected\n        for ii in range(len(timeframe_articles)):\n            cat_list = timeframe_articles['Article Category'].iloc[ii]\n            try:\n                if category in cat_list:\n                    cat_indices.append(ii)\n            except:\n                continue\n        recommender_articles = timeframe_articles.iloc[cat_indices]\n        return recommender_articles\n    elif country_cat_flag == 'Y':\n        temp = timeframe_articles[timeframe_articles['country'] == country]\n        cat_indices = []\n        for ii in range(len(temp)):\n            cat_list = temp['Article Category'].iloc[ii]\n            try:\n                if category in cat_list:\n                    cat_indices.append(ii)\n            except:\n                continue\n        recommender_articles = temp.iloc[cat_indices]\n        return recommender_articles\n    else:\n        return timeframe_articles\n\n#******************************************************************************************** \n    \n#******************************************************************************************** \n    \ndef get_similar_articles_sidebar(news_df, curr_art_ref, curr_art_headline, curr_art_date, curr_art_country, curr_art_category, num_arts = 5):\n    # Get subset of articles for recommendation\n    recommender_articles = get_recommender_news_subset(news_df, curr_art_date, timeframe=7, country = curr_art_country, category = curr_art_category, country_cat_flag = 'Y') \n    if len(recommender_articles) < 50: \n        print('Insufficient number of articles for recommendation so we are widening the net')\n        recommender_articles = get_recommender_news_subset(news_df, curr_art_date, timeframe=7, country = curr_art_country, country_flag = 'Y') \n    \n    # Get article recommendations \n    print('We have a sufficient number of articles to generate recommendations')\n    output = get_similar_articles(curr_art_ref, recommender_articles, num_arts, 1.75)\n    if output is not None:\n        print('We had sufficiently similar articles to push out these recommendations')\n        recommendation_df = output[1]\n    else:\n        # Tag this as 'Other articles you may be interested in'\n        print('We were unable to get sufficient recommendations so are getting vaguely related articles')\n        output = get_similar_articles(curr_art_ref, recommender_articles, num_arts, 1)\n        if output is not None:\n            print('We were able to get a sufficient number of loosely related articles')\n            recommendation_df = output[1]\n        else:\n            print(\"\"\"\n                  We currently don't have enough articles to generate recommendations so\n                  we are going to list out the 5 most read articles today across the bloverse instead\n                  \"\"\")\n            recommendation_df = get_top_n_most_read(news_df, num_arts, curr_art_date)\n    \n    print('Current article is: %s' % curr_art_headline)\n    print('Recommendations are:')\n    for i in range(len(recommendation_df)):\n        title = recommendation_df.iloc[i]['Title']\n        print(title) \n\n#******************************************************************************************** \n    \n#******************************************************************************************** \n    \n\"\"\"\nGet the most read stories across the launch countries for each category\n\"\"\"\n    \n# function to get the top 2 most read [category] for each country that day\ndef 
get_category_most_read_article_for_each_country(news_df, curr_art_date, curr_art_category):\n countries_list = list(set(news_df['country']))\n if 'MY' in countries_list:\n countries_list.remove('MY')\n if 'TH' in countries_list:\n countries_list.remove('TH')\n \n today_news = news_df[news_df['Date'] == curr_art_date]\n cat_indices = []\n for ii in range(len(today_news)):\n cat_list = today_news['Article Category'].iloc[ii]\n try:\n if curr_art_category in cat_list:\n cat_indices.append(ii)\n except:\n continue\n \n today_news_cat = today_news.iloc[cat_indices]\n \n top_news_cat_country_ref = []\n for country in countries_list:\n today_news_cat_country = today_news_cat[today_news_cat['country'] == country]\n #print(len(today_news_cat_country))\n \n # Get a list of the articles refs\n article_ref_list = list(today_news_cat_country['Article Ref'])\n \n # Get a list of the article points\n article_ref_points = list(today_news_cat_country['Article Points'])\n \n # Order them\n sorted_ind = sorted(range(len(article_ref_points)), key=lambda k: article_ref_points[k], reverse=True)\n sorted_art_list = [article_ref_list[i] for i in sorted_ind]\n sorted_art_points_list = [article_ref_points[i] for i in sorted_ind]\n if len(sorted_art_list) > 1:\n top_news_cat_country_ref.append(sorted_art_list[0])\n top_news_cat_country_ref.append(sorted_art_list[1])\n \n top_country_cat_articles_df = news_df[news_df['Article Ref'].isin(top_news_cat_country_ref)]\n return top_country_cat_articles_df\n \n#******************************************************************************************** \n \n#******************************************************************************************** \n\ndef get_category_recommendations(news_df, curr_art_ref, curr_art_date, curr_art_headline, curr_art_category):\n # select the articles that have been published today\n today_news = news_df[news_df['Date'] == curr_art_date]\n print(len(today_news))\n cat_indices = []\n for ii in range(len(today_news)):\n cat_list = today_news['Article Category'].iloc[ii]\n try:\n if curr_art_category in cat_list:\n cat_indices.append(ii)\n except:\n continue\n today_news_cat = today_news.iloc[cat_indices]\n\n top_news_cat = get_category_most_read_article_for_each_country(news_df, curr_art_date, curr_art_category)\n # Get a list of the articles refs\n article_ref_list = list(top_news_cat['Article Ref'])\n #print(article_ref_list[0])\n \n # Get a list of the article points\n article_ref_points = list(top_news_cat['Article Points'])\n \n # Order them\n sorted_ind = sorted(range(len(article_ref_points)), key=lambda k: article_ref_points[k], reverse=True)\n sorted_art_list = [article_ref_list[i] for i in sorted_ind]\n sorted_art_points_list = [article_ref_points[i] for i in sorted_ind]\n \n if curr_art_ref in sorted_art_list:\n sorted_art_list.remove(curr_art_ref)\n\n \n # Get the top 10 most read articles for that category across the world - in production we would want to get the top 3 most read articles in that category and randomly shuffle and feed it out as recommendations. 
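For example (illustrative, assuming numpy is imported as np): np.random.permutation(sorted_art_list[0:3]) would shuffle the top three before serving.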
\n    # We should also monitor what we have and remove recommendations once the person has seen them and not clicked on them, so that we keep their feed of recommendations ever fresh\n    article_recommendations = sorted_art_list[0:10]\n    article_recommendations_1 = top_news_cat[top_news_cat['Article Ref'].isin(article_recommendations)]\n    \n    print('The current article headline is %s' % curr_art_headline)\n    for i in range(len(article_recommendations_1)):\n        title = article_recommendations_1.iloc[i]['Title']\n        print(title)\n    \n#******************************************************************************************** \n    \n#******************************************************************************************** \n    \n\"\"\"\nFunctions related to the wiki_entity_country_from_url.py\n\"\"\"\ndef tag_visible(element):\n    if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n        return False\n    if isinstance(element, Comment):\n        return False\n    return True\n\ndef text_from_html(body):\n    soup = BeautifulSoup(body, 'html.parser')\n    texts = soup.findAll(text=True)\n    visible_texts = filter(tag_visible, texts) \n    return u\" \".join(t.strip() for t in visible_texts)\n\ndef get_entity_points(index):\n    points = 0\n    if 0 <= index < 5:\n        points = 10\n    elif 5 <= index < 10:\n        points = 5\n    elif 10 <= index < 20:\n        points = 2\n    elif index >= 20:\n        points = 1\n    return points\n\n# Get the total points for the entity across the whole article\ndef get_keyword_total_points(keyword, headline, text):\n    import nltk\n    sentences = nltk.sent_tokenize(text)\n    points = 0\n    if keyword in headline:\n        points += 20\n    for i in range(len(sentences)):\n        temp = sentences[i]\n        if keyword in temp:\n            pnt = get_entity_points(i)\n            points += pnt\n    return points\n\n\ndef get_top_5_entity_countries(country_list, text): # known quirk: a country with several variants in the text (e.g. 'United States' and 'U.S.') is scored once per variant, but the top answer still comes out right\n    \n    country_points_list = []\n    top_5_country_list = []\n    for country in country_list:\n        points = get_keyword_total_points(country, '', text)\n        country_points_list.append(points)\n    sorted_ind = sorted(range(len(country_points_list)), key=lambda k: country_points_list[k], reverse=True)\n    sorted_country_list = [country_list[i] for i in sorted_ind]\n    sorted_points_list = [country_points_list[i] for i in sorted_ind]\n    \n    try:\n        top_5_countries = sorted_country_list[0:5]\n        coverage = round(sorted_points_list[0]/sum(sorted_points_list),2)\n        for countries in top_5_countries:\n            top_5_country_list.append(countries)\n        \n        return top_5_country_list, coverage\n    except:\n        return ['Unknown'], 0\n\ndef get_name_from_wiki_url(url): # Strips out the name of an entity from its Wikipedia URL\n    text = re.search(r'(?<=wiki/)[^.\s]*',url)\n    names1 = text.group(0)\n    names2 = names1.split('_')\n    if len(names2) > 1:\n        names2 = ' '.join(names2)\n    else:\n        names2 = names2[0]\n    return names2\n\ndef generate_country_alpha_dicts(): # This generates a dict of world countries and maps them to their 2-letter ISO codes\n    import pycountry\n    # Create a dictionary from country names to their 2-letter codes\n    gb_countries = ['England', 'Scotland', 'Northern Ireland', 'Wales']\n    country_names_list = []\n    country_alpha_list = []\n    for country in gb_countries:\n        country_names_list.append(country)\n        country_alpha_list.append('GB')\n    \n    # Accommodate other ways America is written (may need to add some more exceptions here)\n    us_countries = ['America', 'U.S', 'U.S.A']\n    for country in 
us_countries:\n        country_names_list.append(country)\n        country_alpha_list.append('US')\n    \n    for country in pycountry.countries:\n        country_names_list.append(country.name)\n        country_alpha_list.append(country.alpha_2)\n    \n    # Add in the edge case for when it's unknown\n    country_names_list.append('Unknown')\n    country_alpha_list.append('Unknown')\n    \n    country_alpha_dict = dict(zip(country_names_list, country_alpha_list))\n    alpha_to_country_dict = dict(zip(country_alpha_list, country_names_list))\n    \n    # Save the country_alpha dict and alpha_country dict into a pickle *** Change this filename to your current one\n    with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/country_alpha_dict.pickle', \"wb\") as output_file:\n        pickle.dump(country_alpha_dict, output_file)\n    \n    with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/alpha_country_dict.pickle', \"wb\") as output_file:\n        pickle.dump(alpha_to_country_dict, output_file)\n    \n    return None\n\ntry:\n    with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/country_alpha_dict.pickle', \"rb\") as input_file:\n        country_alpha_dict = pickle.load(input_file)\nexcept:\n    generate_country_alpha_dicts()\n    with open('/Users/dr_d3mz/Documents/GitHub/Bloverse_Data_Science/Development/country_alpha_dict.pickle', \"rb\") as input_file:\n        country_alpha_dict = pickle.load(input_file)\n    print('Country dicts have been generated and saved')\n\ndef get_infobox_text(soup): # This extracts all the text from the infobox card of a wiki page\n    infobox_list = ['infobox', 'infobox vcard', 'infobox biography vcard', 'infobox geography vcard']\n    for vcards in infobox_list: # loops through a number of infobox vcards to find the one that works\n        try:\n            info_text = soup.find_all(\"table\",vcards)[0]\n            if len(info_text) > 0:\n                break\n        except:\n            continue\n    try:\n        col_len = len(info_text)\n    except:\n        info_text = []\n    return info_text\n\ndef get_entity_country_from_infobox(url): # Get the country of an entity from just the infobox text\n    try:\n        html = urllib.request.urlopen(url).read()\n        soup = BeautifulSoup(html,'lxml')\n        \n    except:\n        text = re.search(r'(?<=wiki/)[^.\s]*',url)\n        encoded_name = urllib.parse.quote(text.group(0))\n        url = url.replace(text.group(0),encoded_name)\n        html = urllib.request.urlopen(url).read()\n        soup = BeautifulSoup(html,'lxml')\n\n    \n    info_text = get_infobox_text(soup)\n    \n    if len(info_text) > 0:\n        info_text_list = []\n        for cols in info_text:\n            info_text_list.append(cols.text)\n        \n        text = '. 
'.join(info_text_list)\n        \n        with open('/Users/Gadcet/Documents/Ani extraction/country_alpha_dict.pickle', \"rb\") as input_file:\n            country_alpha_dict = pickle.load(input_file)\n        \n        country_list_full = []\n        \n        for country in country_alpha_dict.keys(): # Find a list of countries appearing in the text\n            if country in text:\n                country_list_full.append(country)\n        \n        countries, confidence = get_top_5_entity_countries(country_list_full, text)\n        \n        for country in list(countries): # Clean up the list of countries; iterate over a copy so removing while looping is safe\n            for cntry in countries:\n                if country in cntry:\n                    if country != cntry:\n                        countries.remove(country)\n        \n        country_final = [] # Convert the countries into their 2-digit ISO codes\n        for country in countries:\n            country_final.append(country_alpha_dict[country])\n        #print(country_final)\n\n        if len(country_final) > 0:\n            unique_countries = list(set(country_final))\n            return unique_countries[0], confidence, unique_countries[1:], unique_countries\n        else:\n            return None\n    else:\n        return None\n\ndef get_entity_country_from_wiki_text(url):\n\n    try:\n        name = get_name_from_wiki_url(url)\n        text = wikipedia.summary(name, sentences=2)\n    except:\n        try:\n            html = urllib.request.urlopen(url).read()\n            text = text_from_html(html)\n        except:\n            text = re.search(r'(?<=wiki/)[^.\s]*',url)\n            encoded_name = urllib.parse.quote(text.group(0))\n            url = url.replace(text.group(0),encoded_name)\n            html = urllib.request.urlopen(url).read()\n            text = text_from_html(html)\n\n    with open('/Users/Gadcet/Documents/Ani extraction/country_alpha_dict.pickle', \"rb\") as input_file:\n        country_alpha_dict = pickle.load(input_file)\n    \n    country_list_full = []\n    for country in country_alpha_dict.keys():\n        if country in text:\n            country_list_full.append(country)\n    \n    countries, confidence = get_top_5_entity_countries(country_list_full, text)\n    #print(countries)\n    \n    country_final = [] # Convert the countries into their 2-digit ISO codes\n    for country in countries:\n        country_final.append(country_alpha_dict[country])\n    #print(country_final)\n\n    if len(country_final) > 0:\n        unique_countries = list(set(country_final))\n        return unique_countries[0], confidence, unique_countries[1:], unique_countries\n    else:\n        # return a safe 4-tuple so get_wiki_country can always unpack the result\n        return 'Unknown', 0, [], []\n\ndef get_wiki_country(url):\n    name = get_name_from_wiki_url(url)\n    try:\n        main_country, confidence, other_countries, all_countries = get_entity_country_from_infobox(url)\n        if len(all_countries) > 2:\n            main_country, confidence, other_countries, all_countries = get_entity_country_from_wiki_text(url)\n    except:\n        main_country, confidence, other_countries, all_countries = get_entity_country_from_wiki_text(url)\n    \n    if len(all_countries) > 2:\n        main_country = 'Multiple'\n        other_countries = all_countries\n    return main_country, confidence, other_countries, all_countries\n\ndef get_wiki_summary(url):\n    #print(url) \n    try:\n        name = get_name_from_wiki_url(url)\n        summ = wikipedia.summary(name, sentences=2)\n    except:\n        try:\n            text = re.search(r'(?<=wiki/)[^.\s]*',url)\n            encoded_name = urllib.parse.quote(text.group(0))\n            url2 = url.replace(text.group(0),encoded_name)\n            html = urllib.request.urlopen(url2).read()\n            text = text_from_html(html)\n            sentences = nltk.sent_tokenize(text)\n            summ = ' '.join(sentences[2:4])\n        except:\n            summ = 'Not Available'\n    \n    return summ\n\n#******************************************************************************************** \n    \n#******************************************************************************************** \n    \n# Check to see if this file is being executed as the \"Main\" python\n# script instead of 
being used as a module by some other python script\n# This allows us to use the module which ever way we want.\nif __name__ == '__main__':\n print('Successfully loaded the module')","sub_path":"Functions/.ipynb_checkpoints/dummy_data_funcs-checkpoint.py","file_name":"dummy_data_funcs-checkpoint.py","file_ext":"py","file_size_in_byte":85341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"368926181","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright © Manoel Vilela 2016\n#\n# @team: NewayPix\n# @project: slabot: Slack-Bot\n# @author: Manoel Vilela\n# @email: manoel_vilela@engineer.com\n#\n\nimport json\nimport logging.config\nimport os\n\n\ndef setup_logging(path='logging.json'):\n \"\"\"Setup logging configuration\"\"\"\n cdir = os.path.dirname(__file__)\n with open(cdir + '/' + path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n\nsetup_logging()\nlogger = logging.getLogger(__name__)\n","sub_path":"slabot/log/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"271874213","text":"## Python CookBook \n## by David Beazley\n\n## 4. Iterator and Generator\n\n## Flatten a nested Sequence\n\n## yield from is nice shortcut to use\n## if your generator calls other generators \n\nfrom collections import Iterable\n\ndef flatten(items, ignore_types = (str, bytes)):\n\tfor x in items:\n\t\tif isinstance(x, Iterable) and not isinstance(x, ignore_types):\n\t\t\tyield from flatten(x)\n#### yield from : is equal to \n#### for i in flatten(x):\n#### yield i \n\n\t\telse:\n\t\t\tyield x\n\nnum_list = [1,2,[3,[4,5,6],7],9]\n\nfor x in flatten(num_list):\n\tprint(x)\n\nstr_list = ['Thomas', ['Paulo',['Luis'], 'Leo'], 'Qing']\n\nfor x in flatten(str_list):\n\tprint(x)\n\nprint(list(flatten(str_list)))","sub_path":"PythonCookBook_4_Flatten_Nested_Seq.py","file_name":"PythonCookBook_4_Flatten_Nested_Seq.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"244354144","text":"from os.path import exists, join as join_path\nfrom os import makedirs\n\nimport argparse as ap\nimport numpy as np\nimport re\nimport sys\n\nfrom tqdm import tqdm\n\nscript_file = sys.argv[0]\n\nparser = ap.ArgumentParser()\nparser.add_argument('--hop', type=int, default=20, help='Number of hop frames')\nparser.add_argument('--max-samples', type=int, default=3, help='Max samples from a file')\nparser.add_argument('--segment-length', type=int, default=1000, help='Segment length from a file')\nparser.add_argument('--step', type=int, default=500, help='Step size')\nparser.add_argument('--window', type=int, default=100, help='Number of window frames')\nparser.add_argument('data_dir', action=\"store\")\nparser.add_argument('new_data_dir', action=\"store\")\nparser.add_argument('trial_segments_file', action=\"store\")\nargs = parser.parse_args()\n\n\ndef make_directory(path):\n if not exists(path):\n makedirs(path)\n\n\ndef make_file_dict(file, pattern='[\\s]+'):\n file_dict = dict()\n with open(file) as f:\n for line in f.readlines():\n tokens = re.split(pattern, line.strip())\n file_dict[tokens[0]] = tokens[1]\n return file_dict\n\n\nutt2num_frames = join_path(args.data_dir, 'utt2num_frames')\nutt2spk = join_path(args.data_dir, 'utt2spk')\nfeats_scp = join_path(args.data_dir, 'feats.scp')\n\nif not exists(utt2num_frames):\n 
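# utt2num_frames is typically produced during Kaldi data preparation; without it we cannot window the features below\n    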
print('{}: utt2num_frames file missing'.format(script_file))\n exit(1)\n\nif not exists(utt2spk):\n print('{}: utt2spk file missing'.format(script_file))\n exit(1)\n\nif not exists(feats_scp):\n print('{}: feats.scp file missing'.format(script_file))\n exit(1)\n\nmake_directory(args.new_data_dir)\n\nutt2num_frames_dict = make_file_dict(utt2num_frames)\nutt2spk_dict = make_file_dict(utt2spk)\nfeats_scp_dict = make_file_dict(feats_scp)\n\nnew_feats_scp = open(join_path(args.new_data_dir, 'feats.scp'), 'w')\nnew_utt2spk = open(join_path(args.new_data_dir, 'utt2spk'), 'w')\ntrial_segments = open(args.trial_segments_file, 'w')\n\nprint('{}: Making segments'.format(script_file))\np_bar = tqdm(total=len(utt2num_frames_dict.keys()))\nfor utt, frames in list(utt2num_frames_dict.items()):\n try:\n frames = int(frames)\n if frames >= args.segment_length:\n sample_heads = np.arange(0, frames - args.segment_length, args.step)\n num_samples = args.max_samples if args.max_samples <= len(sample_heads) else len(sample_heads)\n starts = np.random.choice(sample_heads, num_samples, replace=False)\n scp = feats_scp_dict[utt]\n spk = utt2spk_dict[utt]\n for start_frame in starts:\n trial_segments.write('{} {} {} {}\\n'.format(utt, start_frame, args.segment_length, spk))\n final_frame = start_frame + args.segment_length - args.window\n while start_frame <= final_frame:\n end_frame = start_frame + args.window\n new_utt2spk.write('{}_{:06d}-{:06d} {}\\n'.format(utt, start_frame, end_frame, spk))\n new_feats_scp.write('{}_{:06d}-{:06d} {}[{}:{}]\\n'.format(utt, start_frame, end_frame, scp,\n start_frame, end_frame - 1))\n start_frame = end_frame - args.hop\n except KeyError:\n pass\n p_bar.update(1)\n\np_bar.close()\n\nprint('{}: Writing to {}'.format(script_file, args.new_data_dir))\nnew_feats_scp.close()\nnew_utt2spk.close()\ntrial_segments.close()\n","sub_path":"kaldi/scripts/make_segments.py","file_name":"make_segments.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"191277755","text":"# Sentiment Analysis Using the Movie Ratings Data (Python)\r\n\r\n# prepare for Python version 3x features and functions\r\nfrom __future__ import division, print_function\r\n\r\n# import packages for text processing and machine learning\r\nimport os # operating system commands\r\nimport re # regular expressions\r\nimport nltk # draw on the Python natural language toolkit\r\nimport pandas as pd # DataFrame structure and operations\r\nimport matplotlib.pyplot as plt # 2D plotting\r\n\r\n# list files in directory omitting hidden files\r\ndef listdir_no_hidden(path):\r\n start_list = os.listdir(path)\r\n end_list = []\r\n for file in start_list:\r\n if (not file.startswith('.')):\r\n end_list.append(file)\r\n return(end_list) \r\n \r\n\r\n# define list of codes to be dropped from document\r\n# carriage-returns, line-feeds, tabs\r\ncodelist = ['\\r', '\\n', '\\t'] \r\n\r\n# there are certain words we will ignore in subsequent\r\n# text processing... 
these are called stop-words \r\n# and they consist of prepositions, pronouns, and \r\n# conjunctions, interrogatives, ...\r\n# we begin with the list from the natural language toolkit\r\n# examine this initial list of stopwords\r\nnltk.download('stopwords')\r\n\r\n# let's look at that list \r\n# print(nltk.corpus.stopwords.words('english'))\r\n\r\n# previous analysis of a list of top terms showed a number of words, along \r\n# with contractions and other word strings, to drop from further analysis; we add\r\n# these to the usual English stopwords to be dropped from a document collection\r\nmore_stop_words = ['cant','didnt','doesnt','dont','goes','isnt','hes',\\\r\n    'shes','thats','theres','theyre','wont','youll','youre','youve', 'br',\\\r\n    've', 're', 'vs'] \r\n\r\n# start with the initial list and add to it for movie text work \r\nstoplist = nltk.corpus.stopwords.words('english') + more_stop_words\r\n\r\n# text parsing function for creating text documents \r\n# there is more we could do for data preparation \r\n# stemming... looking for contractions... possessives... \r\n# but we will work with what we have in this parsing function\r\n# if we want to do stemming at a later time, we can use\r\n# porter = nltk.PorterStemmer() \r\n# in a construction like this\r\n# words_stemmed = [porter.stem(word) for word in initial_words] \r\ndef text_parse(string):\r\n    # replace non-alphanumeric with space \r\n    temp_string = re.sub('[^a-zA-Z]', ' ', string) \r\n    # replace codes with space\r\n    for i in range(len(codelist)):\r\n        stopstring = ' ' + codelist[i] + ' '\r\n        temp_string = re.sub(stopstring, ' ', temp_string) \r\n    # replace single-character words with space\r\n    temp_string = re.sub('\\s.\\s', ' ', temp_string) \r\n    # convert uppercase to lowercase\r\n    temp_string = temp_string.lower() \r\n    # replace selected character strings/stop-words with space\r\n    for i in range(len(stoplist)):\r\n        stopstring = ' ' + str(stoplist[i]) + ' '\r\n        temp_string = re.sub(stopstring, ' ', temp_string) \r\n    # replace multiple blank characters with one blank character\r\n    temp_string = re.sub('\\s+', ' ', temp_string) \r\n    return(temp_string) \r\n\r\nos.chdir(\"C:/projectFall17/TeamRed_final/\")\r\n\r\n# read in positive and negative word lists from Hu and Liu (2004)\r\nwith open('ref_files/Hu_Liu_positive_word_list.txt','rt') as f:\r\n    positive_word_list = f.read().split() \r\nwith open('ref_files/Hu_Liu_negative_word_list.txt','rt') as f:\r\n    negative_word_list = f.read().split() \r\n    \r\n# count of positive words in a text document \r\ndef count_positive(text): \r\n    positive = [w for w in text.split() if w in positive_word_list]\r\n    return(len(positive))\r\n\r\n# count of negative words in a text document \r\ndef count_negative(text): \r\n    negative = [w for w in text.split() if w in negative_word_list]\r\n    return(len(negative))\r\n    \r\n# count the total number of words \r\ndef count_total(text): \r\n    total = [w for w in text.split()]\r\n    return(len(total)) \r\n\r\n# define text measure for positive score as percentage of positive words \r\ndef score_positive(text): \r\n    positive = [w for w in text.split() if w in positive_word_list]\r\n    total = [w for 
w in text.split()]\r\n if len(total) > 0:\r\n return 100 * len(negative)/len(total)\r\n else:\r\n return 0\r\n\r\ndef compute_scores(corpus):\r\n # use the complete word lists for POSITIVE and NEGATIVE measures\r\n # to score all documents in a corpus or list of documents\r\n positive = []\r\n negative = []\r\n days = []\r\n count = 0\r\n for document in corpus:\r\n positive.append(score_positive(document)) \r\n negative.append(score_negative(document))\r\n days.append(count)\r\n count = count+1\r\n return(positive, negative, days)\r\n \r\n# we use movie ratings data from Mass et al. (2011) \r\n# available at http://ai.stanford.edu/~amaas/data/sentiment/\r\n\r\n# function for creating corpus and aggregate document \r\n# input is directory path for documents\r\n# document parsing accomplished by text_parse function\r\n# directory of parsed files set up for manual inspection\r\ndef corpus_creator (input_directory_path, output_directory_path):\r\n # identify the file names in unsup directory\r\n file_names = listdir_no_hidden(path = input_directory_path)\r\n # create list structure for storing parsed documents \r\n document_collection = [] \r\n # initialize aggregate document for all documents in set\r\n aggregate_document = ''\r\n # create a directory for parsed files \r\n parsed_file_directory = output_directory_path\r\n os.mkdir(parsed_file_directory)\r\n # parse each file and write to directory of parsed files\r\n for filename in file_names:\r\n with open(os.path.join(input_directory_path, filename), 'r') as infile: \r\n this_document = text_parse(infile.read())\r\n aggregate_document = aggregate_document + this_document\r\n document_collection.append(this_document)\r\n outfile = parsed_file_directory + filename\r\n with open(outfile, 'wt') as f:\r\n f.write(str(this_document)) \r\n aggregate_words = [w for w in aggregate_document.split()] \r\n aggregate_corpus = nltk.Text(aggregate_words) \r\n return(file_names, document_collection, aggregate_corpus)\r\n \r\n \r\n# begin working with the unsup corpus\r\nunsup_file_names, unsup_corpus, unsup_aggregate_corpus = \\\r\n corpus_creator(input_directory_path = 'input_raw_data/twitter_taco_17/',\\\r\n output_directory_path = 'output_parsed_data/taco_17_v4/')\r\n \r\n# examine frequency distribution of words in unsup corpus\r\nunsup_freq = nltk.FreqDist(unsup_corpus)\r\nprint('\\nNumber of Unique Words in unsup corpus',len(unsup_freq.keys()))\r\n# Number of Unique Words in unsup corpus 12518\r\n# print('\\nTop Fifty Words in unsup Corpus:',unsup_freq.keys()[0:50])\r\n\r\n#initial_words = unsup_aggregate_corpus\r\n# stemming... looking for contractions... possessives... 
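(for instance, porter.stem('running') returns 'run')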
\r\nporter = nltk.PorterStemmer() \r\n# stem every parsed document (note: this does not make the words unique)\r\nwords_stemmed = [porter.stem(word) for word in unsup_corpus] \r\n\r\n \r\n# examine frequency distribution of words in unsup corpus\r\nunsup_freq = nltk.FreqDist(unsup_aggregate_corpus)\r\nprint('\nNumber of Unique Words in unsup aggregate corpus',len(unsup_freq.keys()))\r\n# Number of Unique Words in unsup corpus 93000\r\n# use most_common() for frequency order: FreqDist.keys() is not sorted by count under Python 3\r\nprint('\nTop Fifty Words in unsup aggregate Corpus:',[w for w, c in unsup_freq.most_common(50)])\r\n \r\n\r\n# identify the most frequent unsup words from the positive word list\r\n# here we use set intersection to find a list of the top 25 positive words \r\nlength_test = 0 # initialize test length\r\nnkeys = 0 # slicing index for frequency table extent\r\nwhile (length_test < 25):\r\n    length_test =\\\r\n        len(set(w for w, c in unsup_freq.most_common(nkeys)) & set(positive_word_list))\r\n    nkeys = nkeys + 1\r\nprint(nkeys)\r\n# nkeys reached 1353 to get 25 positive words \r\nselected_positive_set =\\\r\n    set(w for w, c in unsup_freq.most_common(nkeys)) & set(positive_word_list)\r\nselected_positive_words = list(selected_positive_set)\r\nselected_positive_words.sort()\r\nprint('\nSelected Positive Words:', selected_positive_words)\r\nprint(len(selected_positive_words))\r\n\r\n# identify the most frequent unsup words from the negative word list\r\n# here we use set intersection to find a list of the top 25 negative words \r\nlength_test = 0 # initialize test length\r\nnkeys = 0 # slicing index for frequency table extent\r\nwhile (length_test < 25):\r\n    length_test =\\\r\n        len(set(w for w, c in unsup_freq.most_common(nkeys)) & set(negative_word_list))\r\n    nkeys = nkeys + 1\r\nprint(nkeys)\r\n# nkeys is 1121 to get 25 negative words\r\nselected_negative_set =\\\r\n    set(w for w, c in unsup_freq.most_common(nkeys)) & set(negative_word_list)\r\n# list is actually 25 items \r\nselected_negative_words = list(selected_negative_set)\r\nselected_negative_words.sort()\r\nprint('\nSelected Negative Words:', selected_negative_words)\r\nprint(len(selected_negative_words))\r\n\r\n# use the complete word lists for POSITIVE and NEGATIVE measures/scores\r\n\r\npositive, negative, days = compute_scores(unsup_corpus)\r\n\r\n# create data frame to explore POSITIVE and NEGATIVE measures\r\nunsup_data = {'POSITIVE': positive, 'NEGATIVE': negative} \r\n# unsup_data = {'file': unsup_file_names,\\\r\n#              'POSITIVE': positive, 'NEGATIVE': negative} \r\n# pd.DataFrame({'A': a, 'B': b}, index=[0]) \r\n# not working with index=[0]\r\n# unsup_data_frame = pd.DataFrame(unsup_data, index=[0])\r\nunsup_data_frame = pd.DataFrame(unsup_data)\r\n\r\n# summary of distributions of POSITIVE and NEGATIVE scores for unsup corpus\r\nprint(unsup_data_frame.describe())\r\n\r\n# -----------------------results look reasonable\r\nprint('\nCorrelation between POSITIVE and NEGATIVE',\\\r\n    round(unsup_data_frame['POSITIVE'].corr(unsup_data_frame['NEGATIVE']),3))\r\n\r\n# scatter plot of POSITIVE and NEGATIVE scores for the unsup corpus\r\nax = plt.axes()\r\nax.scatter(unsup_data_frame['NEGATIVE'], unsup_data_frame['POSITIVE'],\\\r\n    facecolors = 'none', edgecolors = 'blue')\r\nax.set_xlabel('NEGATIVE')\r\nax.set_ylabel('POSITIVE') \r\nplt.savefig('fig_sentiment_text_measures_scatter_plot_taco17.pdf', \r\n    bbox_inches = 'tight', dpi=None, facecolor='none', edgecolor='blue', \r\n    orientation='portrait', papertype=None, format=None, \r\n    transparent=True, pad_inches=0.25, frameon=None) \r\n\r\nplt.figure()\r\nt = days\r\ns = positive\r\ns2 = negative\r\nplt.plot(t, s2, 'r-', t, s, 'b--')\r\nplt.xlabel('days from 8/1/2017 - 
10/31/2017 (Taco Bell)')\r\nplt.ylabel('sentiment %, blue-positive, red-negative') \r\n# save before show: once show() returns, the figure is cleared and savefig would write a blank file\r\nplt.savefig('fig_line_plot_taco17.pdf', \r\n    bbox_inches = 'tight', dpi=None, facecolor='none', edgecolor='blue', \r\n    orientation='portrait', papertype=None, format=None, \r\n    transparent=True, pad_inches=0.25, frameon=None) \r\nplt.show() \r\n# Suggestions for the student:\r\n# Employ stemming prior to the creation of terms-by-document matrices.\r\n# Try alternative positive and negative word sets for sentiment scoring.\r\n# Try word sets that relate to a wider variety of emotional or opinion states.\r\n# Better still, move beyond a bag-of-words approach to sentiment. Use\r\n# the tools of natural language processing and define text features\r\n# based upon combinations of words such as bigrams (pairs of words)\r\n# and taking note of parts of speech. Yet another approach would be\r\n# to ignore the predefined negative and positive word lists and work directly \r\n# with identified text features that correlate with movie review ratings or\r\n# do a good job of classifying reviews into positive and negative groups.\r\n# Text features within text classification problems may be defined \r\n# on term document frequency alone or on measures of term document\r\n# frequency adjusted by term corpus frequency. Using alternative \r\n# features and text measures as well as alternative classification methods,\r\n# run a true benchmark within a loop, using hundreds or thousands of iterations.\r\n# See if you can improve upon the performance of modeling methods by\r\n# modifying the values of arguments to algorithms used here.\r\n# Use various methods of classifier performance to evaluate classifiers.\r\n# Try text classification for the movie reviews without using initial\r\n# lists of positive and negative words. 
That is, identify text features\r\n# for thumbs up/down text classification directly from the training set.\r\n\r\n\r\n","sub_path":"parse_sentiment_plot_taco17.py","file_name":"parse_sentiment_plot_taco17.py","file_ext":"py","file_size_in_byte":12647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"349543188","text":"class BankAccount:\n def __init__(self, input_holder_name, input_balance,\n input_account_type):\n self.holder_name = input_holder_name\n self.balance = input_balance\n self.account_type = input_account_type\n self.rates = {\n \"personal\": 10,\n \"business\": 50,\n \"savings\": -5\n }\n\n def pay_in(self, amount):\n self.balance += amount\n\n def pay_monthly_fee(self):\n self.balance -= self.rates[self.account_type]","sub_path":"day_01/classes/modules/bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"361801502","text":"import os\nfrom flask import Flask, request, jsonify, render_template, abort, make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import update, func\n\napp = Flask(__name__)\n\nproject_dir = os.path.dirname(os.path.abspath(__file__))\ndatabase_file = \"sqlite:///{}\".format(os.path.join(project_dir, \"user_preferences.db\"))\napp.config['SQLALCHEMY_DATABASE_URI'] = database_file\napp.config['SECRET_KEY'] = 'app_test_af.guzman'\ndb = SQLAlchemy(app)\n\nclass user_entry(db.Model):\n __tablename__ = 'user_preferences'\n user_name = db.Column(db.String(255), primary_key = True)\n user_color = db.Column(db.String(255))\n user_pref = db.Column(db.String(255))\n\n def __init__(self, user_name, user_color, user_pref): \n self.user_name = user_name\n self.user_color = user_color\n self.user_pref = user_pref\n \n@app.route(\"/\", methods = ['GET', 'POST'])\ndef register_user_pref():\n if request.method == 'POST': \n user_name = request.form.get('user_name')\n user_color = request.form.get('user_color')\n user_pref = request.form.get('user_pref')\n existing_entry = user_entry.query.filter_by(user_name = user_name).first()\n if existing_entry is None: \n new_entry = user_entry(user_name, user_color, user_pref)\n db.session.add(new_entry)\n db.session.commit()\n return user_name + \"'s preferences were saved.\"\n else: \n return 'There is already a user with name: ' + user_name + ' please try other name.'\n else: \n return render_template('register_user_pref.html')\n\nif __name__ == '__main__': \n db.drop_all()\n db.create_all()\n app.run(host=\"0.0.0.0\", port=80)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"485962563","text":"from hoshino.typing import CQEvent\nfrom . import sv\nfrom .duelconfig import *\nfrom .ScoreCounter import ScoreCounter2\nfrom . 
import duel_chara\n\n\n@sv.on_fullmatch(['天气帮助'])\nasync def manor_help(bot, ev: CQEvent):\n    msg = '''\n╔ ╗ \n 天气帮助\n[今日天气] [天气一览]\n[天气效果] {天气名}\n╚ ╝\n    '''\n    await bot.send(ev, msg)\n\n\n# today's weather\n@sv.on_fullmatch(['今日天气', '当前天气'])\nasync def now_weather(bot, ev: CQEvent):\n    gid = ev.group_id\n    model = get_weather(gid)\n    msg = f'''\n当前天气为:{model.value['name']}\n效果:{model.value['desc']}\n    '''\n    await bot.send(ev, msg, at_sender=True)\n\n\n# weather effect lookup\n@sv.on_prefix(['天气效果'])\nasync def weather_help(bot, ev: CQEvent):\n    name = str(ev.message).strip()\n    model = WeatherModel.get_by_name(name)\n    if not model:\n        await bot.finish(ev, f\"未找到名为{name}的天气\", at_sender=True)\n    msg = f'''\n天气:{model.value['name']}\n效果:{model.value['desc']}\n    '''\n    await bot.send(ev, msg, at_sender=True)\n\n\n@sv.on_fullmatch('天气一览')\nasync def item_all(bot, ev: CQEvent):\n    tas_list = []\n    data = {\n        \"type\": \"node\",\n        \"data\": {\n            \"name\": \"ご主人様\",\n            \"uin\": \"1587640710\",\n            \"content\": \"====== 天气一览 ======\"\n        }\n    }\n    tas_list.append(data)\n    for i in WeatherModel:\n        msg = f\"\"\"{i.value['name']}\n效果:{i.value['desc']}\n        \"\"\".strip()\n        data = {\n            \"type\": \"node\",\n            \"data\": {\n                \"name\": \"ご主人様\",\n                \"uin\": \"1587640710\",\n                \"content\": msg\n            }\n        }\n        tas_list.append(data)\n    await bot.send_group_forward_msg(group_id=ev['group_id'], messages=tas_list)\n\n\n@sv.on_fullmatch(['快速决斗'])\nasync def manor_begin(bot, ev: CQEvent):\n    gid = ev.group_id\n    uid = ev.user_id\n    weather = get_weather(gid)\n    if weather != WeatherModel.FENGYU:\n        return\n    guid = gid, uid\n    duel = DuelCounter()\n    score_counter = ScoreCounter2()\n    level = duel._get_level(gid, uid)\n    if not daily_duel_limiter.check(guid):\n        await bot.finish(ev, '今天的决斗次数已经超过上限了哦,明天再来吧。', at_sender=True)\n    wingold = 1000 + (level * 200)\n    winSW = WinSWBasics - LoseSWBasics\n    score_counter._add_score(gid, uid, wingold)\n    score_counter._add_prestige(gid, uid, winSW)\n    CE = CECounter()\n    bangdin = CE._get_guaji(gid, uid)\n    bd_msg = ''\n    if bangdin:\n        bd_info = duel_chara.fromid(bangdin)\n        card_level = add_exp(gid, uid, bangdin, WIN_EXP)\n        nvmes = get_nv_icon_with_fashion(gid, uid, bangdin)\n        bd_msg = f\"\n您绑定的女友{bd_info.name}获得了{WIN_EXP}点经验,{card_level[2]}\n{nvmes}\"\n    daily_duel_limiter.increase(guid)\n    num = get_user_counter(gid, uid, UserModel.DUEL_COIN)\n    num += 1\n    save_user_counter(gid, uid, UserModel.DUEL_COIN, num)\n    await bot.send(ev, f'快速决斗成功,获得{wingold}金币,{winSW}声望,1枚决斗币。' + bd_msg, at_sender=True)\n","sub_path":"hoshino/modules/pcr_duel/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"508817631","text":"N, V = map(int, input().split())\n\n# dp[j] = best value achievable with knapsack capacity j\ndp = [0 for _ in range(V + 1)]\n\n\ndef complete_pack(v, w) -> None:\n    # unbounded item: iterate capacities upward so an item can be reused\n    for i in range(v, V + 1):\n        dp[i] = max(dp[i], dp[i - v] + w)\n\n\ndef zero_one_pack(v, w) -> None:\n    # single item: iterate capacities downward so it is taken at most once\n    for i in range(V, v - 1, -1):\n        dp[i] = max(dp[i], dp[i - v] + w)\n\n\ndef multiple_pack(v, w, amount) -> None:\n    if v * amount >= V:\n        # enough copies to fill the knapsack: treat the item as unbounded\n        complete_pack(v, w)\n        return\n    # binary splitting: bundles of 1, 2, 4, ... copies plus the remainder\n    k = 1\n    while k < amount:\n        zero_one_pack(k * v, k * w)\n        amount -= k\n        k *= 2\n    zero_one_pack(amount * v, amount * w)\n\n\nfor i in range(N):\n    v, w, amount = list(map(int, input().split()))\n    multiple_pack(v, w, amount)\n\nprint(dp[-1])\n","sub_path":"4_2.py","file_name":"4_2.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"356793165","text":"# Convolutional network MNIST 
demo\r\n# two fully connected layers 7*7*64-1024-10, cross-entropy loss, Adam optimizer, dropout, fixed learning rate\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\n\r\nmnist = input_data.read_data_sets('MNIST_data/',one_hot = True)\r\n\r\nbatch_size = 100\r\nn_batch = 600\r\n\r\ndef weight_variable(shape):\r\n    return tf.Variable(tf.truncated_normal(shape,stddev=0.1))\r\n\r\ndef bias_variable(shape):\r\n    return tf.Variable(tf.constant(0.1, shape=shape))\r\n\r\ndef conv2d(x,W):\r\n    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')\r\n\r\ndef max_pool_2x2(x):\r\n    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n\r\nx = tf.placeholder(tf.float32,[None,784])\r\ny = tf.placeholder(tf.float32,[None,10])\r\nkeep_prob = tf.placeholder(tf.float32)\r\n\r\nx_image = tf.reshape(x,[-1,28,28,1])\r\n\r\nW_conv1 = weight_variable([5,5,1,32]) # 5*5 window, 32 kernels drawing features from 1 input plane\r\nb_conv1 = bias_variable([32]) # one bias per kernel\r\n\r\n# a 28*28*1 image becomes 28*28*32 after the first convolution\r\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\r\n# and 14*14*32 after pooling\r\nh_pool1 = max_pool_2x2(h_conv1)\r\n\r\n# the second convolution gives 14*14*64\r\nW_conv2 = weight_variable([5,5,32,64])\r\nb_conv2 = bias_variable([64])\r\nh_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2) + b_conv2)\r\n# and the second pooling gives 7*7*64\r\nh_pool2 = max_pool_2x2(h_conv2)\r\n\r\n\r\n# first fully connected layer\r\nW_fc1 = weight_variable([7*7*64,1024])\r\nb_fc1 = bias_variable([1024])\r\n# flatten the 7*7*64 feature maps into a 1-D vector\r\nh_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])\r\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\r\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\r\n\r\n# second fully connected layer\r\nW_fc2 = weight_variable([1024,10])\r\nb_fc2 = bias_variable([10])\r\nlogits = tf.matmul(h_fc1_drop,W_fc2) + b_fc2\r\n# class probabilities via softmax on the logits\r\nprediction = tf.nn.softmax(logits)\r\n\r\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))\r\ntrain_step = tf.train.AdamOptimizer(0.001).minimize(loss)\r\n\r\ncorrect_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\nsaver=tf.train.Saver()\r\n\r\nwith tf.Session() as sess:\r\n    sess.run(tf.global_variables_initializer())\r\n    for epoch in range(3):\r\n        for batch in range(n_batch):\r\n            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\r\n            sess.run(train_step, feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.7})\r\n        acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels, keep_prob:1.0})\r\n        print(\"Iter: \" + str(epoch) + \", acc: \" + str(acc))\r\n\r\n    saver.save(sess,'net/my_net.ckpt')","sub_path":"mnist_conv_demo.py","file_name":"mnist_conv_demo.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"597247585","text":"import os\nimport boto3\nimport json\nimport argparse\nfrom server import Org, SERVERS\nfrom rich.table import Column, Table\nfrom rich.console import Console\n\nCREDENTIALFOLDER='.creds'\nconsole = Console()\n\ndef getCredentialLocation(org):\n    if not isinstance(org,Org):\n        print(\"org is not an Org enum\")\n        return False\n    \n    credentialLocation=os.path.join(\"./\", CREDENTIALFOLDER)\n\n    if org == Org.RC:\n        credentialLocation = os.path.join(credentialLocation, \"rc.json\")\n    elif org == Org.KFNEXT:\n        credentialLocation = os.path.join(credentialLocation, \"kf.json\")\n    elif org == Org.REESELAB:\n        credentialLocation = os.path.join(credentialLocation, 
\"rl.json\")\n \n return credentialLocation\n\n\ndef checkforCredential(org):\n if not isinstance(org,Org):\n print(\"org is not a Org enum\")\n return False\n \n location = getCredentialLocation(org)\n\n if not os.path.isfile(location):\n return False\n else:\n return True\n\n\ndef loadCredentials(org):\n if not isinstance(org,Org):\n print(\"org is not a Org enum\")\n return\n\n if not checkforCredential(org):\n print(f\"Do not have the credentials for org: {org}\")\n return False\n \n credentialLocation = getCredentialLocation(org) \n \n with open(credentialLocation) as fh:\n credentials = json.load(fh)\n\n return credentials\n\n\ndef ls(args):\n servers = SERVERS\n\n if args.server:\n if not args.server in SERVERS:\n print(f\"No servers named {args.server}\")\n return\n else:\n #make the servers variable only contain a single server\n servers = {}\n servers[args.server] = SERVERS[args.server]\n\n\n table = Table(show_header=True, header_style=\"bold blue\")\n table.add_column(\"Project\")\n table.add_column(\"Server\")\n table.add_column(\"Status\")\n table.add_column(\"Public IP\")\n \n for name in servers:\n data = {}\n data['servers'] = servers[name]['servers'] \n \n creds = loadCredentials(servers[name]['org'])\n \n lambda_client = boto3.client(\n \"lambda\",\n aws_access_key_id=creds['access_key'],\n aws_secret_access_key=creds['secret_key'],\n region_name=creds['region']\n )\n\n rv = lambda_client.invoke(\n FunctionName=\"describe-ec2\",\n Payload=json.dumps(data)\n )\n\n outcome = json.load(rv['Payload'])\n \n for server_result in outcome['messages']:\n ip = \"\"\n if not 'public_ip' in server_result:\n ip =\"*\"\n else:\n ip = server_result['public_ip']\n\n table.add_row(name, server_result['name'], server_result['message'], ip)\n\n console.print(table) \n\n\ndef updateServer(args):\n if args.server is None:\n print(\"Need to provide the server to stop\")\n return\n\n if not args.server in SERVERS:\n print(f\"{args.server} is not one of our managed servers\")\n return\n\n data = {}\n data['servers'] = SERVERS[args.server]['servers'] \n creds = loadCredentials(SERVERS[args.server]['org'])\n \n if not creds:\n #Problem when loading credentials let it handle error message\n return\n\n lambda_client = boto3.client(\n \"lambda\",\n aws_access_key_id=creds['access_key'],\n aws_secret_access_key=creds['secret_key'],\n region_name=creds['region']\n )\n\n if args.command == \"stop\":\n func=\"stop-ec2\"\n verb=\"stop\"\n elif args.command == \"start\":\n func=\"start-ec2\"\n verb=\"start\"\n else:\n print(\"command was not 'start' or 'stop'\")\n return\n\n rv = lambda_client.invoke(\n FunctionName=func,\n Payload=json.dumps(data)\n )\n\n outcome = json.load(rv['Payload'])\n\n for server_result in outcome['messages']:\n if server_result['success']:\n print(f\"{verb.capitalize()}{verb[-1]}ing server {server_result['name']}. This will take time to complete. 
Use the 'ls' command to check on the status of the server\")\n        else:\n            print(f\"Could not {verb} {server_result['name']} because {server_result['message']}\")\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Manage the Reese Innovation and KF Next EC2 Instances\")\n    parser.add_argument('command', type=str, choices=['ls', 'start', 'stop', 'check'], help='List servers, start server, stop server, check server')\n    parser.add_argument('--server', type=str, help=\"name of the server\")\n\n    args = parser.parse_args()\n\n    if args.command == \"ls\":\n        ls(args)\n    elif args.command == 'stop' or args.command == 'start':\n        updateServer(args)\n\n\nif __name__=='__main__':\n    main()\n","sub_path":"cli/server-management.py","file_name":"server-management.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"388629765","text":"from elasticsearch import Elasticsearch\nfrom pandasticsearch import Select\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport math\n\nprint('connecting to ecomart ...')\nnodos = '10.32.16.134:9200'\nes = Elasticsearch(['http://'+nodos], timeout=10)\nindex='recoline_socofin_lu'\n\ndef get_body_query( param ):\n    body = ''' \n    {\n      \"_source\": [\"fecha_generacion\", \"rut_cliente\", \"saldo_no_castigado\", \"fecha_vencimiento_01\", \"fase_dia\", \"fase_inicio\", \"dias_mora_real\"],\n      \"query\" : {\n        \"bool\" : {\n          \"must\" : [\n            {\n              \"query_string\" : {\n                \"query\" : \"BAJAEXPOF*\",\n                \"fields\" : [ \"lista_cliente.keyword\" ]\n              }\n            },\n            {\n              \"query_string\" : {\n                \"query\" : \"*__param__*2019\",\n                \"fields\" : [\"fecha_generacion.keyword\"]\n              }\n            }\n          ]\n        }\n      }\n    }\n    '''\n    nb = body.replace( \"__param__\" , param)\n    return nb\n\n\nfor i in range(1,9):\n    mes = ''\n    if i < 10:\n        mes = '0'+str(i)\n    else:\n        mes = str(i)\n    \n    dia_mes = ' '\n    for dia in range(1,31):\n        if dia < 10:\n            sdia = '0' + str(dia)\n        else:\n            sdia = str(dia)\n        \n        dia_mes = sdia + '*' + mes\n        print('retrieving period', mes, sdia, dia_mes, '...')\n        body = get_body_query(dia_mes)\n        #print(body)\n        df = ''\n        documents = es.search(index=index, body=body, size = 10000) \n        try:\n            df = Select.from_dict(documents).to_pandas()\n            nmesdia = mes + sdia\n            periodo = '2019_' + nmesdia\n            fname = '../../data/F2_F3_2019/analisis/recoline_socofin_lu_xanal_' + periodo + '.csv'\n            print('writing to ', fname)\n            df.to_csv(index=False, path_or_buf =fname, header=True)\n        except:\n            print('error')\n\n","sub_path":"utils/get_lu_fpago.py","file_name":"get_lu_fpago.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"390393998","text":"import os\nimport pydicom\nimport numpy as np\n\nclass VirtualFSV():\n    \n    def __init__(self,fsv_filename):\n        self.fsv_filename = fsv_filename\n        self.fsv_filesize = VirtualFSV.__file_size(fsv_filename)\n        self.__start_end_jpg_list = [] #[[[pairA,pairB],[next pair]][Newsection]]\n        self.__start_end_dicom_header_list = []\n        self.__start_end_section_list = []\n        self.__locate_tags()\n\n    def __locate_tags(self):\n        # Algorithm to reduce bytes read:\n        # 1) All the bytes of the first photo are read, its length calculated\n        # 2) We go to curr_disp+len(first photo)\n        # 3) alternate reading bytes to the right and to the left\n        #    until a correct jpg trailer or header is found\n        # 4) always check to see if a header immediately follows the trailer;\n        #    that indicates there is another photo.\n\n        # Full plan\n        # 1) All bytes are 
read until first JPG header tag\n # Along the way, mark the header and trailer of dicom\n # 2) Use photo algorithm above.\n j = 0\n with open(self.fsv_filename, \"rb\") as f:\n disp = 0\n byte = 0xFF # this is just a filler\n # Master Loop\n while not VirtualFSV.__isAtEnd(f,disp,self.fsv_filesize): # change to whenever file is done being read\n #jpeg\n disp, start_end_jpg_pair, photoLen = \\\n VirtualFSV.__findFirstJPG(f,disp,self.fsv_filesize)\n if disp == 'None':\n raise ValueError('Unable to Extract jpeg') \n else:\n start_end_jpg_sublist = []\n start_end_jpg_sublist.append(start_end_jpg_pair)\n while VirtualFSV.__isJPGHeader(f,disp) or \\\n VirtualFSV.__isJPGHeader(f,disp+1): \n\n disp,start_end_jpg_pair = \\\n VirtualFSV.__findJPG(f,disp,photoLen)\n start_end_jpg_sublist.append(start_end_jpg_pair)\n self.__start_end_jpg_list.append(start_end_jpg_sublist)\n #dicom\n disp,start_end_DICOM_pair = VirtualFSV.__findDICOM(f,disp,self.fsv_filesize)\n if disp == 'None':\n raise ValueError('Unable to Extract Dicom')\n else:\n self.__start_end_dicom_header_list.append(start_end_DICOM_pair)\n\n\n @staticmethod\n def __isAtEnd(f,disp,filesize):\n while disp < filesize and \\\n not VirtualFSV.__isDICOMHeader(f,disp):\n disp += 1\n if disp >= filesize:\n return True\n else:\n return False\n\n @staticmethod\n def __findDICOM(f,disp,filesize):\n while not VirtualFSV.__isDICOMHeader(f,disp):\n disp += 1\n if disp > filesize:\n return 'None', 'None'\n #dicom header found\n start = disp\n disp += 6\n\n while not VirtualFSV.__isDICOMTrailer(f,disp):\n disp += 1\n if disp > filesize:\n return 'None', 'None'\n #dicom trailer found\n end = disp\n disp += 4\n return disp+1,[start,end]\n\n @staticmethod\n def __findFirstJPG(f,disp,filesize):\n while not VirtualFSV.__isJPGHeader(f,disp):\n disp += 1\n if disp > filesize:\n return 'None', 'None', 'None'\n start = disp\n disp += 2 #size of header - inc in loop\n\n while not VirtualFSV.__isJPGTrailer(f,disp):\n disp += 1\n if disp > filesize:\n return 'None', 'None', 'None'\n end = disp + 1 #size of trailer - increment in while loop\n disp += 1 #size of trailer - inc in loop\n photoLen = end - start\n return disp + 1,[start,end],photoLen\n\n @staticmethod\n def __findJPG(f,disp,photoLen):\n #jpgs are back to back (sometimes have one 0x00 between)\n if VirtualFSV.__isJPGHeader(f,disp+1):\n disp += 1\n start = disp\n #alternate back and forth to find JPG\n disp += photoLen\n i = 0\n while i < photoLen:\n #go right\n disp += i\n if VirtualFSV.__isJPGTrailer(f,disp):\n end = disp + 1\n return disp + 2,[start,end]\n i+=1\n #go left\n disp -= i\n if VirtualFSV.__isJPGTrailer(f,disp):\n end = disp + 1\n return disp + 2,[start,end]\n i+=1\n\n @staticmethod\n def __isDICOMHeader(f,disp):\n f.seek(disp)\n #start \"02 00 00 00 55 4c\" -> little endian of 00 02 00 00 | 55 4c <- last part in big endian for some reason\n byte = f.read(1)\n if(byte.hex() == \"02\"):\n byte = f.read(1)\n if(byte.hex() == \"00\"):\n byte = f.read(1)\n if(byte.hex() == \"00\"):\n byte = f.read(1)\n if(byte.hex() == \"00\"):\n byte = f.read(1)\n if(byte.hex() == \"55\"):\n byte = f.read(1)\n if(byte.hex() == \"4c\"):\n byte = f.read(1)\n return True\n return False\n\n @staticmethod\n def __isDICOMTrailer(f,disp):\n f.seek(disp)\n #start \"ff d8 ff\"\n byte = f.read(1)\n if(byte.hex() == \"04\"):\n byte = f.read(1)\n if(byte.hex() == \"00\"):\n byte = f.read(1)\n if(byte.hex() == \"ff\"):\n byte = f.read(1)\n if(byte.hex() == \"ff\"):\n return True\n return False\n\n @staticmethod\n def 
__isJPGHeader(f,disp):\n f.seek(disp)\n #start \"ff d8 ff\"\n byte = f.read(1)\n if(byte.hex() == \"ff\"):\n byte = f.read(1)\n if(byte.hex() == \"d8\"):\n byte = f.read(1)\n if(byte.hex() == \"ff\"):\n return True\n return False\n\n @staticmethod\n def __isJPGTrailer(f,disp):\n f.seek(disp)\n #trailer \"ff d9\"\n byte = f.read(1)\n if(byte.hex() == \"ff\"):\n byte = f.read(1)\n if(byte.hex() == \"d9\"):\n return True\n return False\n\n @staticmethod\n def __file_size(fname):\n statinfo = os.stat(fname)\n return statinfo.st_size\n\n #opening file in directory that doesn't exist likeopen(\"dir/file\",\"wb+\")\n def __open2(self,filename,type):\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n return open(filename,type)\n\n #uses a location list and filename to extract and create jpgs.\n def extract_photos(self,output_folder_jpegs):\n try:\n with open(self.fsv_filename, \"rb\") as fsv_f:\n for section_count in range(len(self.__start_end_jpg_list)):\n for photo_count in range(len(self.__start_end_jpg_list[section_count])):\n #file naming\n print(\"creating {}/{}.jpg\".format(section_count,photo_count))\n photo_filename = \"{}/{}/{}.jpg\".format(output_folder_jpegs,section_count,photo_count)\n\n photo_f = self.__open2(photo_filename,\"wb+\")\n\n #copy bytes from fsv to photo\n start = self.__start_end_jpg_list[section_count][photo_count][0]\n end = self.__start_end_jpg_list[section_count][photo_count][1]\n\n fsv_f.seek(start)\n photo_byte_str = fsv_f.read(end-start)\n photo_f.write(photo_byte_str)\n photo_f.close()\n except Exception as e:\n print(e)\n\n def extract_dicoms(self,output_folder_dicoms):\n try:\n with open(self.fsv_filename, \"rb\") as fsv_f:\n # dicom_datasets=[]\n for dicom_count in range(len(self.__start_end_dicom_header_list)):\n start=self.__start_end_dicom_header_list[dicom_count][0]\n end=self.__start_end_dicom_header_list[dicom_count][1]\n print(\"creating {}/main.dcm\".format(dicom_count))\n dicom_filename = \"{}/{}/main.dcm\".format(output_folder_dicoms,dicom_count)\n fsv_f.seek(start)\n # ds = pydicom.dcmread(fsv_f,defer_size=end-start,force=True)\n dicom_byte_str = fsv_f.read(end-start)\n dicom_f = self.__open2(dicom_filename,\"wb+\")\n dicom_f.write(dicom_byte_str)\n # ds = pydicom.dcmread(dicom_f,force=True)\n # dicom_datasets.append(ds)\n # print(ds.keys())\n # print(ds.items())\n # for x in ds.elements():\n # print(x)\n dicom_f.close()\n # dicom_f = open(dicom_filename,\"w\")\n # dicom_f.write(str(ds))\n # return dicom_datasets\n except Exception as e:\n print(e)\n\n\n###USAGE###\n\"\"\"\nfrom extract import VirtualFSV\nfsv = \"001b1017-9a3b-48ce-b951-3249ce00182a.fsv\"\nv = VirtualFSV(fsv)\nv.extract_photos(\"data/\"+fsv+\"/\")\nv.extract_dicoms(\"data/\"+fsv+\"/\")\n\"\"\"","sub_path":"Extract/extract_fsv.py","file_name":"extract_fsv.py","file_ext":"py","file_size_in_byte":9331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"159390386","text":"import numpy as np\nimport torch\nimport torch.utils.data\n\nfrom transformer import Constants\n\ndef paired_collate_fn(insts):\n src_insts, tgt_insts = list(zip(*insts))\n src_insts = collate_fn(src_insts)\n tgt_insts = collate_fn(tgt_insts)\n return (*src_insts, *tgt_insts)\n\ndef openie_paired_collate_fn(insts):\n word_insts, path_insts, tag_insts = list(zip(*insts))\n return (*collate_fn(word_insts), collate_fn_2d(path_insts),\n collate_fn(tag_insts, get_pos=False))\n\ndef openie_collate_fn(insts):\n word_insts, path_insts = list(zip(*insts))\n return 
(*collate_fn(word_insts), collate_fn_2d(path_insts))\n\ndef collate_fn_2d(insts):\n if len(insts) <= 0:\n raise ValueError\n max_len = max(len(inst) for inst in insts)\n max_depth = max(len(c) for inst in insts for r in inst for c in r)\n insts = [[[c + [Constants.PAD] * (max_depth - len(c)) for c in r] for r in inst] for inst in insts]\n pad_shape = (max_len, max_len, max_depth)\n def pad(inst):\n ninst = np.full(pad_shape, Constants.PAD)\n ninst[:len(inst), :len(inst)] = inst\n return ninst\n batch_seq = np.array([pad(inst) for inst in insts])\n batch_seq = torch.LongTensor(batch_seq)\n return batch_seq\n\ndef collate_fn(insts, get_pos=True):\n ''' Pad the instance to the max seq length in batch '''\n max_len = max(len(inst) for inst in insts)\n # get the size of one element\n # a element could be either a list of integers or an integer\n ele_size = None\n for inst in insts:\n if len(inst) == 0:\n continue\n ele_size = inst[0]\n if hasattr(ele_size, '__len__'):\n ele_size = len(ele_size)\n if ele_size == 0:\n raise ValueError('empty element')\n else:\n ele_size = 0\n break\n if ele_size is None:\n raise ValueError('all instances are empty')\n\n batch_seq = np.array([\n inst + [[Constants.PAD] * ele_size or Constants.PAD] * (max_len - len(inst))\n for inst in insts])\n batch_seq = torch.LongTensor(batch_seq)\n\n if get_pos:\n batch_pos = np.array([\n [pos_i+1 if (w_i[0] if ele_size else w_i) != Constants.PAD else 0\n for pos_i, w_i in enumerate(inst)] for inst in batch_seq]) # the first element is word\n batch_pos = torch.LongTensor(batch_pos)\n return batch_seq, batch_pos\n\n return batch_seq\n\nclass TranslationDataset(torch.utils.data.Dataset):\n def __init__(\n self, src_word2idx, tgt_word2idx,\n src_insts=None, tgt_insts=None):\n\n assert src_insts\n assert not tgt_insts or (len(src_insts) == len(tgt_insts))\n\n src_idx2word = {idx:word for word, idx in src_word2idx.items()}\n self._src_word2idx = src_word2idx\n self._src_idx2word = src_idx2word\n self._src_insts = src_insts\n\n tgt_idx2word = {idx:word for word, idx in tgt_word2idx.items()}\n self._tgt_word2idx = tgt_word2idx\n self._tgt_idx2word = tgt_idx2word\n self._tgt_insts = tgt_insts\n\n @property\n def n_insts(self):\n ''' Property for dataset size '''\n return len(self._src_insts)\n\n @property\n def src_vocab_size(self):\n ''' Property for vocab size '''\n return len(self._src_word2idx)\n\n @property\n def tgt_vocab_size(self):\n ''' Property for vocab size '''\n return len(self._tgt_word2idx)\n\n @property\n def src_word2idx(self):\n ''' Property for word dictionary '''\n return self._src_word2idx\n\n @property\n def tgt_word2idx(self):\n ''' Property for word dictionary '''\n return self._tgt_word2idx\n\n @property\n def src_idx2word(self):\n ''' Property for index dictionary '''\n return self._src_idx2word\n\n @property\n def tgt_idx2word(self):\n ''' Property for index dictionary '''\n return self._tgt_idx2word\n\n def __len__(self):\n return self.n_insts\n\n def __getitem__(self, idx):\n if self._tgt_insts:\n return self._src_insts[idx], self._tgt_insts[idx]\n return self._src_insts[idx]\n\nclass OpenIEDataset(torch.utils.data.Dataset):\n def __init__(\n self, word2idx, tag2idx, word_insts=None, path_insts=None, tag_insts=None):\n\n assert word_insts\n assert path_insts\n\n idx2word = {idx:word for word, idx in word2idx.items()}\n idx2tag = {idx:tag for tag, idx in tag2idx.items()}\n self._word2idx = word2idx\n self._idx2word = idx2word\n self._tag2idx = tag2idx\n self._idx2tag = idx2tag\n self._word_insts = 
word_insts\n self._tag_insts = tag_insts\n self._path_insts = path_insts\n\n @property\n def n_insts(self):\n ''' Property for dataset size '''\n return len(self._word_insts)\n\n @property\n def vocab_size(self):\n ''' Property for vocab size '''\n return len(self._word2idx)\n\n @property\n def word2idx(self):\n ''' Property for word dictionary '''\n return self._word2idx\n\n @property\n def idx2word(self):\n ''' Property for index dictionary '''\n return self._idx2word\n\n @property\n def tag2idx(self):\n ''' Property for word dictionary '''\n return self._tag2idx\n\n @property\n def idx2tag(self):\n ''' Property for index dictionary '''\n return self._idx2tag\n\n def __len__(self):\n return self.n_insts\n\n def __getitem__(self, idx):\n # return word seq, path seq, tag seq separately\n if self._tag_insts:\n return self._word_insts[idx], self._path_insts[idx], self._tag_insts[idx]\n return self._word_insts[idx], self._path_insts[idx]\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"515161336","text":"import random\nimport math\nimport sfml as sf\nfrom particle import Particle\n\ndef addVectors(angle1, length1, angle2, length2):\n x = math.sin(angle1) * length1 + math.sin(angle2) * length2\n y = math.cos(angle1) * length1 + math.cos(angle2) * length2\n \n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n\n return (angle, length)\n\ndef collide(p1, p2):\n dx = p1.x - p2.x\n dy = p1.y - p2.y\n \n dist = math.hypot(dx, dy)\n if dist < p1.size + p2.size:\n tangent = math.atan2(dy, dx)\n angle = 0.5 * math.pi + tangent\n\n angle1 = 2*tangent - p1.angle\n angle2 = 2*tangent - p2.angle\n speed1 = p2.speed\n speed2 = p1.speed\n\n (p1.angle, p1.speed) = (angle1, speed1)\n (p2.angle, p2.speed) = (angle2, speed2)\n\n p1.x += math.sin(angle)\n p1.y -= math.cos(angle)\n p2.x -= math.sin(angle)\n p2.y += math.cos(angle)\n\n\n\nclass Background():\n def __init__(self,window,colors):\n self.window = window\n self.width,self.height = self.window.size\n self.colors = colors\n self.colorpos = 0\n self.number_of_particles = 50\n self.my_particles = []\n\n for n in range(self.number_of_particles):\n size = random.randint(10, 20)\n x = random.randint(size, self.width-size)\n y = random.randint(size, self.height-size)\n\n particle = Particle(x, y, size, self.colors[self.colorpos])\n self.colorpos = (self.colorpos +1) % (len(self.colors)-1)\n particle.speed = random.random()\n particle.angle = random.uniform(0, math.pi*2)\n\n self.my_particles.append(particle)\n\n def draw(self,target):\n for i, particle in enumerate(self.my_particles):\n particle.move()\n particle.bounce(target)\n for particle2 in self.my_particles[i+1:]:\n collide(particle, particle2)\n particle.display(target)","sub_path":"newback.py","file_name":"newback.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"49121322","text":"import tensorflow as tf\r\nimport pickle\r\nimport numpy as np\r\nimport nltk\r\nfrom nltk.tokenize import sent_tokenize, word_tokenize\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom bottle import route, run, request\r\n\r\n#Creating Globals \r\nlemmatizer = WordNetLemmatizer()\r\n\r\nn_nodes_hl1 = 500\r\nn_nodes_hl2 = 500\r\n\r\nn_classes = 2\r\nhm_data = 2000000\r\n\r\nbatch_size = 32\r\nhm_epochs = 10\r\n#tf.reset_default_graph()\r\nx = 
tf.placeholder('float')\r\ny = tf.placeholder('float')\r\n\r\n\r\ncurrent_epoch = tf.Variable(1)\r\n\r\nhidden_1_layer = {'f_fum':n_nodes_hl1,\r\n 'weight':tf.Variable(tf.random_normal([2638, n_nodes_hl1])),\r\n 'bias':tf.Variable(tf.random_normal([n_nodes_hl1]))}\r\n\r\nhidden_2_layer = {'f_fum':n_nodes_hl2,\r\n 'weight':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),\r\n 'bias':tf.Variable(tf.random_normal([n_nodes_hl2]))}\r\n\r\noutput_layer = {'f_fum':None,\r\n 'weight':tf.Variable(tf.random_normal([n_nodes_hl2, n_classes])),\r\n 'bias':tf.Variable(tf.random_normal([n_classes])),}\r\n\r\n#Define the network shape\r\ndef neural_network_model(data):\r\n\r\n l1 = tf.add(tf.matmul(data,hidden_1_layer['weight']), hidden_1_layer['bias'])\r\n l1 = tf.nn.relu(l1)\r\n\r\n l2 = tf.add(tf.matmul(l1,hidden_2_layer['weight']), hidden_2_layer['bias'])\r\n l2 = tf.nn.relu(l2)\r\n\r\n output = tf.matmul(l2,output_layer['weight']) + output_layer['bias']\r\n\r\n return output\r\n#create the saver\r\nsaver = tf.train.import_meta_graph('./model.ckpt.meta')\r\n#Use the network for training\r\ndef use_neural_network(input_data):\r\n prediction = neural_network_model(x)\r\n with open('lexicon-2500-2638.pickle','rb') as f:\r\n lexicon = pickle.load(f)\r\n \r\n with tf.Session() as sess:\r\n #tf.reset_default_graph()\r\n sess.run(tf.global_variables_initializer())\r\n #tf.reset_default_graph()\r\n saver.restore(sess,\"model.ckpt\")\r\n #print('model restored')\r\n current_words = word_tokenize(input_data.lower())\r\n current_words = [lemmatizer.lemmatize(i) for i in current_words]\r\n features = np.zeros(len(lexicon))\r\n\r\n for word in current_words:\r\n if word.lower() in lexicon:\r\n index_value = lexicon.index(word.lower())\r\n # OR DO +=1, test both\r\n features[index_value] += 1\r\n\r\n features = np.array(list(features))\r\n # pos: [1,0] , argmax: 0\r\n # neg: [0,1] , argmax: 1\r\n result = (sess.run(tf.argmax(prediction.eval(feed_dict={x:[features]}),1)))\r\n if result[0] == 0:\r\n print('Positive:',input_data)\r\n return 'Positive:',input_data\r\n elif result[0] == 1:\r\n #Print('Neg')\r\n print('Negative:',input_data)\r\n return 'Negative:',input_data\r\n \r\n#use the network for predictions/classification\r\n#http://127.0.0.1:12345/posneg?blog=I%20hate%20fish&key=test1\r\n@route('/posneg')\r\ndef index():\r\n if request.GET.get('key') == 'test1':\r\n\t\t\r\n return str(use_neural_network(request.GET.get('blog')))\r\n\t\t#str(int(request.GET.get('number1')) + int(request.GET.get('number2')))\r\n else:\r\n return 'Unsupported operation'\r\n#start the server hosting the network\r\nif __name__ == '__main__':\r\n\t#run\r\n run(host='127.0.0.1', port=12345)\r\n\r\n#use_neural_network(\"He's an idiot and a jerk.\")\r\n#use_neural_network(\"This was the best store i've ever seen.\")\r\n#use_neural_network(\"I really hate that boy he totally suck\")\r\n","sub_path":"UseNNwebservice.py","file_name":"UseNNwebservice.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"567975518","text":"import csv\r\nimport os\r\nimport shutil\r\ncible = input(\"selectionner le fichier cible \\n\")\r\nrecup = \"\"\r\nif cible != \"\":\r\n os.makedirs(cible, exist_ok=True)\r\n os.chdir(cible)\r\n recup = \"../\"\r\nwith open(recup + 'departements-france.csv', newline='') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\r\n count = 0\r\n region = []\r\n suppr = 0\r\n for row in 
spamreader:\r\n if count == 0:\r\n count = 1\r\n else:\r\n os.makedirs(row[3], exist_ok=True)\r\n for rec in region:\r\n if rec == row[3]:\r\n suppr = 1\r\n if suppr == 0:\r\n vider = input(\"Voulez vous vider le dossier \" + row[3] + \" ? y / n \\n\")\r\n if vider == \"y\":\r\n shutil.rmtree(row[3])\r\n os.makedirs(row[3], exist_ok=True)\r\n region.append(row[3])\r\n suppr = 0\r\n os.chdir(row[3])\r\n os.makedirs(row[1], exist_ok=True)\r\n os.chdir('../')\r\n","sub_path":"dossier_region/region.py","file_name":"region.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"652274198","text":"#!/usr/bin/env python\n# ex:ts=4:sw=4:sts=4:et\n# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-\n\"\"\"\nBitBake 'Fetch' implementations\n\nClasses for obtaining upstream sources for the\nBitBake build tools.\n\nCopyright (C) 2006 Jiff Shen\n\nThis program is free software; you can redistribute it and/or modify it under\nthe terms of the GNU General Public License as published by the Free Software\nFoundation; either version 2 of the License, or (at your option) any later\nversion.\n\nThis program is distributed in the hope that it will be useful, but WITHOUT\nANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License along with\nthis program; if not, write to the Free Software Foundation, Inc., 59 Temple\nPlace, Suite 330, Boston, MA 02111-1307 USA. \n\nBased on functions from the base bb module, Copyright 2003 Holger Schurig\n\"\"\"\n\nimport os, re\nimport bb\nfrom bb import data\nfrom bb.fetch import Fetch\nfrom bb.fetch import FetchError\nfrom bb.fetch import MissingParameterError\n\nclass SvnGet(Fetch):\n \"\"\"Class to fetch a module or modules from svn repositories\"\"\"\n def supports(url, d):\n \"\"\"Check to see if a given url can be fetched with svn.\n Expects supplied url in list form, as outputted by bb.decodeurl().\n \"\"\"\n (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))\n return type in ['svnget']\n supports = staticmethod(supports)\n\n def localpath(url, d):\n (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))\n if \"localpath\" in parm:\n# if user overrides local path, use it.\n return parm[\"localpath\"]\n\n if 'rev' in parm:\n revision = parm['rev']\n else:\n revision = \"\"\n\n url = bb.encodeurl([type, host, path, user, pswd, {}])\n\n return os.path.join(data.getVar(\"DL_DIR\", d), os.path.basename(url))\n localpath = staticmethod(localpath)\n\n def go(self, d, urls = []):\n \"\"\"Fetch urls\"\"\"\n if not urls:\n urls = self.urls\n\n localdata = data.createCopy(d)\n data.setVar('OVERRIDES', \"svn:%s\" % data.getVar('OVERRIDES', localdata), localdata)\n data.update_data(localdata)\n\n for loc in urls:\n bb.note (\"fetch %s\" % loc)\n (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(loc, localdata))\n\n bb.debug (2, \"type %s, host %s, path %s, user %s, paswd %s\" % (type, host, path, user, pswd))\n options = []\n if 'rev' in parm:\n revision = parm['rev']\n else:\n revision = \"\"\n\n if \"proto\" in parm:\n proto = parm[\"proto\"]\n else:\n proto = \"svn\"\n\n svn_rsh = None\n if proto == \"svn+ssh\" and \"rsh\" in parm:\n svn_rsh = parm[\"rsh\"]\n\n dlfile = self.localpath(loc, localdata)\n dlfile = data.expand(dlfile, localdata)\n\n dldir = 
data.getVar('DL_DIR', localdata, 1)\n dldir = data.expand(dldir, localdata)\n\n md5 = dlfile + '.md5'\n if os.path.exists(md5):\n continue\n\n infocmd = \"svn info %s://%s%s\" % (proto, host, path)\n bb.debug (2, \"svn info command %s\" % infocmd)\n \n hinfo = os.popen (infocmd, \"r\")\n lines = hinfo.readlines()\n hinfo.close()\n \n regex = re.compile (r\"\"\"^Node Kind:\\s+file\"\"\")\n for line in lines:\n if regex.match (line):\n break\n else:\n raise FetchError (\"not found or not a regular file: \" + loc)\n \n bb.debug (1, loc + \" is a regular file\")\n\n olddir = os.path.abspath(os.getcwd())\n os.chdir(data.expand(dldir, localdata))\n\n svncmd = \"svn cat %s://%s%s > %s\" % (proto, host, path, dlfile)\n\n # either use the revision or if SRCDATE is now no braces\n if revision:\n svncmd = \"svn cat -r %s %s://%s%s > %s\" % (revision, proto, host, path, dlfile)\n\n if svn_rsh:\n svncmd = \"svn_RSH=\\\"%s\\\" %s\" % (svn_rsh, svncmd)\n\n bb.debug(1, \"Running %s\" % svncmd)\n myret = os.system(svncmd)\n if myret != 0:\n try:\n os.remove(dlfile)\n except OSError:\n pass\n raise FetchError(loc)\n \n os.system (\"md5sum %s > %s\" % (os.path.basename(dlfile), md5))\n os.chdir(olddir)\n del localdata\n","sub_path":"bitbake/lib/bb/fetch/svnget.py","file_name":"svnget.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"569897239","text":"from typing import Dict, Iterable, List\n\nimport apache_beam as beam\nimport logging\n\nimport spacy\nfrom apache_beam.transforms import window\n\nfrom nlp.nlp_spacy import NlpSpacy\nfrom dofn.parser import remove_html_tags, remove_special_characters\nimport en_core_web_lg as en_model\n\n\nclass TextAnalyser(beam.DoFn):\n\n def __init__(self, feature_columns: dict, batch_size: int = 1000):\n\n \"\"\"\n\n This DoFn executes some parsing operation\n like removal of special characters and\n uses the NlpSpacy class to clean the text\n and to perform some of the spacy functionalities\n in processing the text such es entity recognition,explained entity\n removing of stop words and punctuation.\n\n Then it saves the result in the pcollection\n in order to enrich the original dataset with new\n features\n\n :param feature_columns: {'id':'story_id','feature':'story_text','output_prefix': \"story\"}\n\n :param batch_size: int\n \"\"\"\n\n self.feature_columns = feature_columns\n self.id_column = self.feature_columns['id']\n self.output_prefix = self.feature_columns['output_prefix']\n\n self.feature_column = self.feature_columns['feature']\n self.batch_size = batch_size\n\n self.nlp_model: spacy = None\n self.spacy_handler: NlpSpacy = None\n\n self.le_col_name = f'{self.output_prefix}_label_entity'\n self.ee_col_name = f'{self.output_prefix}_explained_entity'\n self.sp_col_name = f'{self.output_prefix}_spacy_text'\n\n def add_default_column_to_element(self, element: List) -> List:\n for document in element:\n document[self.le_col_name] = \"\"\n document[self.ee_col_name] = \"\"\n document[self.sp_col_name] = \"\"\n return element\n\n def filter_special_char(self, element: Dict) -> Dict:\n \"\"\"\n some special characters are hard to remove also with spacy\n :param element:\n :return:\n \"\"\"\n output = element.copy()\n document = output[self.feature_column]\n id_value = output[self.id_column]\n if document:\n output[self.feature_column] = remove_html_tags(document=document)\n output[self.feature_column] = remove_special_characters(document=document)\n return output\n\n 
def prepare_document_ids_for_spacy(self, column, output):\n        id_values = {elem[self.id_column]: elem for elem in output}\n        id_values_none = {elem[self.id_column]: elem for elem in output if not elem[column]}\n        documents = ((elem[column], elem[self.id_column]) for elem in output if elem[column])\n        return documents, id_values, id_values_none\n\n    def spacy_processing(self, element, column) -> List:\n\n        output = element.copy()\n        output = self.add_default_column_to_element(output)\n\n        documents, id_values, id_values_none = self.prepare_document_ids_for_spacy(column, output)\n        output = [id_values[_id] for _id in id_values_none]\n        try:\n            spacy_processed_data = self.spacy_handler.process(text_key=documents)\n            for result in spacy_processed_data:\n                spacy_idx = result['idx']\n                original_document = id_values[spacy_idx]\n                original_document[self.le_col_name] = result['label_entity']\n                original_document[self.ee_col_name] = result['explained_entity']\n                original_document[self.sp_col_name] = result['spacy_text']\n                output.append(original_document)\n\n        except Exception as e:\n            logging.exception(f\"spacy processing exception: {e}\")\n        return output\n\n    def process(self, element: Dict, *args, **kwargs) -> Iterable[Dict]:\n        feature = self.feature_columns['feature']\n        filter_special_chars_element = self.filter_special_char(element=element)\n        self._batch.append(filter_special_chars_element)\n        if len(self._batch) >= self.batch_size:\n            for data_processed in self.spacy_processing(element=self._batch, column=feature):\n                yield data_processed\n            self._batch = []\n\n    def setup(self):\n        \"\"\"\n\n        Called once per DoFn instance when the DoFn instance is initialized.\n        \"\"\"\n        if self.nlp_model is None:\n            self.init_spacy_model()\n        self.spacy_handler = NlpSpacy(self.nlp_model)\n\n    def init_spacy_model(self):\n        try:\n            self.nlp_model = en_model.load()\n        except Exception as e:\n            logging.exception(f\"spacy init model failed with exception : {e}\")\n\n    def start_bundle(self):\n        \"\"\"\n        Called once per bundle of elements before calling process on the first element of the bundle\n\n        \"\"\"\n        self._batch = []\n\n    def finish_bundle(self):\n        \"\"\"\n        Called once per bundle of elements after calling process after the last element of the bundle,\n        can yield zero or more elements.\n\n        \"\"\"\n        feature = self.feature_columns['feature']\n        if len(self._batch) != 0:\n            for doc_processed in self.spacy_processing(element=self._batch, column=feature):\n                yield window.GlobalWindows.windowed_value(doc_processed)\n            self._batch = []\n","sub_path":"src/dofn/text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"639444922","text":"#-*-coding:utf-8-*-\n#SettingCode here\n__author__ = \"a_little_rubbish\"\n__date__ = \"2019-06-04 19:52\"\n\n#import your model here\nimport re\nfrom datetime import datetime, timedelta\nMAIL_MOBILE='^([a-z0-9_\.-]+)@([\da-z\.-]+)\.([a-z\.]{2,6})$'\nfrom users.models import VerifyCode\nfrom rest_framework import serializers\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\n\nclass mailSerializer(serializers.Serializer):\n    mobile = serializers.CharField(max_length=100)\n\n    # the method name must be: validate_ + the name of the validated field\n    def validate_mobile(self, mail):\n        \"\"\"\n        Email address validation\n        \"\"\"\n        # already registered?\n        if User.objects.filter(mobile=mail).count():\n            raise serializers.ValidationError(\"This email address is already registered\")\n\n        # is the format valid?\n        if not re.match(MAIL_MOBILE, mail):\n            raise serializers.ValidationError(\"Invalid email address\")\n\n        # verification code sending frequency:\n        # only one code may be sent within 60s\n        one_minute_ago = datetime.now() - timedelta(hours=0, minutes=1, seconds=0)\n        if VerifyCode.objects.filter(add_time__gt=one_minute_ago, mail=mail).count():\n            raise serializers.ValidationError(\"Sending too frequently\")\n\n        return mail\n#your class&function here\n","sub_path":"apps/users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"31903889","text":"#!/usr/bin/env python\n\nfrom urllib2 import urlopen\nfrom bs4 import BeautifulSoup\nimport string, os, platform, time, json, re, unicodedata, HTMLParser, csv\n\n# Papercut url\nurl = \"https://papercut.stolaf.edu/environment/dashboard/\"\n# What our inFile is named\ninFile = \"./data/campus.csv\"\n# What our outFile will be named\noutFile = \"./data/data.json\"\n\n\n# Our constructed JSON data\nresponseData = []\n# Parser to clean up the string names\nh = HTMLParser.HTMLParser()\n# Time when we started the script\nstart = time.time()\n\ndef appendData( login, week, month ):\n\tresponseData.append( {'login':login,'week':week,'month':month} )\n\n\n# Fetches the PaperCut dashboard stats for a given login\ndef getPaperCutData( url, login ):\n\t# Construct the full url\n\turl2 = ''.join( [ url, str( login ) ] )\n\n\t# Receive the content of url and convert it to a string\n\tresponse = urlopen( url2 )\n\tdata \t  = str( response.read() )\n\n\ttry:\n\t\t# user html variables\n\t\tsoup \t  = BeautifulSoup( data, 'html.parser' )\n\t\tuserStats = soup.find_all( \"div\", class_=\"user-stats-value\" )\n\t\tenvStats  = soup.find_all( \"div\", class_=\"env-stats-text\" )\n\n\t\t# user personal use\n\t\tweek \t  = userStats[ 0 ].text\n\t\tmonth \t  = userStats[ 1 ].text\n\t\tcostMonth = userStats[ 2 ].text\n\n\t\t# Package off the user's data\n\t\tappendData( login, week, month )\n\n\texcept Exception:\n\t\t# Skip logins whose dashboard page fails to parse\n\t\tpass\n\n\n# Round numbers to a decimal point\ndef num2str( num, precision ):\n\treturn \"%0.*f\" % ( precision, num )\n\n# Get the outFile's size\ndef calculateFileSize():\n\tfileSize = os.path.getsize( outFile )\n\tfileSize = str( fileSize )\n\treturn fileSize\n\n# Read in a CSV\ndef csv_reader( file_obj ):\n\treader = csv.reader( file_obj )\n\tfor row in reader:\n\t\tlogin = ( \" \".join( row ) )\n\t\tgetPaperCutData( url, login )\n\n# Loop through the list of students\nif __name__ == \"__main__\":\n\tcsv_path = inFile\n\twith open( csv_path, \"rb\" ) as f_obj:\n\t\t# Read a login from the CSV\n\t\tcsv_reader( f_obj )\n\n# Write our output to a file\nwith open( outFile, 'w' ) as outfile:\n\t # Output the data into a file\n\tjson.dump( responseData, outfile )\n\n\t# Detect Mac OS for script end sound\n\tif platform.system() == \"Darwin\":\n\t\t# Play a sound to alert that we have finished\n\t\tos.system( 'afplay /System/Library/Sounds/Glass.aiff' )\n\n\t# Save the runtime\n\tendTime = time.time() - start\n\nprint ( 'File: ' + outFile )\nprint ( 'Size: ' + calculateFileSize() + ' bytes' )\nprint ( 'This took ' + num2str( endTime, 2 ) + ' seconds\\n' )\n\n","sub_path":"scripts/py/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"66076709","text":"import gzip\nimport numpy as np\nimport os\nfrom datetime import datetime\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport tensorflow as tf\nimport pandas as p\n\nIMAGE_SIZE = 28\nNUM_CHANNELS = 
1\nPIXEL_DEPTH = 255\nNUM_LABELS = 10\n\n\ndef extract_data(filename, num_images):\n \"\"\"Extract the images into a 4D tensor [image index, y, x, channels].\n Values are rescaled from [0, 255] down to [-0.5, 0.5].\n \"\"\"\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)\n return data\n\n\ndef extract_labels(filename, num_images):\n \"\"\"Extract the labels into a vector of int64 label IDs.\"\"\"\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels\n\n\ndef normalize(imgs):\n \"\"\"\n Transform inputs so that pixel values are in range 0 to 1\n :param imgs: numpy array with images\n :return: normalized numpy array\n \"\"\"\n min_val = np.min(imgs)\n max_val = np.max(imgs)\n imgs_n = (imgs - min_val) / (max_val - min_val)\n return imgs_n\n\n\ndef load_data():\n imgs = extract_data(\"./MNIST_data/train-images-idx3-ubyte.gz\", 60000)\n lbls = extract_labels(\"./MNIST_data/train-labels-idx1-ubyte.gz\", 60000)\n return normalize(imgs), lbls\n\n\ndef create_dirs(log_dir, current_run):\n current_path = os.getcwd()\n log_path = os.path.join(current_path, log_dir)\n print(log_path)\n if not os.path.isdir(log_path):\n os.mkdir(log_path)\n run_path = os.path.join(log_path, current_run)\n if not os.path.isdir(run_path):\n os.mkdir(run_path)\n return log_path, run_path\n\n\ndef get_current_time():\n return datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n\ndef create_minibatches(data, labels, mb_size):\n \"\"\"\n\n :param data: numpy array of shape (n_samples,...) 
where the first dimension iterates through samples\n :param labels: numpy array with labels\n :param mb_size: samples per minibatch\n :return: list of tuples (minibatch_data, minibatch_labels)\n \"\"\"\n n_samples = data.shape[0]\n full_mb = n_samples // mb_size\n\n mbs = []\n\n for i in range(full_mb):\n mb = data[i * mb_size: (i + 1) * mb_size, ...]\n lbl = labels[i * mb_size: (i + 1) * mb_size, ...]\n mbs.append((mb, lbl))\n\n if n_samples > full_mb * mb_size:\n mb = data[full_mb * mb_size:]\n lbl = labels[full_mb * mb_size:]\n mbs.append((mb, lbl))\n\n return mbs\n\n\ndef write_to_tensorboard(sess, writer, for_tensorboard, evaluation_set, evaluation, epoch_index):\n \"\"\"\n save the results of evaluation to tensorboard\n :param sess: current session\n :param writer: tensorflow writer object\n :param for_tensorboard: dictionary with tensors for image and scalar summary generation\n :param evaluation_set: tuple that stores the data used for evaluation and corresponding labels.\n Shape should be the same as for evaluation tensors\n :param evaluation: dictionary that holds evaluation tensor, variable for storing embeddings, and input placeholder\n :param epoch_index:\n :return: Nothing\n \"\"\"\n summ, _ = sess.run([for_tensorboard['summary'], evaluation['assign_embedding']],\n feed_dict={evaluation['input']: evaluation_set[0]})\n writer.add_summary(summ, epoch_index)\n projector.visualize_embeddings(writer, for_tensorboard['projector'])\n\n\ndef create_summary_and_projector(model, evaluation, evaluation_set, run_path, max_to_output=10):\n \"\"\"\n Set up objects to store embeddings and learning loss for the use in tensorboard\n :param model: dictionary that holds model tensors\n :param evaluation: dictionary that holds evaluation tensors\n :param evaluation_set: tuple that stores the data used for evaluation and corresponding labels.\n Shape should be the same as for evaluation tensors.\n :param run_path: path to current project\n :param max_to_output: number of sample images to output into tensorboard\n :return: dictionary with summary generation and embedding projector tensors\n \"\"\"\n\n def store_labels(mb):\n emb_lbls = mb[1].reshape(-1, 1)\n p.DataFrame(emb_lbls).to_csv(os.path.join(run_path, \"emb_lbls.tsv\"), index=False, header=False, sep='\\t')\n\n # https: // www.tensorflow.org / programmers_guide / summaries_and_tensorboard\n summ_sc = tf.summary.scalar(name=\"training_loss\", tensor=model['cost'])\n summ_im_inp = tf.summary.image(name=\"original_image\", tensor=model['input'], max_outputs=max_to_output)\n summ_im_dec = tf.summary.image(name=\"decoded_image\", tensor=model['dec'], max_outputs=max_to_output)\n summ = tf.summary.merge([summ_sc, summ_im_inp, summ_im_dec])\n\n # https: // www.tensorflow.org / programmers_guide / embedding\n config = projector.ProjectorConfig()\n embedding = config.embeddings.add()\n embedding.tensor_name = evaluation['embedding_variable'].name\n embedding.metadata_path = \"emb_lbls.tsv\"\n\n store_labels(evaluation_set)\n\n return {'summary': summ, 'projector': config}\n\n\ndef create_evaluation_tensor(model, evaluation_shape):\n \"\"\"\n Create tensors that are used for visualizing in tensorboard\n :param model: dictionary that holds model tensors\n :param evaluation_shape: tuple (n_samples_for_evaluation, size_of_hidden_space)\n :return: dictionary that holds tensors for computing image embeddings\n \"\"\"\n # prepare variable for visualizing embeddings\n # beware of fixed input size\n # ev = tf.placeholder(shape=evaluation_shape, 
dtype=tf.float32, name=\"emb_placeholder\")\n    embedding_var = tf.Variable(np.zeros(evaluation_shape), dtype=tf.float32, name=\"embeddings\")\n    emb_assign = tf.assign(embedding_var, model['enc'])\n\n    evaluation = {'assign_embedding': emb_assign,\n                  'embedding_variable': embedding_var,\n                  'input': model['input']}\n    return evaluation\n","sub_path":"Labs/Lab7/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"231551868","text":"import re\nimport sys\n\nre_1 = re.compile(r'R(\d+)C(\d+)')\nre_2 = re.compile(r'([A-Z]+)(\d+)')\n\ndef row_column_to_fmt_1(r, c):\n    return 'R' + str(r) + 'C' + str(c)\n\ndef digits(b, x, num_digits):\n    ds = []\n    i = 0\n    while i < num_digits:\n        d = x % b\n        x = x // b\n        ds.append(d)\n        i = i + 1\n    ds.reverse()\n    return ds\n\ndef compute_bounds(c):\n    if c <= 26:\n        raise Exception(\"c must be greater than 26!\")\n    x = 2\n    p_t = 26\n    t = p_t * 26\n    l = p_t\n    u = l + t\n    while u < c:\n        l = u\n        p_t = t\n        t = t * 26\n        x = x + 1\n        u = l + t\n    return (l, u, x)\n\ndef column_to_str(c):\n    if c <= 26:\n        return chr(ord('A') + c - 1)\n    else:\n        (l, u, num_digits) = compute_bounds(c)\n        ds = digits(26, c - l - 1, num_digits)\n        return ''.join(map(lambda x: chr(ord('A') + x), ds))\n\ndef row_column_to_fmt_2(r, c):\n    return column_to_str(c) + str(r)\n\ndef str_to_column(s):\n    c = 0\n    for x in s:\n        c = (c * 26) + (ord(x) - ord('A') + 1)\n    return c\n\ndef fmt_1_to_row_column(m):\n    r = int(m.group(1))\n    c = int(m.group(2))\n    return (r, c)\n\ndef fmt_2_to_row_column(m):\n    r = int(m.group(2))\n    c = str_to_column(m.group(1))\n    return (r, c)\n\ndef translate(line):\n    m_1 = re_1.match(line)\n    m_2 = re_2.match(line)\n    if m_1:\n        (r, c) = fmt_1_to_row_column(m_1)\n        return row_column_to_fmt_2(r, c)\n    elif m_2:\n        (r, c) = fmt_2_to_row_column(m_2)\n        return row_column_to_fmt_1(r, c)\n    else:\n        raise Exception(\"Unknown input: %s\" % line)\n\ndef main(args):\n    lines = sys.stdin.readlines()\n    lines = lines[1:]\n    for line in lines:\n        print(translate(line.strip()))\n\nif __name__ == \"__main__\":\n    main(sys.argv)\n","sub_path":"1_100/problem_1_b/problem_1_b.py","file_name":"problem_1_b.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"43598140","text":"\"\"\"\nGiven K strings of lowercase Latin letters. Find their longest common substring.\ninput:\n3\nabacaba\nmycabarchive\nacabistrue\n\noutput:\ncab\n\"\"\"\n\nfrom collections import defaultdict\n\nDEFAULT_MULT = 31\nDEFAULT_MOD = 1073676287\n\n\nclass HashString:\n    def __init__(self, string, p=DEFAULT_MULT, m=DEFAULT_MOD):\n        self.string = string\n        self.p = p\n        self.m = m\n        self.hashes = []\n        self.powers = []\n        self.prepare_hashes()\n\n    def prepare_hashes(self):\n        cur_hash = 0\n        cur_p = 1\n        for char in self.string:\n            cur_hash = cur_hash * self.p + ord(char)\n            cur_hash %= self.m\n            self.hashes.append(cur_hash)\n            self.powers.append(cur_p)\n            cur_p = (cur_p * self.p) % self.m\n\n    def compute_hash(self, a, b):\n        if a == 0:\n            return self.hashes[b]\n        return (self.hashes[b] - self.hashes[a - 1] * self.powers[b - a + 1]) % self.m\n\n    def __len__(self):\n        return len(self.string)\n\n\ndef exists_lcp(words, cur_len):\n    hashes = defaultdict(list)\n    intersected_hashes = set()\n    for idx, word in enumerate(words):\n        for i in range(len(word) - cur_len + 1):\n            hashes[word].append(word.compute_hash(i, i + cur_len - 1))\n        if idx == 0:\n            intersected_hashes = set(hashes[word])\n        else:\n            intersected_hashes = intersected_hashes.intersection(set(hashes[word]))\n        if len(intersected_hashes) == 0:\n            return False, []\n\n    for i in range(len(hashes[word])):\n        if hashes[word][i] in intersected_hashes:\n            return True, word.string[i:i + cur_len]\n\n\ndef get_lcp(words):\n    # for fast breaking loop in exists_lcp\n    words = list(map(HashString, sorted(words, key=len)))\n    min_len = len(words[0])\n\n    left, right = 1, min_len + 1\n    best_lcp = None\n    while left < right:\n        middle = (right + left) // 2\n        exists, res = exists_lcp(words, middle)\n        if not exists:\n            right = middle\n        else:\n            best_lcp = res\n            left = middle + 1\n    return best_lcp\n\n\nif __name__ == '__main__':\n    k = int(input())\n    words = []\n    for _ in range(k):\n        words.append(input())\n\n    print(get_lcp(words))","sub_path":"14 - Базовые алгоритмы на строках/E. Подстроки-3.py","file_name":"E. Подстроки-3.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"125980811","text":"# MET2UTC.py -- Convert from MET to UTC, for New Horizons only.\n#\n# Returns error message for any other mission.\n#\n# Assumes that the SPICE Leap seconds kernel and New Horizons\n# spacecraft clock (SCLK) kernel have already been loaded. \n\n# The NH sclk produces a \"tick\" every 20 microseconds, so there are\n# 50,000 ticks in 1 second of MET\n#\n# Note, 1 \"second\" of MET is not quite equal to 1 second of time, since the\n# spacecraft clock drifts a tiny amount. \n#\n# A. Steffl, Mar 2008 -- Original version\n# H. Throop, Apr 2008 -- Modified for GV\n# H. Throop, Apr 2013 -- Changed sclk_ticks from double to float, as per Nathaniel Cunningham\n# H. 
Throop, Aug 2016 -- Converted to python\n\nimport hbt\nimport numpy as np\nimport spiceypy as sp\n\ndef met2utc(met_in, name_observer = 'NEW HORIZONS'):\n\n# met_in = 299348928.9358144\n# name_observer = 'New Horizons'\n\n if (name_observer.upper().find('NEW HORIZONS') == -1):\n print('MET can be used only for New Horizons')\n return\n\n# Convert input to an array, even if it is not\n\n if hbt.is_array(met_in):\n met = np.array(met_in)\n else:\n met = np.array([met_in])\n\n# If there are any commas in the MET, then remove them\n\n if (type(met[0]) == str):\n met = np.zeros(np.shape(met_in))\n for i,met_i in enumerate(met_in):\n met[i] = float(met_in[i].replace(',', ''))\n\n sclk_ticks = np.array(met * 5e4) # Have to include np.array() -- not sure why. I guess a 1x1 np-array is demoted??\n ntime = np.size(met_in) # Number of elements\n et = np.zeros(ntime) \n utc = np.zeros(ntime, dtype = 'U30')\n\n for i in range(np.size(ntime)): \n et[i] = sp.sct2e(-98, sclk_ticks[i])\n utc[i] = sp.et2utc(et[i], 'C', 3)\n# utc[i] = sp_et2utc, et_i, 'ISOD', 3, utc_i\n \n if (ntime == 1):\n utc = utc[0]\n\n return utc\n","sub_path":"hbt/met2utc.py","file_name":"met2utc.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"336318824","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nAutor: Luciano Plascencia Valle\r\nFecha de creación: Thu Mar 7 20:07:19 2013\r\nMÓDULO DE INTERPOLACIÓN\r\n\"\"\"\r\n##module interpol\r\n\r\ndef coeffts(xDatos,yDatos):\r\n m=len(xDatos)\r\n a=yDatos.copy()\r\n for k in range (1,m):\r\n a[k:m]=(a[k:m]-a[k-1])/(xDatos[k:m]-xDatos[k-1])\r\n return a\r\n \r\ndef newton(xDatos,yDatos,x):\r\n a=coeffts(xDatos,yDatos)\r\n n=len(xDatos)-1\r\n p=a[n]\r\n for k in range(1,n+1):\r\n p=a[n-k]+(x-xDatos[n-k])*p\r\n return p\r\n\r\ndef neville(xDatos,yDatos,x):\r\n m=len(xDatos)\r\n y=yDatos.copy()\r\n for k in range (1,m):\r\n y[0:m-k]=((x-xDatos[k:m])*y[0:m-k]+\\\r\n (xDatos[0:m-k]-x)*y[1:m-k+1])/(xDatos[0:m-k]-xDatos[k:m])\r\n return y[0]\r\n\r\ndef lagrange(xDatos,yDatos,x):\r\n y=0.\r\n m=len(xDatos)\r\n for k in range(0,m):\r\n L=1.\r\n for l in range(0,m):\r\n if l!=k:\r\n L=L*(x-xDatos[l])/(xDatos[k]-xDatos[l])\r\n y=y+L*yDatos[k]\r\n return y\r\n","sub_path":"Tema 2 - Operaciones matematicas basicas/Examen 2 Parte/Luciano Plascencia/interpol.py","file_name":"interpol.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"491672596","text":"def test(input):\n\t\n\tlist1 = [0]*26\n\tlist2 = [0]*26\n\tinput[0] = input[0].lower()\n\tinput[1] = input[1].lower()\n\tfor x in input[0]:\n\t\tlist1[ord(x)-97] = list1[ord(x)-97] + 1\n\tfor x in input[1]:\n\t\tlist2[ord(x)-97] = list2[ord(x)-97] + 1\n\tfor x in range(0,26):\n\t\tif list1[x] 2:\n print(\"可以上车\")\n is_seat = input(\"是否有座位:\")\n if is_seat == \"1\":\n print(\"有座位,我坐下了\")\n else:\n print(\"没有座位,哥哥要站着了\")\nelse:\n print(\"口袋没钱,走着回家\")","sub_path":"basic_Day02/12_练习题-if语句嵌套.py","file_name":"12_练习题-if语句嵌套.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"266699644","text":"# A Floater is Prey; it updates by moving mostly in\r\n# a straight line, but with random changes to its\r\n# angle and speed, and displays as ufo.gif (whose\r\n# dimensions (width and height) are computed by\r\n# calling .width()/.height() on the PhotoImage\r\n\r\n\r\n#from PIL.ImageTk 
import PhotoImage\r\nfrom prey import Prey\r\nimport random\r\n\r\nclass Floater(Prey):\r\n radius = 5\r\n \r\n def __init__(self,x,y):\r\n #self._image = PhotoImage(file='ufo.gif')\r\n self.randomize_angle()\r\n Prey.__init__(self,x,y,Floater.radius*2,Floater.radius*2, self.get_angle(),5)\r\n \r\n def update(self, model):\r\n self.move()\r\n self.wall_bounce()\r\n rand_int = random.randint(0,10)\r\n if rand_int <= 3:\r\n self.set_angle(self.get_angle() + random.uniform(-.5, .5))\r\n while True:\r\n n = random.uniform(-.5, .5)\r\n if (self.get_speed() + n) < 7 or (self.get_speed() + n) > 3:\r\n self.set_speed(self._speed+n)\r\n break\r\n \r\n def display(self,the_canvas):\r\n #the_canvas.create_image(*self.get_location())#,image=self._image)\r\n the_canvas.create_oval(self._x - Floater.radius, self._y - Floater.radius,\r\n self._x + Floater.radius, self._y + Floater.radius, fill = 'red')\r\n","sub_path":"floater.py","file_name":"floater.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"149464892","text":"# Copyright 2014 by Ethan Fritz. All Rights Reserved.\n\nimport unicodedata\nfrom ui import utilities\nfrom data.contestant import Contestant\n\ntry:\n import wx\n import wx.dataview as dv\n from wx.dataview import DataViewCtrl\n from wx.dataview import PyDataViewIndexListModel\n from wx.dataview import PyDataViewModel\n if \"3.0.2.0\" not in wx.__version__:\n raise ImportError\nexcept ImportError:\n raise ImportError(\n \"The wxPython 3.0.2.0 module is required to run this program.\")\n\n\n#\n# Constants\n#\nfrom ui import COL_TO_ROW_MAPPING\n\n\n#\n# Exceptions\n#\nfrom core.exceptions import MissingInputDataError\n\n\ndef make_tuple_spacer(size=1):\n return ((0, 0), size, wx.EXPAND)\n\n\ndef make_tuple_button(button):\n return (button, 0, wx.EXPAND | wx.ALIGN_CENTER)\n\n\nclass DialogAddGroup(wx.Dialog):\n def __init__(self, parent, api):\n self.FONT_HEADER = wx.Font(22, wx.DEFAULT, wx.NORMAL, wx.LIGHT)\n self.FONT_TITLE = wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.LIGHT)\n wx.Dialog.__init__(self, parent, title=\"Add Group\")\n self.api = api\n self.parent = parent\n self.create_sizer(self)\n\n def create_sizer(self, parent):\n header = wx.StaticText(self, label=\"Add Group\")\n header.SetFont(self.FONT_HEADER)\n title_group = wx.StaticText(parent, label=\"Group Name\")\n title_group.SetFont(self.FONT_TITLE)\n self.text_group = wx.TextCtrl(parent)\n self.button_add = wx.Button(self, id=wx.ID_OK, label=\"Add\")\n self.Bind(wx.EVT_BUTTON, self.OnButtonAdd, self.button_add)\n self.button_close = wx.Button(parent, id=wx.ID_CANCEL, label=\"Close\")\n sizer_group = wx.BoxSizer(wx.VERTICAL)\n sizer_group.AddMany([\n (title_group, 0, wx.EXPAND),\n (self.text_group, 0, wx.EXPAND)\n ])\n sizer_dialog_buttons = wx.BoxSizer(wx.HORIZONTAL)\n sizer_dialog_buttons.AddMany([\n ((0, 0), 1, wx.EXPAND),\n (self.button_add, 0, wx.EXPAND | wx.RIGHT),\n (self.button_close, 0, wx.EXPAND | wx.RIGHT)\n ])\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.AddMany([\n (header, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 10),\n (sizer_group, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 20),\n (sizer_dialog_buttons, 0, wx.EXPAND | wx.ALL, 25)\n ])\n self.SetSizerAndFit(sizer)\n\n def OnButtonAdd(self, event):\n group = str(self.text_group.GetValue())\n if not group:\n raise MissingInputDataError()\n #TODO: do this more intelligently by determining correct order\n #selections = list(self.parent.listbox_group.GetSelections())\n 
#selections.reverse()\n #for index in selections:\n # self.parent.listbox_group.Delete(index)\n\n self.api.add_group(group)\n items = list(self.api.session.registrar.groups)\n self.parent.listbox_group.Set(items)\n self.parent.listbox_group.SetSelection(\n items.index(group)\n )\n self.Close()\n\n def GetGroup(self):\n return str(self.text_group.GetValue())\n\n\nclass PanelRegistrarFields(wx.Panel):\n def __init__(self, parent, api):\n self.FONT_TITLE = wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.LIGHT)\n wx.Panel.__init__(self, parent=parent)\n self.api = api\n self.text_id = None\n self.text_first = None\n self.text_last = None\n self.text_car = None\n self.listbox_group = None\n self.create_sizer()\n\n def ctrl_data(self):\n \"\"\" Data should be passed in the format:\n wxStaticText:(title_*, label, font)\n wxTextCtrl: (text_*, label, handler)\n wxChoice: (choice_*, label, handler, choices, selection)\n wxButton: (button_*, label, handler, image_path_sequence)\n wxListBox: (listbox_*, label, handler, choices, selection)\n \"\"\"\n return ((\"title_id\", \"Car ID\", self.FONT_TITLE),\n (\"text_id\", \"\"),\n (\"title_first\", \"First Name\", self.FONT_TITLE),\n (\"text_first\", \"\"),\n (\"title_last\", \"Last Name\", self.FONT_TITLE),\n (\"text_last\", \"\"),\n (\"title_car\", \"Car Name\", self.FONT_TITLE),\n (\"text_car\", \"\"),\n (\"button_add_entry\", \"Add Entry\", self.OnAddEntries,\n (\"./images/add.png\", \"./images/add_hover.png\", \"./images/add_press.png\")),\n (\"title_group\", \"Group Tags\", self.FONT_TITLE),\n (\"listbox_groups\", \"Group Tags\", None,\n list(self.api.session.registrar.groups)),\n (\"title_select_hint\", \"Use CTRL and SHIFT\\nto select multiple tags\"),\n (\"button_add_group\", \"Add Group\", self.OnAddGroup,\n (\"./images/add.png\", \"./images/add_hover.png\", \"./images/add_press.png\")),\n (\"button_toggle_group\", \"Show/Hide Groups\", self.OnShowGroups,\n (\"./images/group.png\", \"./images/group_hover.png\", \"./images/group_press.png\")))\n\n def create_sizer(self):\n utilities.create_ctrls(self, self.ctrl_data())\n\n sizer_top_fields = wx.BoxSizer(wx.VERTICAL)\n sizer_top_fields.Add(self.title_id, 0, wx.EXPAND)\n sizer_top_fields.Add(self.text_id, 0, wx.EXPAND)\n sizer_top_fields.Add(self.title_first, 0, wx.EXPAND)\n sizer_top_fields.Add(self.text_first, 0, wx.EXPAND)\n sizer_top_fields.Add(self.title_last, 0, wx.EXPAND)\n sizer_top_fields.Add(self.text_last, 0, wx.EXPAND)\n sizer_top_fields.Add(self.title_car, 0, wx.EXPAND)\n sizer_top_fields.Add(self.text_car, 0, wx.EXPAND)\n sizer_top_fields.Add(self.button_add_entry, 0, wx.CENTER | wx.TOP, 10)\n\n sizer_bottom_fields = wx.BoxSizer(wx.VERTICAL)\n sizer_bottom_fields.Add(self.title_group, 0, wx.EXPAND)\n sizer_bottom_fields.Add(self.title_select_hint, 0, wx.EXPAND)\n sizer_bottom_fields.Add(self.listbox_groups, 1, wx.EXPAND)\n sizer_bottom_fields.Add(self.button_add_group, 0, wx.CENTER | wx.TOP, 10)\n sizer_bottom_fields.Add(self.button_toggle_group, 0, wx.CENTER | wx.TOP, 10)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.AddMany([\n (sizer_top_fields, 1, wx.EXPAND | wx.BOTTOM, 10),\n (sizer_bottom_fields, 1, wx.EXPAND)\n ])\n self.SetSizerAndFit(sizer)\n\n def OnAddEntries(self, event):\n id = self.parent.registrar_fields.text_id.GetValue()\n first = self.parent.registrar_fields.text_first.GetValue()\n last = self.parent.registrar_fields.text_last.GetValue()\n car = self.parent.registrar_fields.text_car.GetValue()\n group_strings = self.parent.registrar_fields.listbox_group.GetStrings()\n 
group_selections = []\n for index in self.parent.registrar_fields.listbox_group.GetSelections():\n group_selections.append(group_strings[index])\n if not all([id, first, last, car, group_selections]):\n raise MissingInputDataError()\n entry = Contestant(\n id,\n first,\n last,\n car,\n group_selections\n )\n self.api.add_entry(entry)\n\n def OnAddGroup(self, event):\n dlg = DialogAddGroup(self, self.api)\n if dlg.ShowModal() == wx.ID_OK:\n self.listbox_group.Append(dlg.GetGroup())\n dlg.Destroy()\n\n def OnShowGroups(self, event):\n pass\n\n\nclass EntriesDataModel(PyDataViewIndexListModel):\n def __init__(self, data):\n PyDataViewIndexListModel.__init__(self, len(data))\n self.data = data\n\n def UpdateDataRef(self, data):\n self.data = data\n\n def GetColumnType(self, col):\n return \"string\"\n\n def GetValueByRow(self, row, col):\n return self.data[row][col]\n\n def SetValueByRow(self, value, row, col):\n normalized_value = unicodedata.normalize('NFKD', value).encode('ascii','ignore')\n self.data[row][col] = normalized_value\n self.RowValueChanged(row, col)\n\n def GetColumnCount(self):\n return len(COL_TO_ROW_MAPPING)\n\n def GetCount(self):\n return len(self.data)\n\n def GetAttrByRow(self, row, col, attr):\n if col == 3:\n attr.SetColour('red')\n attr.SetBold(True)\n return True\n return False\n\n def Compare(self, item1, item2, col, ascending):\n if not ascending:\n item2, item1 = item1, item2\n row1 = self.GetRow(item1)\n row2 = self.GetRow(item2)\n return cmp(self.data[row1][col], self.data[row2][col])\n\n def DeleteRows(self, rows):\n rows = list(rows)\n rows.sort(reverse=True)\n\n for row in rows:\n del self.data[row]\n self.RowDeleted(row)\n\n def AddRow(self, value):\n self.data.append(value)\n self.RowAppended()\n\n\nclass DVC_IndexListModelRegistrar(DataViewCtrl):\n def __init__(self, parent, api):\n DataViewCtrl.__init__(self, parent,\n style=wx.BORDER_THEME\n | dv.DV_ROW_LINES\n | dv.DV_VERT_RULES\n | dv.DV_MULTIPLE)\n self.api = api\n self.model = EntriesDataModel(self.api.session.registrar.Entries)\n self.AssociateModel(self.model)\n\n self.AppendTextColumn(\"ID\", 0, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n self.AppendTextColumn(\"First Name\", 1, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n self.AppendTextColumn(\"Last Name\", 2, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n self.AppendTextColumn(\"Car Name\", 3, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n self.AppendTextColumn(\"Group Tags\", 4, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n\n for c in self.Columns:\n c.Sortable = True\n c.Reorderable = True\n\n self.Bind(dv.EVT_DATAVIEW_ITEM_VALUE_CHANGED, self.OnValueChanged)\n\n def OnValueChanged(self, event):\n self.api.save_updates()\n\n\nclass TreeListModel(PyDataViewModel):\n def __init__(self, data):\n PyDataViewModel.__init__(self)\n self.data = data\n\n # The objmapper is an instance of DataViewItemObjectMapper and is used\n # to help associate Python objects with DataViewItem objects. Normally\n # a dictionary is used so any Python object can be used as data nodes.\n # If the data nodes are weak-referencable then the objmapper can use a\n # WeakValueDictionary instead. Each PyDataViewModel automagically has\n # an instance of DataViewItemObjectMapper preassigned. 
This\n # self.objmapper is used by the self.ObjectToItem and\n # self.ItemToObject methods used below.\n self.objmapper.UseWeakRefs(True)\n\n # Report how many columns this model provides data for.\n def GetColumnCount(self):\n return 6\n\n # Map the data column numbers to the data type\n def GetColumnType(self, col):\n mapper = { 0 : 'string',\n 1 : 'string',\n 2 : 'string',\n 3.: 'string', # the real value is an int, but the renderer should convert it okay\n 4 : 'datetime',\n 5 : 'bool',\n }\n return mapper[col]\n\n def GetChildren(self, parent, children): \n # The view calls this method to find the children of any node in the\n # control. There is an implicit hidden root node, and the top level\n # item(s) should be reported as children of this node. A List view\n # simply provides all items as children of this hidden root. A Tree\n # view adds additional items as children of the other items, as needed,\n # to provide the tree hierachy.\n ##self.log.write(\"GetChildren\\n\")\n\n # If the parent item is invalid then it represents the hidden root\n # item, so we'll use the genre objects as its children and they will\n # end up being the collection of visible roots in our tree.\n if not parent:\n for genre in self.data:\n children.append(self.ObjectToItem(genre))\n return len(self.data)\n\n # Otherwise we'll fetch the python object associated with the parent\n # item and make DV items for each of it's child objects.\n node = self.ItemToObject(parent)\n if isinstance(node, str):\n for song in node.songs:\n children.append(self.ObjectToItem(song))\n return len(node.songs)\n return 0\n\n def IsContainer(self, item):\n # Return True if the item has children, False otherwise.\n ##self.log.write(\"IsContainer\\n\")\n\n # The hidden root is a container\n if not item:\n return True\n # and in this model the genre objects are containers\n node = self.ItemToObject(item)\n if isinstance(node, str):\n return True\n # but everything else (the song objects) are not\n return False\n\n #def HasContainerColumns(self, item):\n # self.log.write('HasContainerColumns\\n')\n # return True\n\n def GetParent(self, item):\n # Return the item which is this item's parent.\n ##self.log.write(\"GetParent\\n\")\n\n if not item:\n return dv.NullDataViewItem\n\n node = self.ItemToObject(item)\n if isinstance(node, str):\n return dv.NullDataViewItem\n elif isinstance(node, Contestant):\n for g in self.data:\n if g.name == node.genre:\n return self.ObjectToItem(g)\n\n def GetValue(self, item, col):\n # Return the value to be displayed for this item and column. 
For this\n        # example we'll just pull the values from the data objects we\n        # associated with the items in GetChildren.\n\n        # Fetch the data object for this item.\n        node = self.ItemToObject(item)\n\n        if isinstance(node, str):\n            # We'll only use the first column for the Genre objects,\n            # for the other columns let's just return empty values\n            mapper = { 0 : node.name,\n                       1 : \"\",\n                       2 : \"\",\n                       3 : \"\",\n                       4 : wx.DateTimeFromTimeT(0), # TODO: There should be some way to indicate a null value...\n                       5 : False,\n                       }\n            return mapper[col]\n\n        elif isinstance(node, Contestant):\n            mapper = { 0 : node.group,\n                       1 : node.id,\n                       2 : node.first,\n                       3 : node.last,\n                       4 : node.car\n                       }\n            return mapper[col]\n        else:\n            raise RuntimeError(\"unknown node type\")\n\n    def GetAttr(self, item, col, attr):\n        ##self.log.write('GetAttr')\n        node = self.ItemToObject(item)\n        if isinstance(node, str):\n            attr.SetColour('blue')\n            attr.SetBold(True)\n            return True\n        return False\n\n    def SetValue(self, value, item, col):\n        # self.log is never defined on this model, so keep the call disabled\n        # like the other log calls in this class\n        ##self.log.write(\"SetValue: %s\\n\" % value)\n\n        # We're not allowing edits in column zero (see below) so we just need\n        # to deal with Song objects and cols 1 - 5\n\n        node = self.ItemToObject(item)\n        if isinstance(node, Contestant):\n            if col == 1:\n                node.artist = value\n            elif col == 2:\n                node.title = value\n            elif col == 3:\n                node.id = value\n            elif col == 4:\n                node.date = value\n            elif col == 5:\n                node.like = value\n\n\nclass DVC_DataViewModelRegistrar(DataViewCtrl):\n\n    def __init__(self, parent, api):\n        DataViewCtrl.__init__(self, parent,\n                           style=wx.BORDER_THEME\n                           | dv.DV_ROW_LINES\n                           | dv.DV_VERT_RULES\n                           | dv.DV_MULTIPLE)\n        self.api = api\n        self.model = TreeListModel(self.api.session.registrar.Entries)\n        self.AssociateModel(self.model)\n\n        self.AppendTextColumn(\"ID\", 0, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n        self.AppendTextColumn(\"First Name\", 1, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n        self.AppendTextColumn(\"Last Name\", 2, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n        self.AppendTextColumn(\"Car Name\", 3, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n        self.AppendTextColumn(\"Group\", 4, width=100, mode=dv.DATAVIEW_CELL_EDITABLE)\n\n        for c in self.Columns:\n            c.Sortable = True\n            c.Reorderable = True","sub_path":"ui/registrar.py","file_name":"registrar.py","file_ext":"py","file_size_in_byte":16752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"335493510","text":"from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\nimport pickle\n\ndef sortPair(val1, val2):\n    if val1 > val2:\n        return val2, val1\n    else:\n        return val1, val2\n\ndef copyStructure(x1, y1, z1, x2, y2, z2):\n    x1, x2 = sortPair(x1, x2)\n    y1, y2 = sortPair(y1, y2)\n    z1, z2 = sortPair(z1, z2)\n\n    width = x2 - x1\n    height = y2 - y1\n    length = z2 - z1\n\n    structure = []\n\n    print(\"Please wait while we watch cat videos and talk ourselves into copying the air blocks you accidentally included in the copy region into the list\")\n    print(\"WHAT? The video is over already? Fine, let's get started. Meanwhile, go feed your ocelots\")\n\n    for row in range(height):\n        structure.append([])\n        for column in range(width):\n            structure[row].append([])\n            for depth in range(length):\n                block = mc.getBlock(x1 + column, y1 + row, z1 + depth)\n                structure[row][column].append(block)\n    return structure\n\n\ninput(\"Walk to one corner of the structure and press ENTER \")\npos = mc.player.getTilePos()\nx1, y1, z1 = pos.x, pos.y, pos.z\n\ninput(\"Walk to the opposite corner and press ENTER \")\npos = mc.player.getTilePos()\nx2, y2, z2 = pos.x, pos.y, pos.z\n\nstructure = copyStructure(x1, y1, z1, x2, y2, z2)\n\nnameOfFile = input(\"What should we name the file? \") + \".txt\"\n\nnameOfFile = open(nameOfFile, \"xb\")\npickle.dump(structure, nameOfFile)\n\n# pickleFile = open(\"pickleFile.txt\", \"wb\")\n# pickle.dump(structure, pickleFile)","sub_path":"BookCraigRichardson/files/saveStructure.py","file_name":"saveStructure.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"205107654","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom env.items import EnvItem\nfrom scrapy.http import Request\nfrom env.city import get_province_city\n\nclass SinaSpider(scrapy.Spider):\n    name = \"sina\"\n    allowed_domains = [\"roll.green.sina.com.cn\"]\n    start_urls = (\n        'http://roll.green.sina.com.cn/green/hb_gdxw/index.shtml',\n    )\n\n    def parse(self, response):\n        try:\n            newslistlb = response.css('ul[class=\"list_009\"]')\n            for li in newslistlb.xpath('li'):\n                item = EnvItem()\n                item[\"link\"] = li.xpath(\"a/@href\").extract()[0]\n                item[\"title\"] = li.xpath(\"a/text()\").extract()[0]\n                item[\"date\"] = str(item[\"link\"].split(\"/\")[-2])\n                (province, city) = get_province_city(item[\"title\"])\n                item[\"province\"] = province\n                item[\"city\"] = city\n                yield item\n            a = response.css('span[class=\"pagebox_next\"]')\n            if a:\n                first_url = \"/\".join(response.url.split(\"/\")[:-1])\n                last_url = a.xpath(\"a/@href\").extract()[0].split(\"/\")[-1]\n                url = first_url + \"/\" + last_url\n                yield Request(url, callback = self.parse)\n        except:\n            pass\n\n\n","sub_path":"envspider/env/env/spiders/sina.py","file_name":"sina.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"413713246","text":"from MobileApps.libs.flows.common.moobe_ows.moobe_ows_flow_container import Gen2MoobeOWSFlowContainer\n\n\nclass MalbecMoobeOWSFlow(Gen2MoobeOWSFlowContainer):\n    project = \"malbec\"\n\n    def __init__(self, driver, printer_obj, ows_flow):\n        super(MalbecMoobeOWSFlow, self).__init__(driver, printer_obj, ows_flow)\n        self.fp_elements = {\"cartridge_error\": [\"fl_devstatus::st_devsts_error_message\", \"fb_action\"],\n                            \"home_screen\": \"flow_home::state_home\"\n                            }","sub_path":"MobileApps/libs/flows/common/moobe_ows/malbec_moobe_ows_flow.py","file_name":"malbec_moobe_ows_flow.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"178306754","text":"#!/usr/bin/env python2\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 25 22:55:29 2018\r\n\r\n@author: Melody\r\n\r\nThe path should contain some subfolders, each subfolder is a class with class \r\nname as subfolder name.\r\n\r\nThe output will build an images folder containing all images, and a label.csv containing \r\nthe label information.\r\n\"\"\"\r\n\r\nimport os\r\nimport pandas as pd\r\nimport argparse\r\nimport 
shutil\r\nimport numpy as np\r\nimport math\r\nimport tensorflow as tf\r\nimport cv2\r\n\r\ndef create_train_test_idx_from_range_num(num, ratio = 0.8):\r\n \r\n trainidx = np.random.choice(num, size = int(math.floor(num*ratio)), replace = False)\r\n textidx = [x for x in list(range(num)) if x not in trainidx]\r\n \r\n return(trainidx, textidx)\r\n \r\ndef create_labelmap(path, output = True):\r\n folders = os.listdir(path)\r\n folders = [x for x in folders if os.path.isdir(os.path.join(path,x))]\r\n \r\n labelmap = pd.DataFrame(columns = ['classname', 'classid'])\r\n idx = 0\r\n for folder in folders:\r\n labelmap.loc[idx] = [folder, idx]\r\n idx =idx + 1\r\n \r\n labelmap.to_csv(os.path.join(path, 'labelmap.csv'), index = False)\r\n \r\n if output:\r\n return(labelmap)\r\n\r\ndef _int64_feature(value):\r\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\r\n\r\ndef _bytes_feature(value):\r\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\r\n\r\ndef main(path, ratio, remove_tmp_folder, resize, size):\r\n \r\n tmpfolder = path+'_tmpoutput'\r\n if not os.path.exists(tmpfolder):\r\n os.mkdir(tmpfolder)\r\n \r\n labelfile = pd.DataFrame(columns = ['file', 'label'])\r\n \r\n labelmap = create_labelmap(path)\r\n \r\n idx = 0\r\n \r\n folders = os.listdir(path)\r\n folders = [x for x in folders if os.path.isdir(os.path.join(path,x))]\r\n \r\n \r\n for folder in folders:\r\n \r\n files = os.listdir(os.path.join(path, folder))\r\n files = [x for x in files if x[-3:] in ['jpg', 'JPG']]\r\n \r\n for file in files:\r\n \r\n shutil.copyfile(os.path.join(path, folder, file), \r\n os.path.join(tmpfolder, folder+'_'+file))\r\n classid = labelmap.loc[labelmap.loc[:,'classname'] == folder, 'classid'].values[0]\r\n \r\n labelfile.loc[idx] = [os.path.join(tmpfolder, folder+'_'+file), classid]\r\n\r\n idx = idx + 1\r\n \r\n labelfile.to_csv(os.path.join(tmpfolder, 'label.csv'), index = False)\r\n \r\n # ======================\r\n \r\n \r\n tffile = dict()\r\n (tffile['train'], tffile['test']) = create_train_test_idx_from_range_num(len(labelfile), ratio)\r\n \r\n for i in ['train', 'test']:\r\n tffile_name = os.path.join(path, i+'.tfrecord')\r\n \r\n writer = tf.python_io.TFRecordWriter(tffile_name)\r\n \r\n for j in tffile[i]:\r\n image = cv2.imread(labelfile.loc[j,'file'])\r\n if resize:\r\n image = cv2.resize(image, size)\r\n \r\n label = labelfile.loc[j,'label']\r\n height, width, depth = image.shape\r\n \r\n feature = {'label': _int64_feature(label),\r\n 'image': _bytes_feature(tf.compat.as_bytes(image.tostring()))\r\n #'height': _int64_feature(height),\r\n #'width': _int64_feature(width),\r\n #'depth': _int64_feature(depth)\r\n }\r\n \r\n example = tf.train.Example(features=tf.train.Features(feature=feature))\r\n \r\n writer.write(example.SerializeToString())\r\n \r\n writer.close()\r\n \r\n if remove_tmp_folder:\r\n shutil.rmtree(tmpfolder)\r\n \r\n\r\n \r\n \r\n \r\nif __name__== \"__main__\":\r\n parser = argparse.ArgumentParser(description='from folder to create images folder and label.csv')\r\n parser.add_argument('--path', required = True,\r\n help='path of the folder containing several subfolders')\r\n parser.add_argument('--ratio', default = 0.8,\r\n type = float,\r\n help = 'the ratio of train sample')\r\n parser.add_argument('--remove_tmp_folder', default = True,\r\n type = bool, help = 'whether remove the tmp folder after the process')\r\n parser.add_argument('--resize', default = True,\r\n type = bool, help = 'whether resize the images')\r\n 
parser.add_argument('--size', default = (299, 299), nargs = 2,\r\n                        type = int, help = 'if resize the image, what is the new size')\r\n    args = parser.parse_args()\r\n    \r\n    # nargs = 2 lets '--size 299 299' parse as two ints; cv2.resize expects a tuple\r\n    main(path = args.path, ratio = args.ratio, \r\n         remove_tmp_folder = args.remove_tmp_folder, resize = args.resize,\r\n         size = tuple(args.size))","sub_path":"build_images_folder_and_label.py","file_name":"build_images_folder_and_label.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"253905061","text":"import cv2\nimport mediapipe as mp\n\nmp_face_detection = mp.solutions.face_detection\n# we use MediaPipe, which ships a ready-made model for detecting human faces\n\ndef test_static_imgs(img_paths, output_path):\n    with mp_face_detection.FaceDetection(\n            min_detection_confidence=0.5) as face_detection:\n        for idx, img_path in enumerate(img_paths):\n            image = cv2.imread(img_path)\n            W = image.shape[1]  # an image is a numpy array; .get() is a VideoCapture method\n            H = image.shape[0]\n            results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n            if not results.detections:\n                continue\n            for detection in results.detections:\n                bb = detection.location_data.relative_bounding_box\n                x = bb.xmin * W\n                y = bb.ymin * H\n                w = bb.width * W\n                h = bb.height * H\n                x, y, w, h = int(x - 0.1 * w), int(y - 0.1 * h), int(w * 1.2), int(h * 1.2)\n                kernel = 50\n                image[y:y + h, x:x + w] = cv2.blur(image[y:y + h, x:x + w], (kernel, kernel))\n            # save the image the blur was actually applied to\n            cv2.imwrite(output_path + str(idx) + '.png', image)\n\n# video processing\ndef infer_video(cap):\n    W = cap.get(3)\n    H = cap.get(4)\n\n    with mp_face_detection.FaceDetection(min_detection_confidence=0.5) as face_detection:\n        while cap.isOpened():\n            success, image = cap.read()\n            if not success:\n                print(\"Ignoring empty camera frame.\")\n                continue\n\n            image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)\n            image.flags.writeable = False\n            # run detection on this frame; it returns a list of detected objects,\n            # each with 16 fields: the first four are the face bounding box coordinates,\n            # the remaining 12 hold the coordinates of facial keypoints (nose, eyes, ears)\n            results = face_detection.process(image)\n\n            image.flags.writeable = True\n            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n            # if faces were found, apply cv2's blur function to each bounding box\n            if results.detections:\n                for detection in results.detections:\n                    bb = detection.location_data.relative_bounding_box\n                    x = abs(bb.xmin * W)\n                    y = abs(bb.ymin * H)\n                    w = abs(bb.width * W)\n                    h = abs(bb.height * H)\n                    x, y, w, h = int(x - 0.1 * w), int(y - 0.1 * h), int(w * 1.2), int(h * 1.2)\n                    x = 0 if x < 0 else x\n                    y = 0 if y < 0 else y\n\n                    kernel = 50  # blur strength\n                    # replace this region of the frame with its blurred version\n                    image[y:y + h, x:x + w] = cv2.blur(image[y:y + h, x:x + w], (kernel, kernel))\n\n            cv2.imshow('Anon Face Detection', image)\n            if cv2.waitKey(5) & 0xFF == 27:\n                break\n\n    cap.release()\n\n\ndef test_video(video_path):\n    cap = cv2.VideoCapture(video_path)\n    infer_video(cap)\n\n\ndef test_webcam(ip_cam_link=None):\n    cap = cv2.VideoCapture(0)\n    if ip_cam_link:\n        cap.open(ip_cam_link)\n\n    infer_video(cap)\n\n\nif __name__ == '__main__':\n    test_webcam()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"230745012","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, jsonify,request\n\napp = Flask(__name__)\n\nfrom products import products\n\n@app.route('/ping')\ndef ping():\n    return jsonify({\"message\":\"Pong\"})\n@app.route('/products')\ndef getproducts():\n    return jsonify({\"products\": products,\"message\": \"Product's list\"})\n@app.route('/products/<string:product_name>')\ndef getproduct(product_name):\n    productsFound = [product for product in products if product['Name']==product_name]\n    if (len(productsFound) > 0):\n        return jsonify({\"product\":productsFound[0]})\n    return jsonify({\"message\":\"Product not found\"})\n\n@app.route('/products',methods=['POST'])\ndef addProduct():\n    new_product = {\n        \"Name\": request.json['Name'],\n        \"price\": request.json['price'],\n        \"quantity\": request.json['quantity'] \n    }\n    products.append(new_product)\n\n    return jsonify({\"message\":\"Item was added successfully\",\"products\":products})\n\n@app.route('/products/<string:product_name>',methods = ['PUT'])\ndef editProduct(product_name):\n    productFound = [product for product in products if product['Name']== product_name]\n    if (len(productFound)>0):\n        productFound[0]['Name'] = request.json['Name']\n        productFound[0]['price'] = request.json['price']\n        productFound[0]['quantity'] = request.json['quantity']\n        return jsonify({\"message\":\"Product Updated\",\n                        \"product\":productFound[0]})\n    return jsonify({\"message\":\"Item not found\"})\n\n\n@app.route('/products/<string:product_name>',methods=['DELETE'])\ndef deleteproduct(product_name):\n    productFound = [product for product in products if product['Name']== product_name]\n    if len(productFound) > 0:\n        products.remove(productFound[0])\n        return jsonify({\"message\":\"Product removed\", \n                        \"Products\":products})\n    return jsonify({\"message\":\"Product was not found\"})\n\nif __name__=='__main__':\n    app.run(debug=True,port=5000)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"347261913","text":"import GPz\nfrom numpy import *\nimport matplotlib.pyplot as plt\nfrom numpy.linalg import *\n\n########### Model options ###########\n\nmethod = 'VL'               # select method, options = GL, VL, GD, VD, GC and VC\nm = 200                     # number of basis functions to use\n\n########### Generate Data ###########\n\nn = 4000\n\nX = linspace(-10, 10, n)\nX = X[(X<-6)|(X>-3)]\n\nn = len(X)\nX = X.reshape(n, 1)\n\nf_noise = (0.01+3 * sin(X) * (1 + exp(-0.1 * X)) ** -1) ** 2\nY = 10*sinc(2*X) + random.randn(n, 1) * f_noise\n\nXs = linspace(-12, 12, n)\nXs = Xs.reshape(n, 1)\n\n########### Start Script ###########\n\n# obtain an initial model using the default options\nmodel = GPz.GP(m,method=method)\n\n# train the model using the default options\nmodel.train(X,Y)\n\n# use the model to generate predictions for the test set\nmu,sigma,variance,noise,PHI = model.predict(Xs)\n\n########### Display Results ###########\n\nplt.fill_between(Xs[:,0], mu[:,0]-2 * sqrt(sigma[:,0]), mu[:,0]+2 * sqrt(sigma[:,0]),facecolor=(0.85, 0.85, 0.85))\nplt.plot(X, Y, 'b.')\n\nSIGMAi = model.SIGMAi\nmuY = model.muY\nw = model.w\n\n[U,S,V] = svd(SIGMAi[:,:,0])\nR = dot(U,diag(sqrt(S)))\n\nk = 20\n\nws = dot(R,random.randn(len(w),k))+w\nmus = dot(PHI,ws)+dot(Xs,model.wL)+muY\nplt.plot(Xs,mus)\nplt.plot(Xs, mu, 'r-',linewidth=2)\nplt.show()","sub_path":"python/demo_sinc.py","file_name":"demo_sinc.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"403971762","text":"import logging\nfrom steelfig.models.mysql 
import Mysql\n\nlogger = logging.getLogger(__name__)\nclass Message(object):\n\n def __init__(self, identifier=None):\n self.id = None\n\n if type(identifier) == int:\n self.load_from_id(identifier)\n\n def load_from_id(self, id):\n db = Mysql()\n cursor = db.get_cursor()\n cursor.execute('SELECT * FROM v_messages WHERE id = %s', (id,))\n data = cursor.fetchone()\n self.id = data.get('id')\n self.event_id = data.get('event_id')\n self.from_id = data.get('from_id')\n self.from_name = data.get('from_name')\n self.to_id = data.get('to_id')\n self.to_name = data.get('to_name')\n self.subject = data.get('subject')\n self.message = data.get('message')\n self.created_at = data.get('created_at')\n self.read_at = data.get('read_at')\n\n def get(self):\n return {\n \"event_message\": {\n \"id\": self.id,\n \"event_id\": self.event_id,\n \"from_id\": self.from_id,\n \"from_name\": self.from_name,\n \"to_id\": self.to_id,\n \"to_name\": self.to_name,\n \"subject\": self.subject,\n \"message\": self.message,\n \"created_at\": self.created_at,\n \"read_at\": self.read_at,\n }\n }\n\n def create(self, params):\n db = Mysql()\n cursor = db.get_cursor()\n cursor.execute('''\n CALL create_event_message(%s, %s, %s, %s, %s)\n ''', (\n params.get('event_id'),\n params.get('from_id'),\n params.get('to_id'),\n params.get('subject'),\n params.get('message')\n ))\n result = cursor.fetchone()\n return self.load_from_id(result.get('id'))\n\n def reply(self, params):\n db = Mysql()\n cursor = db.get_cursor()\n cursor.execute('''\n CALL create_reply_message(%s, %s, %s, %s)\n ''', (\n params.get('id'),\n params.get('from_id'),\n params.get('subject'),\n params.get('message')\n ))\n result = cursor.fetchone()\n return self.load_from_id(result.get('id'))\n\n def set_read(self, account_id):\n db = Mysql()\n cursor = db.get_cursor()\n cursor.execute('''\n UPDATE messages SET read_at = NOW()\n WHERE id = %s\n AND event_id IN (\n SELECT event_id FROM attendees WHERE account_id = %s\n )\n ''', (self.id, account_id))\n self.load_from_id(self.id)\n\n return self.get()\n\n def delete(self, account_id, message_id):\n db = Mysql()\n cursor = db.get_cursor()\n cursor.execute('''\n DELETE FROM messages\n WHERE id = %s\n AND to_id = %s\n LIMIT 1\n ''', (message_id, account_id))\n return True\n","sub_path":"steelfig/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"231386799","text":"import unittest\nfrom pyomo.solvers.plugins.solvers.CPLEX import *\nfrom pyomo.environ import *\nfrom pyomo.opt import SolverFactory\nfrom pyomo.core.base.var import *\nfrom pyomo.core.base.param import SimpleParam\n# from sddp.pyomo_tool import *\nfrom pyomotools.tools import model_str\n\n\nclass MyTestCase(unittest.TestCase):\n def setUp(self):\n self.opt = SolverFactory('cplex',\n executable=\"/opt/ibm/ILOG/CPLEX_Studio128/cplex/bin/x86-64_linux/cplex\") # type:CPLEXSHELL\n self.model = ConcreteModel()\n\n def test_base(self):\n \"\"\"\n min x**2\n \"\"\"\n model = self.model\n model.x = Var(domain=Reals, bounds=(1, None))\n model.obj = Objective(expr=model.x ** 2, sense=minimize)\n result = self.opt.solve(model)\n self.assertEquals(value(model.x), 1)\n self.assertEquals(value(model.obj), 1)\n print(type(result))\n print(result.solver.status)\n def test_model_expresion(self):\n \"\"\"\n min x**2\n \"\"\"\n model = self.model\n model.x = Var(domain=Reals, bounds=(1, None))\n model.obj = Objective(expr=model.x ** 2, 
sense=minimize)\n result = self.opt.solve(model)\n print(model_str(model))\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"sddp/test/test_pyomo.py","file_name":"test_pyomo.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"585751561","text":"from yeahml.config.default.types.base_types import (\n categorical,\n list_of_categorical,\n numeric,\n)\nfrom yeahml.config.default.types.compound.data import data_set_name_dict\nfrom yeahml.config.default.types.compound.directive import instruct_parser\nfrom yeahml.config.default.types.compound.layer import layers_parser\nfrom yeahml.config.default.types.compound.optimizer import optimizers_parser\nfrom yeahml.config.default.types.compound.performance import performances_parser\nfrom yeahml.config.default.types.param_types import optional_config\n\n# TODO: check for extra keys in the configs that are not specified here\n# meta\n# TODO: set accepted options for `trace_level`\n# TODO: ensure `default_load_params_path` is a path.. also, does this belong in\n# meta?\n# TODO: numbers could probably be converted to string (for experiment_name?)\nmeta = {\n \"meta\": {\n # directory\n \"yeahml_dir\": categorical(\n default_value=\"yeahml\",\n required=False,\n is_type=str,\n to_lower=False,\n description=(\n \"Root directory to store information\\n\"\n \" > e.g. meta:yeahml_dir: 'yeahml'\"\n ),\n ), # could add a check that the location exists\n \"data_name\": categorical(\n default_value=None,\n required=True,\n is_type=str,\n to_lower=False,\n description=(\n \"Description of the data used \\n\"\n \" > e.g. meta:data_name: 'mnist', or meta:data_name: 'V00'\\n\"\n \"this logic will likely change in the future\"\n ),\n ),\n \"experiment_name\": categorical(\n default_value=None,\n required=True,\n is_type=str,\n to_lower=False,\n description=(\n \"Name for the experiment being performed\\n\"\n \" > e.g. meta:experiment_name: 'trial_00'\"\n ),\n ),\n \"start_fresh\": categorical(\n default_value=False,\n required=False,\n is_type=bool,\n description=(\n \"Used to determine whether previous experiments should be deleted\\n\"\n \" > e.g. meta:start_fresh: True\"\n ),\n ),\n # random seed\n \"rand_seed\": numeric(\n default_value=None,\n required=False,\n is_type=int,\n description=(\n \"Used to set the random seed for tensorflow\\n\"\n \" > e.g. meta:rand_seed: 42\"\n ),\n ),\n # TODO: tracing\n # \"trace_level\": categorical(\n # default_value=None, required=False, description=\"meta:trace_level: \"\n # ),\n # default path to load param information\n # TODO: this should likely move to the model config\n \"default_load_params_path\": categorical(\n default_value=None,\n required=False,\n is_type=str,\n to_lower=False,\n description=(\n \"Default location to load parameters from\\n\"\n \"meta:default_load_params_path: './path/to/some/parameters...'\"\n ),\n ), # TODO: confirm path exists\n }\n}\n\n\n# TODO: some of these values are positive only .. 
may consider additional check\nhyper_parameters = {\n \"hyper_parameters\": {\n \"dataset\": {\n \"batch\": numeric(\n default_value=None,\n required=True,\n is_type=int,\n description=\"hyper_parameters:dataset:batch: \",\n ),\n \"shuffle_buffer\": numeric(\n default_value=None,\n required=False,\n is_type=int,\n description=\"hyper_parameters:dataset:shuffle_buffer: \",\n ),\n },\n \"epochs\": numeric(\n default_value=None,\n required=True,\n is_type=int,\n description=\"hyper_parameters:epochs: \",\n ),\n # TODO: need to account for optional outter keys\n \"early_stopping\": optional_config(\n conf_dict={\n \"epochs\": numeric(\n default_value=None,\n required=False,\n is_type=int,\n description=\"hyper_parameters:early_stopping:epochs: \",\n ),\n \"warm_up\": numeric(\n default_value=None,\n required=False,\n is_type=int,\n description=\"hyper_parameters:early_stopping:warm_up: \",\n ),\n }\n ),\n }\n}\n\noptimize = {\n \"optimize\": {\n \"optimizers\": optimizers_parser(),\n \"directive\": {\"instructions\": instruct_parser()},\n }\n}\n\n\nERR_LEVELS = [x.lower() for x in [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]]\n\n# TODO: it should be acceptable to pass a \"CRITICAL\" here\nlogging = {\n \"logging\": {\n \"console\": optional_config(\n conf_dict={\n \"level\": categorical(\n default_value=\"critical\",\n required=False,\n is_in_list=ERR_LEVELS,\n is_type=str,\n description=(\n \"the level to log information to the console\\n\"\n \" > e.g. logging:console:level: 'critical'\"\n ),\n ),\n \"format_str\": categorical(\n default_value=\"%(name)-12s: %(levelname)-8s %(message)s\",\n required=False,\n is_type=str,\n to_lower=False,\n description=(\n \"the level to log information to the console\\n\"\n \" > e.g. logging:console:format_str: '%(name)-12s: %(levelname)-8s %(message)s'\"\n ),\n ),\n }\n ),\n \"file\": optional_config(\n conf_dict={\n \"level\": categorical(\n default_value=\"critical\",\n required=False,\n is_in_list=ERR_LEVELS,\n is_type=str,\n description=(\n \"the level to log information to the log file\\n\"\n \" > e.g. logging:file:level: 'critical'\"\n ),\n ),\n \"format_str\": categorical(\n default_value=\"%(filename)s:%(lineno)s - %(funcName)20s()][%(levelname)-8s]: %(message)s\",\n required=False,\n is_type=str,\n to_lower=False,\n description=(\n \"the level to log information to the log file\\n\"\n \" > e.g. logging:file:format_str: '%(name)-12s: %(levelname)-8s %(message)s'\"\n ),\n ),\n }\n ),\n \"track\": optional_config(\n conf_dict={\n \"tracker_steps\": numeric(\n default_value=0,\n required=False,\n is_type=int,\n description=(\n \"the frequency (as a number of training steps) at which to log tracker information\\n\"\n \" > e.g. logging:tracker_steps: 30\"\n ),\n ),\n \"tensorboard\": optional_config(\n conf_dict={\n \"param_steps\": numeric(\n default_value=0,\n required=False,\n is_type=int,\n description=(\n \"the frequency (as a number of training steps) at which to log tracker information\\n\"\n \" > e.g. logging:track:tensorboard:param_steps: 30\"\n ),\n )\n }\n ),\n }\n ),\n }\n}\n\n\n# Data\ndata = {\"data\": {\"datasets\": data_set_name_dict(required=True)}}\n\n# NOTE: these two are really simliar, but I think it may be worth keeping them\n# separate.. 
that is I like the idea of being able to define these as separate processes\npreprocess = {\"preprocess\": {}}\naugment = {\"augment\": {}}\n\n# TODO: these need to be moved to preprocess\n# # copy is used to prevent overwriting underlying data\n# formatted_dict[\"input_layer_dim\"] = None\n# formatted_dict[\"in_dim\"] = raw_config[\"in\"][\"dim\"].copy()\n# if formatted_dict[\"in_dim\"][0]: # as oppposed to [None, x, y, z]\n# formatted_dict[\"in_dim\"].insert(0, None) # add batching\n# formatted_dict[\"in_dtype\"] = raw_config[\"in\"][\"dtype\"]\n# try:\n# formatted_dict[\"reshape_in_to\"] = raw_config[\"in\"][\"reshape_to\"]\n# if formatted_dict[\"reshape_in_to\"][0] != -1: # as oppposed to [None, x, y, z]\n# formatted_dict[\"reshape_in_to\"].insert(0, -1) # -1\n# except KeyError:\n# # None in this case is representative of not reshaping\n# formatted_dict[\"reshape_in_to\"] = None\n# if formatted_dict[\"reshape_in_to\"]:\n# formatted_dict[\"input_layer_dim\"] = raw_config[\"in\"][\"reshape_to\"]\n# else:\n# formatted_dict[\"input_layer_dim\"] = raw_config[\"in\"][\"dim\"].copy()\n# formatted_dict[\"augmentation\"] = raw_config[\"image\"][\"augmentation\"]\n# formatted_dict[\"image_standardize\"] = raw_config[\"image\"][\"standardize\"]\n\n# NOTE: this doesn't matter.. unless we're doing supervised. but even still,\n# this could be specified/figured out by the var that is passed to the loss/metrics\n# formatted_dict[\"output_dim\"] = raw_config[\"label\"][\"dim\"].copy()\n# if formatted_dict[\"output_dim\"][0]: # as oppposed to [None, x, y, z]\n# formatted_dict[\"output_dim\"].insert(0, None) # add batching\n# formatted_dict[\"label_dtype\"] = raw_config[\"label\"][\"dtype\"]\n# formatted_dict[\"label_one_hot\"] = raw_config[\"label\"][\"one_hot\"]\n# NOTE: I think the specification of final iteration shape/size should be\n# defined in the preprocess function.. because there may be preprocess functions\n# that need to happen before features/labels (if they even exist) are seperated.\n\n# NOTE: not sure how I want to do this.. this basically becomes dataduit all\n# over again..\n# formatted_dict[\"TFR_dir\"] = raw_config[\"TFR\"][\"dir\"]\n# formatted_dict[\"TFR_train\"] = raw_config[\"TFR\"][\"train\"]\n# formatted_dict[\"TFR_test\"] = raw_config[\"TFR\"][\"test\"]\n# formatted_dict[\"TFR_val\"] = raw_config[\"TFR\"][\"validation\"]\n\n# # TODO: this is a first draft for this type of organization and will\n# # will likely be changed\n# formatted_dict[\"data_in_dict\"] = raw_config[\"in\"]\n# formatted_dict[\"data_out_dict\"] = raw_config[\"label\"]\n# formatted_dict[\"TFR_parse\"] = raw_config[\"TFR_parse\"]\n\n# TODO: eventually, we need to support custom performance/loss metrics\nperformance = {\"performance\": {\"objectives\": performances_parser()}}\nmodel = {\n \"model\": {\n # directory\n # TODO: check that no spaces or special chars are included in the model\n # name and other directory names?\n \"name\": categorical(\n default_value=None,\n required=True,\n is_type=str,\n description=(\"name of the model\\n\" \" > e.g. model:name: 'jacks_model\"),\n ),\n \"start_fresh\": categorical(\n default_value=False,\n required=False,\n is_type=bool,\n description=(\n \"model start_fresh `start_fresh: ` is used to determine \"\n \"whether to start the directory 'fresh'/delete current contents \\n\"\n \" > e.g. 
model:start_fresh: True\"\n ),\n ),\n \"layers\": layers_parser(), # could add a check that the location exists\n }\n}\n\n\nDEFAULT_CONFIG = {}\nDEFAULT_CONFIG = {**DEFAULT_CONFIG, **meta}\nDEFAULT_CONFIG = {**DEFAULT_CONFIG, **performance}\nDEFAULT_CONFIG = {**DEFAULT_CONFIG, **hyper_parameters}\nDEFAULT_CONFIG = {**DEFAULT_CONFIG, **logging}\nDEFAULT_CONFIG = {**DEFAULT_CONFIG, **data}\nDEFAULT_CONFIG = {**DEFAULT_CONFIG, **model}\nDEFAULT_CONFIG = {**DEFAULT_CONFIG, **optimize}\n","sub_path":"src/yeahml/config/default/default_config.py","file_name":"default_config.py","file_ext":"py","file_size_in_byte":12089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"} +{"seq_id":"143088200","text":"import requests\nimport os\nimport time\nfrom bs4 import BeautifulSoup\nfrom pathvalidate import sanitize_filename\nfrom urllib.parse import urljoin\nfrom parse_tululu_category import collect_book_urls, get_last_page_number\nimport json\nimport argparse\nimport logging\nimport sys\nimport urllib3\n\n\ndef check_response(response):\n response.raise_for_status()\n if response.history:\n raise requests.HTTPError('Redirect')\n return response\n\ndef get_book_soup(book_url):\n response = check_response(requests.get(book_url, allow_redirects=False, verify=False))\n soup = BeautifulSoup(response.text, 'lxml')\n return soup\n\n\ndef get_title_and_author(soup):\n title, author = soup.select_one('h1').text.split('::')\n\n return {\n 'title': title.strip(),\n 'author': author.strip()\n }\n\n\ndef get_cover_fullpath(soup, base):\n img_name = soup.select_one('div.bookimage img')['src']\n return urljoin(base, img_name)\n\n\ndef get_comments(soup):\n comments = [\n comment.text for comment in soup.select('div.texts span.black')\n ]\n return comments or None\n\n\ndef get_genres(soup):\n genres = [genre.text for genre in soup.select('span.d_book a')]\n return genres or None\n\n\ndef download_txt(book_id, filename, folder='books'):\n filename_cleaned = f'{book_id}-{sanitize_filename(filename)}'\n file_url = f'https://tululu.org/txt.php?id={book_id}'\n\n os.makedirs(folder, exist_ok=True)\n response = check_response(requests.get(file_url, allow_redirects=False, verify=False))\n full_path = os.path.join(folder, filename_cleaned)\n\n if not response.status_code == 200:\n return\n data = response.text\n full_path_with_ext = f'{full_path}.txt'\n with open(full_path_with_ext, 'w') as f:\n f.write(data)\n return full_path_with_ext\n\n\ndef download_image(image_url, folder='images'):\n os.makedirs(folder, exist_ok=True)\n response = check_response(requests.get(image_url, allow_redirects=False, verify=False))\n full_path = os.path.join(folder, str(image_url.split('/')[-1]))\n\n if not response.status_code == 200:\n return\n data = response.content\n with open(full_path, 'wb') as f:\n f.write(data)\n return full_path\n\ndef main():\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n book_category = 55\n last_page_number = get_last_page_number(book_category)\n\n parser = argparse.ArgumentParser(\n description='The parser of tululu.org library')\n parser.add_argument('--start_page', default=1,\n help='Download starts with this page', type=int)\n parser.add_argument('--end_page',\n help='Download ends with this page', type=int)\n parser.add_argument('--filename', default='books_db.json',\n help='Name of json-db file')\n parser.add_argument('--skip_txt', action='store_true',\n help='Skip saving the files', required=False)\n parser.add_argument('--skip_images', 
action='store_true',\n                        help='Skip saving the images',\n                        required=False)\n    parser.add_argument('--dest_folder', default='',\n                        help='Folder for book saving',\n                        type=str, required=False)\n\n    args = parser.parse_args()\n\n    parse_up_to = args.end_page or args.start_page+1 or last_page_number\n\n    print('Actions summary:')\n    print('-'*50)\n    print(f'Pages to load from {args.start_page} to {parse_up_to}')\n    print(f'Filename of the DB is {args.filename}')\n    print(f'Will file saving be skipped? {args.skip_txt}')\n    print(f'Will cover images be skipped? {args.skip_images}')\n\n    json_db_file_path = os.path.join(args.dest_folder, args.filename)\n    \n\n    books_collection = []\n\n    try:\n        book_urls = collect_book_urls(\n            args.start_page, \n            parse_up_to, \n            book_category)\n\n    except (requests.HTTPError, requests.ConnectionError):\n        logging.critical('HTTPError or ConnectionError')\n        sys.exit()\n\n    for book_url in book_urls:\n        time.sleep(3)\n        try:\n            book_soup = get_book_soup(book_url)\n        except requests.exceptions.HTTPError:\n            logging.warning(f'Page {book_url} could not be opened.')\n            continue\n        if book_soup:\n            try:\n                comments = get_comments(book_soup)\n                genres = get_genres(book_soup)\n                title_and_author = get_title_and_author(book_soup)\n                book_id = book_url.split('/')[-2].replace('b', '')\n                book_path = None if args.skip_txt else download_txt(book_id, title_and_author['title'])\n\n                img_src = get_cover_fullpath(book_soup, book_url)\n                cover_link = None if args.skip_images else download_image(img_src)\n                logging.info('This book was saved: %s', title_and_author['title'])\n                books_collection.append(\n                    {\n                        'title': title_and_author['title'],\n                        'author': title_and_author['author'],\n                        'img_src': cover_link,\n                        'book_path': book_path,\n                        'comments': comments,\n                        'genres': genres\n                    }\n                )\n            except (requests.HTTPError, requests.ConnectionError):\n                logging.critical('HTTPError or ConnectionError')\n                sys.exit()\n\n    with open(json_db_file_path, 'w', encoding='utf8') as books_json_db:\n        json.dump(books_collection, books_json_db, ensure_ascii=False)\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
+{"seq_id":"369236395","text":"# Hint: You may not need all of these. Remove the unused functions.\nclass Ticket:\n    def __init__(self, source, destination):\n        self.source = source\n        self.destination = destination\n\n\ndef reconstruct_trip(tickets, length):\n    \"\"\"\n    YOUR CODE HERE\n    \"\"\"\n    route = []\n    cache={} #<- created a dictionary or hashtable to store my values\n\n    for indi_tick in tickets: # <- for each instance of the Ticket class in the tickets array I'm going to do the following\n        cache[indi_tick.source] = indi_tick.destination # <- mapping their source and destination as the key and value pairs in the dictionary/hash table\n\n    route.append(cache['NONE']) # <- finding departure point and adding it to the beginning of list.. so the key is none here but the value will be appended to my route array?\n    key = cache['NONE'] # <-- I now set the position of none as my new key.. NONE is not the key here but the value of the position of NONE is, the function will use the destination/value that is attached to the 'none' as the new key?\n\n    while key != 'NONE': # <- so while the key is not the actual string 'NONE', do this loop.. 
this is kind of like a base case right, because in this problem we will eventually get to the key being the string NONE again when we reach the final destination.\n        route.append(cache[key]) # yeah so we're just going to append the hashed value to the route and then in the next line make it our new key. this loop will continue until the base case is fulfilled\n        key = cache[key] \n\n    return route # <- returning the array with proper loop\n","sub_path":"hashtables/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"456995595","text":"\n\nfrom xai.brain.wordbase.verbs._martyr import _MARTYR\n\n#class header\nclass _MARTYRS(_MARTYR, ):\n\tdef __init__(self,): \n\t\t_MARTYR.__init__(self)\n\t\tself.name = \"MARTYRS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"martyr\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_martyrs.py","file_name":"_martyrs.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"586265903","text":"#!/usr/bin/python3\n\n# sleep(2)\n\n# Import the ev3dev specific library\nfrom time import sleep\nfrom ev3dev.ev3 import *\n\n\nprint(\"FUCKING PRINT\")\nprint(\"1\")\n# Connect motors\nrightMotor = LargeMotor(OUTPUT_C)\n\nassert rightMotor.connected\n\nleftMotor = LargeMotor(OUTPUT_B)\nassert leftMotor.connected\n\n# Connect sensors\nprint(\"2\")\nus = UltrasonicSensor(INPUT_1)\n\ncs = ColorSensor(INPUT_4)\n\ntsRIGHT = TouchSensor(INPUT_3)\ntsLEFT = TouchSensor(INPUT_2)\n\n\n# Declaring buttons\nbtn = Button()\n\n\n# Basic movement control\ndef drive(left, right):\n    \"\"\"\n    Start both motors at the given speeds.\n    \"\"\"\n    leftMotor.run_direct(duty_cycle_sp=left)\n\n    rightMotor.run_direct(duty_cycle_sp=right)\n\n\n# Spinning the robot, clockwise/anticlockwise depending on required direction\ndef search(spinDirection):\n    drive(spinDirection * -50, spinDirection * 50)\n\n\n# Stop both motors\ndef stop():\n    leftMotor.stop(stop_action='brake')\n    rightMotor.stop(stop_action='brake')\n\n\n# Basic Start sequence\ndef start_sequence(spinDirection):\n    # sleep(3)\n    Sound.speak('WAAAALLL E')\n    while not btn.any():\n        if us.value() < 500:\n            drive(100, 100)\n        else:\n            search(1)\n\n\n\n# If the robot cannot see the other bot after the starting sequence\ndef lost():\n    # If robot cannot find object drive forward to boundary then do another check\n    # Below loop, keeps the robot driving back and forth till target is found.\n    while us.value() > 750 and not tsLEFT.value() and not tsRIGHT.value():\n        while cs.value() > 30:\n            drive(-20, -20)\n        search(1)\n        # Didn't know the code, to make it spin 180 degrees.\n\ndrive(20,20)\nprint(\"3\")\nwhile not btn.any():\n    cs.mode = 'COL-REFLECT'\n    start_sequence(1)\n    lost()\n    # sensor readings are methods, so call value(); 40 is a distance threshold\n    if (tsRIGHT.value() and tsLEFT.value()) or us.value() < 40:\n        drive(100, 100)\n    elif us.value() < 400 and cs.value() > 40:\n        drive(70, 70)\n    elif tsLEFT.value() and not tsRIGHT.value():\n        drive(80, 50)\n    elif tsRIGHT.value() and not tsLEFT.value():\n        drive(50, 80)\n\n    else:\n        continue\n\n    #if btn.left():\n    #    start_sequence(1)\n    #if btn.right():\n    #    start_sequence(-1)\nprint(\"4\")\n\n# sleep(3)\n","sub_path":"Sumo_programs/probablyGoodCode/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"387015436","text":"## in both body and tags\r\nkeywords = [\r\n\"tensorflow\",\r\n\" torch \",\r\n\" theano\", # TheAnonymousType\r\n\"caffe\",\r\n#\"microsoft azure\",\r\n#\"cuda\", # it is related but too nvidia specific\r\n\"cntk\",\r\n\" keras \", #RunWorkerAsync in c#\r\n\r\n\"machine learning\",\r\n\"feature selection\",\r\n\"tf-idf\",\r\n\"neural network\",\r\n\"deep learning\",\r\n\"feature extraction\",\r\n\"cluster analysis \",\r\n\"unsupervised learning\",\r\n\"sentiment analysis\",\r\n\"text mining\",\r\n#\" gmm\",\r\n\"classification\",\r\n#\"regression\", # just regression, regression tests\r\n#\"predict\", #predicable\r\n#\"boost\",\r\n\"bagging\",\r\n\r\n\"bigdata\",\r\n#\"analytics\",\r\n\r\n\"speech recognition\",\r\n\"acoustic model\",\r\n\"image recognition\",\r\n\"machine translation\",\r\n\"gene network inference\",\r\n\r\n\"knn\",\r\n\"k-nn\",\r\n\"nearest neighbor\",\r\n\"random forest\",\r\n\"logistic regression\",\r\n\"linear regression\",\r\n\"decision tree\",\r\n\"bayesian network\",\r\n\"naive bayes\",\r\n\"multinomial naive\", \r\n\" svm\",\r\n\"limma\",\r\n\" lars \", #dollars\r\n\" lasso\", #IsSubclassOf, classobj\r\n\"elastic net\",\r\n#\"hmm\", #Hmm\r\n#\" dnn\",\r\n#\" rnn\",\r\n#\" cnn\", # name of variable for \"sql connection\" # cnn website\r\n\"neural network\",\r\n\"topic analy\",\r\n\r\n\r\n\"scikit\",\r\n\"sklearn\",\r\n\"cross_validation\",\r\n\"feature_selection\",\r\n\"ensemble method\",\r\n\"naive_bayes\",\r\n\"linear_model\",\r\n\"neural_network\",\r\n\"e1071\",\r\n\"randomForest\"\r\n\r\n]\r\n\r\n## Only in tags\r\nkeytags = [\r\n## How to handle the tags\r\n\"\", #jnlp\r\n\"\",\r\n\"topic-modeling\",\r\n\"machine-learning\",\r\n\"artificial-intelligence\",\r\n#\"mathematical-optimization\",\r\n\"grid-search\",\r\n#\"data-science\",\r\n\"neural-network\",\r\n\"deep-learning\",\r\n\"feature-extraction\",\r\n\"cluster-analysis \",\r\n\"unsupervised-learning\",\r\n\"sentiment-analysis\",\r\n\"text-mining\"\r\n]\r\n\r\n\r\nimport os\r\nimport json\r\nfrom shutil import copyfile\r\nfrom pathlib import Path\r\n\r\nenvi_path = \"../output/\"\r\n\r\noutput_question_path = envi_path+\"questionsML_coarse/\"\r\noutput_answer_path = envi_path+\"answersML_coarse/\"\r\n\r\noutput_path_Q = envi_path+\"questionsML/\"\r\noutput_path_A = envi_path+\"answersML/\"\r\n\r\nimport xml.etree.ElementTree\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nfor post_f in os.listdir(output_question_path):\r\n    qRoot = xml.etree.ElementTree.parse(output_question_path+post_f).getroot()\r\n    post_id = qRoot.get('Id')\r\n    answer_id = qRoot.get('AcceptedAnswerId')\r\n    body = qRoot.get('Body') \r\n    ## handle it later\r\n    code = BeautifulSoup(body, 'lxml').code\r\n    ## ensure enough length of code snippet\r\n    if len(str(code)) < 100:\r\n        continue\r\n    tags = qRoot.get('Tags')\r\n\r\n    ## clean html tags\r\n    body = BeautifulSoup(body, \"lxml\").text\r\n    \r\n    if any(keyword in body.lower() for keyword in keywords) or any(keytag in tags.lower() for keytag in keytags): \r\n        answer_file = output_answer_path+\"post_\"+answer_id+\".xml\"\r\n        if Path(answer_file).exists():\r\n            copyfile(output_question_path+\"post_\"+post_id+\".xml\", output_path_Q+\"post_\"+post_id+\".xml\")\r\n            copyfile(answer_file, output_path_A+\"post_\"+answer_id+\".xml\")\r\n","sub_path":"src/filterML.py","file_name":"filterML.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"}
+{"seq_id":"22662714","text":"#-*- coding:utf-8 -*-\r\n#!/usr/bin/python3\r\n# from libs.flash.flash_lib 
import get_flashed_messages\r\n# from libs.permission.permission_auth.permission_interface_libs import menu_permission\r\n\r\n\r\nsettings = dict(\r\n template_path = 'templates',\r\n static_path = 'static',\r\n debug = True,\r\n cookie_secret = '123456',\r\n login_url = '/user_login',\r\n xsrf_cookies = True,\r\n # ui_methods = {\r\n # 'menu_permission':menu_permission,\r\n # 'get_flashed_messages':get_flashed_messages\r\n # },\r\n pycket = {\r\n 'engine':'redis',\r\n 'storage':{\r\n 'host':'127.0.0.1',\r\n 'port':6379,\r\n 'db_sessions':6,\r\n 'db_notifications':11,\r\n 'max_connections':2**31,\r\n },\r\n 'cookies':{\r\n 'expires_days':30,\r\n },\r\n },\r\n)\r\n\r\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"94"} +{"seq_id":"277487437","text":"from django.http import JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.models import User\nfrom nltk.tokenize import sent_tokenize\nimport nltk \nimport numpy as np\nfrom nltk.tokenize import sent_tokenize\nimport pandas as pd\nfrom nltk.corpus import stopwords \nstop_words = stopwords.words('english')\nimport numpy as np\nfrom gensim.corpora import Dictionary\nfrom gensim.models import TfidfModel\nfrom gensim.models import WordEmbeddingSimilarityIndex\nfrom gensim.similarities import SparseTermSimilarityMatrix\nfrom gensim.similarities import SoftCosineSimilarity\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport networkx as nx \nfrom gensim.corpora import Dictionary\nfrom gensim.models import TfidfModel\nfrom gensim.models import WordEmbeddingSimilarityIndex\nfrom gensim.similarities import SparseTermSimilarityMatrix\nfrom gensim.similarities import SoftCosineSimilarity\nimport numpy as np \nfrom nltk.corpus import stopwords \nstop_words = stopwords.words('english')\nfrom .models import *\nimport re\nimport plotly.graph_objects as go\nimport json\nimport pandas as pd\nimport nltk\nfrom os import listdir\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom os.path import isfile, join\nimport seaborn as sns\nfrom tqdm import tqdm\nimport glob\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport os\nimport string\nfrom nltk.stem import WordNetLemmatizer\nfrom pattern.en import tag\nfrom nltk.corpus import wordnet as wn\nimport warnings\nimport scipy.sparse as sp \nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom collections import Counter\nfrom nltk.corpus import reuters\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords\nimport nltk.data\nimport math\nimport re\nfrom nltk import word_tokenize, pos_tag\nfrom nltk.corpus import wordnet as wn\nimport numpy as np\nimport pandas as pd\nimport scipy as sc\nstop_words = stopwords.words('english')\nideal_sent_length = 20.0\nstemmer = SnowballStemmer(\"english\")\nimport re\nfrom urllib.request import urlopen\n#import gensim \nimport numpy as np \nfrom time import time # To time our operations\nfrom collections import defaultdict # For word frequency\nimport re\nimport spacy # For preprocessing\n\nimport logging # Setting up the loggings to monitor gensim\n\nimport spacy\n\nfrom tqdm import tqdm\nimport seaborn as sb\nfrom matplotlib import pyplot as plt\n\nwnl = WordNetLemmatizer()\n\n\nSUMMARY_LENGTH = 12 # number of sentences in final summary\nstop_words = 
stopwords.words('english')\n\nfrom rank_bm25 import BM25Okapi\nimport pandas as pd\ndata = pd.read_csv(\"test.csv\")\nenglish_stopwords = list(set(stopwords.words('english')))\n\ndef strip_characters(text):\n    t = re.sub('\\(|\\)|:|,|;|\\.|’|”|“|\\?|%|>|<', '', text)\n    t = re.sub('/', ' ', t)\n    t = t.replace(\"'\",'')\n    return t\n\ndef clean(text):\n    t = text.lower()\n    t = strip_characters(t)\n    return t\n\ndef tokenize(text):\n    words = nltk.word_tokenize(text)\n    return list(set([word for word in words \n                     if len(word) > 1\n                     and word not in english_stopwords\n                     and not (word.isnumeric() and len(word) != 4)\n                     and (not word.isnumeric() or word.isalpha())] )\n               )\n\ndef preprocess(text):\n    t = clean(text)\n    tokens = tokenize(t)\n    return tokens\n\nclass SearchResults:\n    \n    def __init__(self, \n                 data: pd.DataFrame,\n                 columns = None):\n        self.results = data\n        if columns:\n            self.results = self.results[columns]\n        #print( \"self.results\")\n        #print( self.results)\n    \n    def __getitem__(self, item):\n        return Paper(self.results.loc[item])\n    \n    def __len__(self):\n        return len(self.results)\n    \n    def _repr_html_(self):\n        return self.results._repr_html_()\n\nSEARCH_DISPLAY_COLUMNS = ['paper_id', 'title', 'abstract', 'body_text']\n\nclass WordTokenIndex:\n    \n    def __init__(self, \n                 corpus: pd.DataFrame, \n                 columns=SEARCH_DISPLAY_COLUMNS):\n        self.corpus = corpus\n        #print(\"self.corpus\")\n        #print(self.corpus)\n        raw_search_str = self.corpus.abstract.fillna('') + ' ' + self.corpus.title.fillna('') + ' ' + self.corpus.body_text.fillna('')\n        #print(\"raw_search_str\")\n        #print(raw_search_str)\n        self.index = raw_search_str.apply(preprocess).to_frame()\n        #print( \"self.index\")\n        #print( self.index)\n        #print( self.index[0])\n        self.index.columns = ['terms']\n        #print(\"self.index.columns\")\n        #print(self.index.columns)\n        self.index.index = self.corpus.index\n        #print(\"self.index.index\")\n        #print(self.index.index)\n        self.columns = columns\n        #print(\"self.columns\")\n        #print(self.columns)\n        # no return here: __init__ must return None\n    \n    def search(self, search_string):\n        search_terms = preprocess(search_string)\n        #print(\"search_terms\" )\n        #print(search_terms )\n        result_index = self.index.terms.apply(lambda terms: any(i in terms for i in search_terms))\n        #print(\"result_index\")\n        #print(result_index )\n        results = self.corpus[result_index].copy().reset_index().rename(columns={'index':'paper'})\n        #print(\"results\")\n        #print(results)\n        #print(\"SearchResults(results, self.columns + ['paper'])\")\n        #print(SearchResults(results, self.columns + ['paper']))\n        return SearchResults(results, self.columns + ['paper'])\nclass RankBM25Index(WordTokenIndex):\n    \n    def __init__(self, corpus: pd.DataFrame, columns=SEARCH_DISPLAY_COLUMNS):\n        super().__init__(corpus, columns)\n        #print(\"self.index.terms.tolist()\")\n        #print(self.index.terms.tolist())\n        self.bm25 = BM25Okapi(self.index.terms.tolist())\n        #print(\"self.bm25\")\n        #print(self.bm25)\n    \n    def search(self, search_string):\n        search_terms = preprocess(search_string)\n        doc_scores = self.bm25.get_scores(search_terms)\n        #print('doc_scores')\n        #print(doc_scores)\n        ind = np.argsort(doc_scores)[::-1]\n        #print('ind')\n        #print(ind)\n        results = self.corpus.iloc[ind][self.columns]\n        #print('results')\n        #print(results)\n        results['Score'] = doc_scores[ind]\n        #print(\"results['Score']\")\n        #print(results['Score'])\n        results['orig_ind'] = ind\n        results['word'] = search_string\n        #print(\"results['orig_ind']\")\n        #print(results['orig_ind'])\n        results = results[results.Score > 0]\n        
#print(\"results\")\n #print(results)\n return SearchResults(results.reset_index(), self.columns + ['Score', 'orig_ind','word'])\n\n\n\n\nclass Summarizer():\n\n def penn_to_wn(self,tag):\n \n if tag.startswith('N'):\n return 'n'\n \n if tag.startswith('V'):\n \n return 'v'\n \n if tag.startswith('J'):\n return 'a'\n \n if tag.startswith('R'):\n return 'r'\n return None \n \n\n def tagged_to_synset(self,word, tag):\n wn_tag = self.penn_to_wn(tag)\n \n if wn_tag is None:\n return None\n \n try:\n \n return wn.synsets(word, wn_tag)[0]\n except:\n return None\n \n def sentence_similarity(self,sentence1, sentence2):\n \n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n \n # Get the synsets for the tagged words\n synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n \n synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n \n synsets2 = [ss for ss in synsets2 if ss]\n \n \n score, count = 0, 0\n for synset in synsets1:\n max_sim = 0.0\n maxx=0\n for ss in synsets2:\n \n sim=wn.wup_similarity(synset, ss)\n if sim is not None and sim > max_sim:\n \n max_sim = sim\n \n \n \n if max_sim is not None and max_sim!=0:\n score += max_sim\n count += 1\n \n # Average the values\n if count!=0:\n score /= count\n return score \n def __init__(self, article):\n \n self._articles = []\n #i=1\n \n for row in article:\n #if i<=20:\n title=row[3]\n #print(title)\n body=row[2].replace('\\n', ' ')\n #print(body)\n paper_id=row[0]\n #doi=row[0]\n if title!=''and body!='':\n self._articles.append((paper_id,title,body))\n #i=i+1\n \n \n def valid_input(self, headline, article_text):\n return headline != '' and article_text != '' \n def normalize_corpus(self,corpus, lemmatize=True):\n \n normalized_corpus = [] \n for text in corpus:\n if lemmatize:\n text = self.lemmatize_text(text)\n else:\n text = text.lower()\n text = self.remove_special_characters(text)\n text = self.remove_stopwords(text)\n normalized_corpus.append(text)\n return normalized_corpus\n def pos_tag_text(self,text):\n wnl = WordNetLemmatizer()\n tagged_text = tag(text)\n tagged_lower_text = [(word.lower(), self.penn_to_wn(pos_tag))\n for word, pos_tag in\n tagged_text]\n return tagged_lower_text\n \n \n def score(self,article,query):\n \"\"\" Assign each sentence in the document a score\"\"\"\n maxx=0\n maxxx=0\n Query=[]\n Query.append(query)\n headline = article[1]\n sentences = self.split_into_sentences(article[2])\n \n querry=self.remove_smart_quotes(query)\n sentencess=self.split_into_sentences(article[2])\n sentencess.append(querry)\n \n #queryy=self.split_into_sentences(query)\n norm_corpus =self.normalize_corpus(sentences, lemmatize=True)\n norm_corpuss=self.normalize_corpus(sentencess, lemmatize=True)\n \n norm_model_answer = self.normalize_corpus(Query, lemmatize=True) \n norm_model_answerquery = self.normalize_corpus(Query, lemmatize=True) \n \n vectorizer, corpus_features = self.build_feature_matrix(norm_corpus,feature_type='frequency')\n vectorizerq, query_features = self.build_feature_matrix(norm_corpuss,feature_type='frequency')\n # extract features from model_answer\n model_answer_features = vectorizer.transform(norm_model_answer)\n model_answer_featuresquery = vectorizerq.transform(norm_model_answerquery)\n \n doc_lengths = [len(doc.split()) for doc in norm_corpus]\n doc_lengthss = [len(doc.split()) for doc in norm_corpuss] \n \n 
#query_lengths = [len(doc.split()) for doc in norm_query] \n avg_dl = np.average(doc_lengths) \n avg_qr = np.average(doc_lengthss)\n \n corpus_term_idfs = self.compute_corpus_term_idfs(corpus_features, norm_corpus)\n corpus_term_idfsquery = self.compute_corpus_term_idfs(query_features, norm_corpuss)\n \n for index, doc in enumerate(Query):\n \n doc_features = model_answer_features[index]\n #doc_featuress = model_answer_featuresquery[index]\n self.bm25_scores = self.compute_bm25_similarity(doc_features,corpus_features,doc_lengths,avg_dl,corpus_term_idfs,k1=1.5, b=0.75) \n print(' self.bm25_scores')\n print(self.bm25_scores/maxx)\n maxxx=max(self.bm25_scores)\n self.semantic_similarity_scores=[]\n \n for indexx, doc in enumerate(Query):\n\n doc_featuress = model_answer_featuresquery[indexx]\n self.bm25_scoresquery = self.compute_bm25_similarityqr(doc_featuress,query_features,doc_lengthss,avg_qr,corpus_term_idfsquery,k1=1.5, b=0.75)\n \n maxx=max(self.bm25_scoresquery)\n for i, s in enumerate(sentences):\n score1=self.sentence_similarity(s,query)\n score2=self.sentence_similarity(query,s)\n if score1 is not None and score2 is not None:\n score=(score1+score2)/2\n self.semantic_similarity_scores.append(score)\n elif score1 is not None and score2 is None:\n self.semantic_similarity_scores.append(score1)\n elif score2 is not None and score1 is None:\n self.semantic_similarity_scores.append(score2)\n print('self.semantic_similarity_scores')\n print(self.semantic_similarity_scores)\n doc_index=0\n sim_score=[]\n sim_scorecos=[]\n for score_tuple in zip(self.semantic_similarity_scores,self.bm25_scores):\n sim_scorecos.append((score_tuple[1]/maxxx))\n print('bm25')\n print( sim_scorecos)\n for score_tuple in zip(self.semantic_similarity_scores,self.bm25_scores):\n sim_score.append((score_tuple[0]+(score_tuple[1]/maxxx))/2)\n print('sim_score')\n print(sim_score)\n for tuple_ in zip(sentences,sim_score):\n s=tuple_[0]\n self._scores[s]=tuple_[1]\n print('self._scores[s]')\n print(self._scores[s])\n \n \n \n \n def generate_summaries(self,query):\n \n self.dict_ = {'task':[],'paper_id':[],'title':[],'summary': [],'score':[],'sentences':[]}\n jj=1\n ii=1\n \n #tasks=['what is the immune system response to 2019-ncov ?'\n \n # ]\n \n \n #for query in tasks:\n for article in self._articles:\n self._scores = Counter()\n self.score(article,query)\n highest_scoring = self._scores.most_common(SUMMARY_LENGTH)\n print('highest_scoring')\n print(highest_scoring)\n totalsentences = self.split_into_sentences(article[2])\n summarylist=[]\n summr=[sent[0] for sent in highest_scoring]\n \n for sentence in totalsentences:\n for sumsen in summr:\n if sentence==sumsen:\n summarylist.append(sentence) \n # Appends highest scoring \"representative\" sentences, returns as a single summary paragraph.\n summary=' '.join([sent for sent in summarylist])\n s=0\n for scr in highest_scoring:\n s=s+scr[1]\n s=s/12 \n \n \n ''' \n print('**task**')\n print(query)\n print(\" **Title: **\")\n print(article[1])\n print(highest_scoring)\n print('**summary**')\n print(summary)\n print('____________________________________________________________________________________')\n ''' \n \n self.dict_['sentences'].append(highest_scoring)\n self.dict_['summary'].append(summary)\n self.dict_['title'].append(article[1])\n self.dict_['paper_id'].append(article[0])\n self.dict_['task'].append(query)\n self.dict_['score'].append(s)\n self.papers = pd.DataFrame(self.dict_, columns=['task','paper_id','title','summary','score','sentences'])\n return 
\n    def remove_smart_quotes(self, text):\n        # strip bracketed spans such as in-text citations, e.g. (Smith, 2020) or [3]\n        text=re.sub(\"[\(\[].*?[\)\]]\", '', text)\n\n        # shorten URLs to their host part\n        try:\n            url = re.search(r'((https*:\/*)([^\/\s]+))(.[^\s]+)', text)\n            repl_url = url.group(3)\n            text = re.sub(r'((https*:\/*)([^\/\s]+))(.[^\s]+)', repl_url, text)\n        except AttributeError:\n            pass  # no URL in the text\n\n        text=re.sub(r\"[<>()(,)|&©ø\[\]\'\\\";?~*!]\", ' ', text)  # remove <>()|&©ø\"',;?~*!\n        text=re.sub(\"(\\t)\", ' ', text)  # remove escape characters\n        text=re.sub(\"(\\r)\", ' ', text)\n        text=re.sub(\"(\\n)\", ' ', text)\n        text = re.sub(\"(\s+)\", ' ', text)  # collapse multiple spaces\n\n        text = re.sub(r'[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,}', 'MAIL', text)\n        # remove doi links\n        text = re.sub(r'https\:\/\/doi\.org[^\s]+', 'DOI', text)\n        # remove https links\n        text = re.sub(r'(\()?\s?http(s)?\:\/\/[^\)]+(\))?', '\g<1>LINK\g<3>', text)\n        # remove single characters repeated at least 3 times, a spacing error (e.g. s u m m a r y)\n        text = re.sub(r'(\w\s+){3,}', ' ', text)\n        # replace citation tags (e.g. [3] [4] [5]) with whitespace\n        text = re.sub(r'(\[\d+\]\,?\s?){3,}(\.|\,)?', ' \g<2>', text)\n        # replace citation tags (e.g. [3, 4, 5]) with whitespace\n        text = re.sub(r'\[[\d\,\s]+\]', ' ', text)\n        # replace tags like (NUM1) repeated at least 3 times with whitespace\n        text = re.sub(r'(\(\d+\)\s){3,}', ' ', text)\n        # replace '1.3' with '1,3' (needed for the sentence split later)\n        text = re.sub(r'(\d+)\.(\d+)', '\g<1>,\g<2>', text)\n        # drop full stops used in abbreviations (e.g. i.e. cit. and so on)\n        text = re.sub(r'\.(\s)?([^A-Z\s])', ' \g<1>\g<2>', text)\n        # correct the spacing of the tokens\n        text = re.sub(r' {2,}', ' ', text)\n        text = re.sub(r'\.{2,}', '.', text)\n        # drop preprint-server boilerplate words (whole words only)\n        text = re.sub(r'\bdoi\b', ' ', text)\n        text = re.sub(r'\bbioRxiv\b', ' ', text)\n        text = re.sub(r'\bauthors?\b', ' ', text)\n        text = re.sub(\"All rights reserved\", ' ', text)\n        text = re.sub(r'\bpreprint\b', ' ', text)\n        text = re.sub(\"(\s+)\", ' ', text)\n        # return lowercase text\n        return text.lower()\n\n    def split_into_sentences(self, text):\n        new = []\n        tok = nltk.data.load('tokenizers/punkt/english.pickle')\n        sentences = tok.tokenize(self.remove_smart_quotes(text))\n        sentences = [sent.replace('\\n', '') for sent in sentences if len(sent) > 20]\n        # drop sentences that still carry preprint/copyright boilerplate;\n        # remove_smart_quotes lowercases the text, so lowercase tokens suffice here\n        blocklist = ['biorxiv', 'medrxiv', 'author', 'authors', 'permission', 'permissions', 'doi', 'java', 'javascript', 'preprint', 'copyright', 'holder']\n        for sentt in sentences:\n            sent = word_tokenize(sentt)\n            if not any(word in sent for word in blocklist):\n                new.append(sentt)\n        return new\n\n    def lemmatize_text(self, text):\n        wnl = WordNetLemmatizer()\n        pos_tagged_text = self.pos_tag_text(text)\n        lemmatized_tokens = [wnl.lemmatize(word, pos) if pos else word for word, pos in pos_tagged_text]\n        lemmatized_text = ' '.join(lemmatized_tokens)\n        return lemmatized_text\n
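    # Illustrative only (exact output depends on the NLTK tagger/lemmatizer):\n    #   lemmatize_text('viruses are studied') -> 'virus be study'\n    # Nouns, verbs, adjectives and adverbs are reduced to WordNet lemmas; tokens\n    # with any other POS tag pass through unchanged.\n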
\n    def remove_special_characters(self, text):\n        tokens = self.tokenize_text(text)\n        pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))\n        filtered_tokens = filter(None, [pattern.sub(' ', token) for token in tokens])\n        filtered_text = ' '.join(filtered_tokens)\n        return filtered_text\n\n    def remove_stopwords(self, text):\n        stopword_list = nltk.corpus.stopwords.words('english')\n        tokens = self.tokenize_text(text)\n        filtered_tokens = [token for token in tokens if token not in stopword_list]\n        filtered_text = ' '.join(filtered_tokens)\n        return filtered_text\n\n    def tokenize_text(self, text):\n        tokens = nltk.word_tokenize(text)\n        tokens = [token.strip() for token in tokens]\n        return tokens\n\n    def build_feature_matrix(self, documents, feature_type='frequency', ngram_range=(1, 1), min_df=0.0, max_df=1.0):\n        feature_type = feature_type.lower().strip()\n        if feature_type == 'binary':\n            vectorizer = CountVectorizer(binary=True, min_df=min_df, max_df=max_df, ngram_range=ngram_range)\n        elif feature_type == 'frequency':\n            vectorizer = CountVectorizer(binary=False, min_df=min_df, max_df=max_df, ngram_range=ngram_range)\n        elif feature_type == 'tfidf':\n            vectorizer = TfidfVectorizer(min_df=min_df, max_df=max_df, ngram_range=ngram_range)\n        else:\n            raise Exception(\"Wrong feature type entered. Possible values: 'binary', 'frequency', 'tfidf'\")\n\n        feature_matrix = vectorizer.fit_transform(documents).astype(float)\n        return vectorizer, feature_matrix\n\n    def compute_corpus_term_idfs(self, corpus_features, norm_corpus):\n        # document frequency of every term, read off the sparse column pointers\n        dfs = np.diff(sp.csc_matrix(corpus_features, copy=True).indptr)\n        dfs = 1 + dfs  # smoothed idf: idf(t) = 1 + ln((N + 1) / (df(t) + 1))\n        total_docs = 1 + len(norm_corpus)\n        idfs = 1.0 + np.log(float(total_docs) / dfs)\n        return idfs\n\n    def compute_bm25_similarity(self, doc_features, corpus_features, corpus_doc_lengths, avg_doc_length, term_idfs, k1=1.5, b=0.75):\n        # get corpus bag of words features\n        corpus_features = corpus_features.toarray()\n        # convert query document features to binary features,\n        # to keep note of which terms exist in the query\n        doc_features = doc_features.toarray()[0]\n        doc_features[doc_features >= 1] = 1\n\n        # idf scores for the terms present in the query\n        doc_idfs = doc_features * term_idfs\n        # numerator expression of the BM25 equation\n        numerator_coeff = corpus_features * (k1 + 1)\n        numerator = np.multiply(doc_idfs, numerator_coeff)\n        # denominator expression of the BM25 equation\n        denominator_coeff = k1 * (1 - b + (b * (corpus_doc_lengths / avg_doc_length)))\n        denominator_coeff = np.vstack(denominator_coeff)\n        denominator = corpus_features + denominator_coeff\n        # combine into one BM25 score per corpus document\n        bm25_scores = np.sum(np.divide(numerator, denominator), axis=1)\n        return bm25_scores\n\n    def compute_bm25_similarityqr(self, doc_features, corpus_features, corpus_doc_lengths, avg_doc_length, term_idfs, k1=1.5, b=0.75):\n        # identical computation to compute_bm25_similarity; kept as a separate\n        # name for scoring against the query-augmented corpus\n        return self.compute_bm25_similarity(doc_features, corpus_features, corpus_doc_lengths, avg_doc_length, term_idfs, k1=k1, b=b)\n
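    # Toy sanity check for the helpers above (hypothetical data, not from the app):\n    #\n    #   s = Summarizer([])\n    #   docs = ['virus spreads fast', 'virus mutates']\n    #   vec, feats = s.build_feature_matrix(docs, feature_type='frequency')\n    #   idfs = s.compute_corpus_term_idfs(feats, docs)\n    #   q = vec.transform(['virus'])\n    #   lengths = [len(d.split()) for d in docs]\n    #   s.compute_bm25_similarity(q[0], feats, lengths, np.average(lengths), idfs)\n    #   # -> one BM25 score per document\n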
\n\ndef remove_stopwords(sen):\n    sen_new = \" \".join([i for i in sen if i not in stop_words])\n    return sen_new\n\n\ndef add(request):\n    import pandas as pd\n    import csv\n    val1 = request.GET['query']\n    val2 = int(request.GET['num'])\n\n    # Stage 1: BM25 retrieval over the indexed corpus\n    bm25_index = RankBM25Index(data)\n    word_result = bm25_index.search(val1).results\n    dc = word_result.sort_values(by='Score', ascending=False)\n    dc.reset_index(drop=True, inplace=True)\n    dc.to_csv('indexx.csv', index=False)\n\n    # Stage 2: per-article extractive summaries scored against the query\n    csv.field_size_limit(100000000)\n    with open(\"indexx.csv\", encoding=\"utf-8-sig\") as ff:\n        reader = csv.reader(ff, delimiter=',')\n        next(reader)  # skip the header row\n        summaries = Summarizer(reader)\n    results = summaries.generate_summaries(val1)\n    resultss = results.sort_values(by='score', ascending=False)\n    resultss.to_csv('results.csv', index=False)\n    df = pd.read_csv('results.csv')\n\n    # Stage 3: TextRank over all summary sentences to build the final answer\n    sentences = []\n    for s in df['summary']:\n        sentences.append(sent_tokenize(s))\n    sentences = [y for x in sentences for y in x]\n\n    clean_sentences = pd.Series(sentences).str.replace(\"[^a-zA-Z]\", \" \", regex=True)\n    # make alphabets lowercase\n    clean_sentences = [s.lower() for s in clean_sentences]\n    clean_sentences = [remove_stopwords(r.split()) for r in clean_sentences]\n\n    # Extract word vectors\n    word_embeddings = {}\n    with open(r\"C:\meriem\Mémoire\glove6b\glove.6B.100d.txt\", encoding='utf-8') as f:\n        for line in f:\n            values = line.split()\n            word = values[0]\n            coefs = np.asarray(values[1:], dtype='float32')\n            word_embeddings[word] = coefs\n\n    # Sentence vector = average of its word vectors (zeros for OOV words)\n    sentence_vectors = []\n    for i in clean_sentences:\n        if len(i) != 0:\n            v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()]) / (len(i.split()) + 0.001)\n        else:\n            v = np.zeros((100,))\n        sentence_vectors.append(v)\n\n    # Pairwise cosine similarity matrix between sentence vectors\n    sim_mat = np.zeros([len(sentences), len(sentences)])\n    for i in range(len(sentences)):\n        for j in range(len(sentences)):\n            if i != j:\n                sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1, 100), sentence_vectors[j].reshape(1, 100))[0, 0]\n\n    import networkx as nx\n    nx_graph = nx.from_numpy_array(sim_mat)\n    scores = nx.pagerank(nx_graph)\n    ranked_sentences = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)\n\n    # Take the requested number of sentences, or fewer if not enough are available\n    summary = []\n    for i in range(min(val2, len(ranked_sentences))):\n        summary.append(ranked_sentences[i][1])\n\n    summm = '\\n'.join(map(str, summary))\n\n    context = {}\n    context[\"content\"] = summm\n    context[\"nub\"] = val2\n\n    return render(request, 'web/result.html', context)\n
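\n# For reference: the ranking above is TextRank. Sentences are nodes, cosine\n# similarities are edge weights, and nx.pagerank computes the stationary scores\n# (PageRank with damping d, networkx default d = 0.85):\n#\n#   score(i) = (1 - d) / N + d * sum_j w_ji * score(j) / sum_k w_jk\n#\n# so a sentence ranks highly when it is similar to other high-ranking sentences.\n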
\n\n# Create your views here.\n\n\ndef index(request):\n    program = Programming.objects.all()\n    d = {'program': program}\n    return render(request, 'web/home.html', d)\n\n\n# AJAX\ndef load_courses(request):\n    programming_id = request.GET.get('programming')\n    courses = Course.objects.filter(programming_id=programming_id).order_by('name')\n    return render(request, 'web/city_dropdown_list_options.html', {'courses': courses})\n","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}
{"seq_id":"218799335","text":"#!/usr/bin/env python\n#\n# igcollect - Xen VM disks\n#\n# Copyright (c) 2016, InnoGames GmbH\n#\n\nfrom __future__ import print_function\nimport socket\nimport time\nimport subprocess\nimport re\n\n\ndef get_vbds():\n    p = subprocess.Popen(['/usr/sbin/xenstore-ls', '-f'],\n                         stdout=subprocess.PIPE)\n    mapping = dict(line.strip().split(' = ', 1) for line in p.stdout)\n    p.wait()\n    vbds = {}\n    vbd_re = re.compile(r'^/vm/([^/]+)/device/vbd/\d+/backend$')\n    for key in mapping:\n        match = vbd_re.match(key)\n        if match:\n            vbd_key = mapping[key][1:-1] + '/physical-device'\n            vbd_value = mapping[vbd_key][1:-1]\n            name = mapping['/vm/{uid}/name'.format(uid=match.group(1))][1:-1]\n            dn_key = mapping[key][1:-1] + '/dev'\n            dn_value = mapping[dn_key][1:-1]\n\n            vbds.setdefault(name, {})\n            vbds[name].setdefault(dn_value, vbd_value)\n    return vbds\n\n\ndef get_diskstats_dict():\n    '''Return a dictionary made from /proc/diskstats'''\n\n    with open('/proc/diskstats', 'r') as dsd:\n        diskstats_data = dsd.readlines()\n\n    diskstats_dict = {}\n    header = ['major', 'minor', 'name',\n              'reads', 'reads_merged', 'sec_read', 'ms_read',\n              'writes', 'writes_merged', 'sec_written', 'ms_written',\n              'cur_iops', 'ms_io', 'weighted_ms_io']\n\n    header.pop(2)  # 'name' becomes the dictionary key below, not a counter column\n\n    for line in diskstats_data:\n        # one device per line: the name, then the counters in header order\n        x = line.strip().split()\n        disk_name = x.pop(2)\n        diskstats_dict[disk_name] = {}\n        for i in header:\n            diskstats_dict[disk_name][i] = x.pop(0)\n\n    return diskstats_dict\n\n\ndef get_dmname_from_majorminor(diskstats, major=0, minor=0):\n    for disk in diskstats:\n        if (\n            int(diskstats[disk]['minor']) == minor and\n            int(diskstats[disk]['major']) == major\n        ):\n            return disk\n    return False\n\n\ngraphite_data = ''\nhostname = socket.gethostname().replace('.', '_')\nnow = str(int(time.time()))\nsector_size = 512\n\ndiskstats = get_diskstats_dict()\nvservers = get_vbds()\n
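\n# Each VM/device pair below yields Graphite plain-text lines of the form\n# <metric path> <value> <unix timestamp>, e.g. (hypothetical values):\n#   servers.<host>.virtualisation.vserver.<vm>.disk.<dev>.bytesRead 123456 1469000000\n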
\nfor server in vservers:\n    for device in vservers[server]:\n        major, minor = vservers[server][device].strip(\"'\").split(':')\n        dmname = get_dmname_from_majorminor(\n            diskstats, int(major, 16), int(minor, 16)\n        )\n        if not dmname:\n            continue  # no matching entry in /proc/diskstats\n\n        prefix = 'servers.%s.virtualisation.vserver.%s.disk.%s' % (\n            hostname, server.replace('.', '_'), device)\n        graphite_data += '%s.bytesRead %s %s\\n' % (\n            prefix, str(int(diskstats[dmname]['sec_read']) * sector_size), now)\n        graphite_data += '%s.bytesWrite %s %s\\n' % (\n            prefix, str(int(diskstats[dmname]['sec_written']) * sector_size), now)\n        graphite_data += '%s.iopsRead %s %s\\n' % (\n            prefix, str(diskstats[dmname]['reads']), now)\n        graphite_data += '%s.iopsWrite %s %s\\n' % (\n            prefix, str(diskstats[dmname]['writes']), now)\n        graphite_data += '%s.ioTimeMs %s %s\\n' % (\n            prefix, str(diskstats[dmname]['ms_io']), now)\n\nprint(graphite_data)\n","sub_path":"src/xen_vm_disk.py","file_name":"xen_vm_disk.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"91"}