\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nThis sends a fib list request to the local NFD and prints the response.\nThis is equivalent to the NFD command line command \"nfd-status -r\".\nSee http://redmine.named-data.net/projects/nfd/wiki/Management .\n\"\"\"\n\nimport time\nfrom pyndn import Face\nfrom pyndn import Name\nfrom pyndn import Interest\nfrom pyndn.util import Blob\nfrom pyndn.encoding import ProtobufTlv\nfrom pyndn.util.segment_fetcher import SegmentFetcher\n# This module is produced by: protoc --python_out=. fib-entry.proto\nfrom status import fib_entry_pb2\n\n\nclass Fib_status_getter(object):\n    def __init__(self):\n        self.total_result = ''\n\n    def dump(self, *list):\n        result = \"\"\n        for element in list:\n            result += (element if type(element) is str else str(element)) + \" \"\n        self.total_result = self.total_result + result + \" \\n\"\n\n    def run(self):\n        # The default Face connects to the local NFD.\n        face = Face()\n\n        interest = Interest(Name(\"/localhost/nfd/fib/list\"))\n        interest.setInterestLifetimeMilliseconds(4000)\n        self.dump(\"Express interest\", interest.getName().toUri())\n\n        enabled = [True]\n\n        def onComplete(content):\n            enabled[0] = False\n            self.printFibEntries(content)\n\n        def onError(errorCode, message):\n            enabled[0] = False\n            self.dump(message)\n\n        SegmentFetcher.fetch(face, interest, None, onComplete, onError)\n\n        # Loop calling processEvents until a callback sets enabled[0] = False.\n        while enabled[0]:\n            face.processEvents()\n\n            # We need to sleep for a few milliseconds so we don't use 100% of the CPU.\n            time.sleep(0.01)\n\n        # print('==================run FIB_status_getter finished===================')\n        face.shutdown()\n        return self.total_result\n\n    def printFibEntries(self, encodedMessage):\n        \"\"\"\n        This is called when all the segments are received to decode the\n        encodedMessage as repeated TLV FibEntry messages and display the values.\n\n        :param Blob encodedMessage: The repeated TLV-encoded FibEntry.\n        \"\"\"\n        fibEntryMessage = fib_entry_pb2.FibEntryMessage()\n        ProtobufTlv.decode(fibEntryMessage, encodedMessage)\n\n        self.dump(\"FIB:\")\n        for fibEntry in fibEntryMessage.fib_entry:\n            line = \"\"\n            line += ProtobufTlv.toName(fibEntry.name.component).toUri()\n\n            # Show the routes, closing each NextHopRecord's brace inside the loop.\n            for nexthop in fibEntry.next_hop_records:\n                line += (\" NextHopRecord={faceId=\" + str(nexthop.face_id) + \" cost=\" + str(nexthop.cost) + \"}\")\n\n            self.dump(line)\n","sub_path":"status/fib_status_getter.py","file_name":"fib_status_getter.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
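A minimal usage sketch for the record above, assuming a local NFD daemon is running and that status/fib_entry_pb2.py has already been generated with protoc; run() blocks until the segment fetch completes or errors, then returns the accumulated report string.

from status.fib_status_getter import Fib_status_getter

getter = Fib_status_getter()
report = getter.run()  # loops on face.processEvents() until a callback fires
print(report)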
+{"seq_id":"477157152","text":"import matplotlib.pyplot as plt\nimport math\nimport sys\n\nout_file = sys.argv[1]\nin_1 = sys.argv[2]\nin_2 = sys.argv[3]\nin_3 = sys.argv[4]\nin_4 = sys.argv[5]\nend = int(sys.argv[6])\n\ninds = [i for i in range(1, end)]\npt = [2**i for i in range(1, end)]\n\nf = open(in_1)\nss = []\nfor line in f.readlines():\n if int(line.split()[0]) in pt:\n ss.append(float(line.split()[1]))\n \nf = open(in_2)\nsparring1 = []\nfor line in f.readlines():\n if int(line.split()[0]) in pt:\n sparring1.append(float(line.split()[1]))\n \nf = open(in_3)\nsparring2 = []\nfor line in f.readlines():\n if int(line.split()[0]) in pt:\n sparring2.append(float(line.split()[1]))\n\nf = open(in_4)\nisss = []\nfor line in f.readlines():\n if int(line.split()[0]) in pt:\n isss.append(float(line.split()[1]))\n\nplt.plot(inds, ss)\nplt.plot(inds, sparring1)\nplt.plot(inds, sparring2)\nplt.plot(inds, isss)\nplt.title(\"Regret vs. log2 t\")\nax = plt.gca()\nplt.legend(['Self sparring', 'Sparring1', 'Sparring2', 'IS-SS'], bbox_to_anchor=(1.4, 1.0), bbox_transform=ax.transAxes)\nplt.savefig(out_file, bbox_inches='tight')\n","sub_path":"thompson/tsregret.py","file_name":"tsregret.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"182103193","text":"\"\"\"\nPure python helpers\n\nMeant to help with python interaction.\n\"\"\"\n\n# python imports\nimport traceback\nimport subprocess\nfrom threading import Timer\nfrom Queue import Queue, Empty\n\n\n__all__ = [\n 'python',\n 'executeInMainThread'\n]\n\nTHREAD_QUEUE = Queue()\n\n\ndef executeInMainThread(func, *args, **kwargs):\n \"\"\"\n Execute a function in the main thread.\n :param func:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n THREAD_QUEUE.put((func, args, kwargs))\n\n\ndef _main_thread_execute():\n \"\"\"\n --PRIVATE--\n :return:\n \"\"\"\n try:\n func, args, kwargs = THREAD_QUEUE.get(timeout=0.01)\n try:\n func(*args, **kwargs)\n except Exception:\n traceback.print_exc()\n THREAD_QUEUE.task_done()\n except Empty:\n pass\n\n timer = Timer(0.5, _main_thread_execute)\n timer.setDaemon(True)\n timer.start()\n# launch timed listener for main thread\n_main_thread_execute()\n\n\ndef python(pathToFile):\n \"\"\"\n Launch an app or file with python.\n :param pathToFile:\n :return:\n \"\"\"\n arg = ['python', pathToFile]\n subprocess.Popen(arg)\n","sub_path":"HFX/hfx_py/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"267731307","text":"import os\nimport json\nimport re\nimport sublime\nimport sublime_plugin\nfrom os import path\nfrom collections import deque\nimport pprint\n\nNODEDIR = path.dirname(__file__) + \"/nodelib\"\nNAME_ALIES_FILE = NODEDIR + \"/name_alies.txt\"\nDEBUG = False\npp = pprint.PrettyPrinter(indent=4)\n\n\nclass Nodejs():\n def __init__(self):\n self.data = deque()\n self.nameAlies = None\n self.loaded = False\n self.__loadNameAlies()\n\n def parseNode(self):\n \"\"\"\n load file from a folder\n parse to json\n replace alies names\n generate completions\n \"\"\"\n files = os.listdir(NODEDIR)\n for f in files:\n if not f.endswith('.json'):\n continue\n with open(NODEDIR + \"/\" + f, encoding='UTF-8') as fi:\n j = json.load(fi)\n self.__dealDict(j)\n\n self.__dealAliesName(self.data)\n # modify trigger as trigger\\t{parent}.{type}\n self.loaded = True\n if self.data:\n for dic in self.data:\n dic['trigger'] = \"{0}\\t{1}.{2}\".format(dic['trigger'], dic['parent'], dic['type'])\n print(\"nodejs completions loaded\")\n\n def __loadNameAlies(self):\n with open(NAME_ALIES_FILE) as fi:\n nameAlies = [line.split(',') for line in fi.readlines()]\n nameAlies = [(x.strip(), y.strip()) for x, y in nameAlies]\n self.nameAlies = nameAlies\n # print(self.nameAlies)\n\n def __dealDict(self, obj, parent=None):\n if (isinstance(obj, dict) and (\n 'modules' in obj or 'classes' in obj or 'methods'in obj\n or 'properties' in obj or 'events' in obj)):\n for k, v in obj.items():\n if isinstance(v, dict):\n self.__dealDict(v, obj)\n elif isinstance(v, list):\n self.__dealList(v, obj, k)\n elif isinstance(v, str):\n # print \"%s.%s=%s\" % (parent, k, v)\n pass\n\n if 'type' in obj and 'name' in obj and 'textRaw' in obj:\n if obj['type'] == 'module':\n self.__dealModule(obj, parent)\n if obj['type'] == 'classe':\n self.__dealClass(obj, parent)\n if obj['type'] == 'method':\n self.__dealMethod(obj, parent)\n if obj['type'] == 'propertie':\n self.__dealProperties(obj, parent)\n if obj['type'] == 'event':\n self.__dealEvent(obj, parent)\n\n def __dealList(self, list, parent, type):\n for v in list:\n if isinstance(v, dict):\n self.__dealDict(v, parent)\n elif isinstance(v, list):\n self.__dealList(v, parent)\n elif isinstance(v, str):\n # print \"%s=%s\" % parent, v\n pass\n\n def __dealModule(self, md, parent):\n # print(\"var %s = require(\\\"%s\\\");\" % (md['name'], md['name']))\n parentName = 'nodejs'\n if parent and 'name' in parent:\n parentName = parent['name']\n snippets = {\n \"content\": \"var {0} = require('{1}');\".format(md['name'], md['name']),\n \"doc\": md['desc'],\n \"trigger\": \"require{0}\".format(md['name']),\n \"type\": 'module',\n \"parent\": parentName\n }\n self.data.append(snippets)\n\n def __dealMethod(self, md, parent):\n m = md['textRaw']\n match = re.match(r'([a-zA-Z_0-9.]+)(.*)', m)\n if match and len(match.groups()) == 2:\n mname = match.group(1)\n pname = match.group(2)\n pnames = re.findall(r'([a-zA-Z_0-9.]+)', pname)\n pnames2 = [\"${{{0}:{1}}}\".format(i+1, v) for i, v in enumerate(pnames)]\n snippets = {\n \"content\": \"{0}({1})\".format(mname, ', '.join(pnames2)),\n \"doc\": md['desc'],\n \"trigger\": mname,\n \"type\": 'method',\n \"parent\": parent['name']\n }\n self.data.append(snippets)\n\n def __dealProperties(self, md, parent):\n snippets = {\n \"content\": \"{0}.{1}\".format(parent['name'], md['name']),\n \"doc\": md['desc'],\n \"trigger\": \"{0}.{1}\".format(parent['name'], md['name']),\n \"type\": 'properties',\n \"parent\": parent['name']\n }\n 
self.data.append(snippets)\n\n def __dealEvent(self, md, parent):\n eFunc = re.match('(.*)', md['desc'])\n eFunc = eFunc and eFunc.group(1) or 'function() {{}}'\n snippets = {\n \"content\": '{0}.on(\"{1}\", {2});'.format(parent['name'], md['name'], eFunc),\n \"doc\": md['desc'],\n \"trigger\": '{0}.on{1}'.format(parent['name'], md['name']),\n \"type\": 'event',\n \"parent\": parent['name']\n }\n self.data.append(snippets)\n\n def __dealClass(self, md, parent):\n snippets = {\n \"content\": \"{0}\".format(md['name']),\n \"doc\": md['desc'],\n \"trigger\": \"{0}\".format(md['name']),\n \"type\": 'class',\n \"parent\": parent['name']\n }\n self.data.append(snippets)\n\n def __dealAliesName(self, snippets):\n for snippet in snippets:\n trigger = snippet['trigger']\n content = snippet['content']\n for x, y in self.nameAlies:\n trigger = trigger.replace(x, y)\n content = content.replace(x, y)\n snippet['trigger'] = trigger\n snippet['content'] = content\n \nnodejs = Nodejs()\nif DEBUG:\n print(\"=======\" * 20)\n nodejs.parseNode()\n for snippets in nodejs.data:\n pp.pprint(snippets['trigger'])\n pp.pprint(snippets['content'])\n\n\nclass NodejsCompleteListener(sublime_plugin.EventListener):\n def __isNodeJsView(self, view):\n return 'nodejs' in view.scope_name(0)\n\n def on_post_save(self, view):\n pass\n\n def on_load(self, view):\n if self.__isNodeJsView(view) and not nodejs.loaded:\n nodejs.parseNode()\n\n def on_activated(self, view):\n if self.__isNodeJsView(view) and not nodejs.loaded:\n nodejs.parseNode()\n\n def on_query_completions(self, view, prefix, locations):\n \"\"\"\n add completions to the editer\n \"\"\"\n if self.__isNodeJsView(view):\n # view.show_popup(\n # decodeHtmlentities(nodejs.data[0]['doc']),\n # flags=sublime.COOPERATE_WITH_AUTO_COMPLETE)\n curline = view.substr(view.line(view.sel()[0])).strip(' ;')\n # print(\"curline\", curline)\n func = re.match(r'\\b([a-zA-Z0-9.]+)\\b', curline)\n return [\n (snippets['trigger'], snippets['content'])\n for snippets in nodejs.data if snippets['trigger'].startswith(func.group(1) if func else prefix)]\n\n def on_modified_async(self, view):\n \"\"\"\n in this method, it will show the document\n \"\"\"\n global docShowed\n if self.__isNodeJsView(view):\n curline = view.substr(view.line(view.sel()[0])).strip(' ;')\n # print(\"curline\", curline)\n # test match --- var xxx = abc (aa, bb)\n func = re.match(r'(var\\s+\\w+\\s*=\\s*[a-zA-Z0-9.]+)', curline)\n if func:\n func = func.group(1)\n else:\n func = re.match(r'\\b([a-zA-Z0-9.]+)\\b', curline)\n if func:\n func = func.group(1)\n else:\n if view.is_popup_visible():\n view.hide_popup()\n return\n params = re.match(r'\\((.*)\\)?', curline)\n if params:\n params = params.group(1)\n func = \"{0}({1}\".format(func, params)\n # print(\"params\", params)\n # print('func', func)\n if curline and curline.endswith(')'):\n docs = [snippet['doc'] for snippet in nodejs.data if func in snippet['content']]\n if docs:\n view.show_popup(\n decodeHtmlentities(docs[0]),\n flags=sublime.COOPERATE_WITH_AUTO_COMPLETE, max_width=600, max_height=400)\n docShowed = True\n\n\ndef decodeHtmlentities(string):\n entity_re = re.compile(\"&(#?)(\\d{1,5}|\\w{1,8});\")\n\n def substitute_entity(match):\n from html.entities import name2codepoint as n2cp\n ent = match.group(2)\n if match.group(1) == \"#\":\n return chr(int(ent))\n else:\n cp = n2cp.get(ent)\n\n if cp:\n return chr(cp)\n else:\n return match.group()\n\n return entity_re.subn(substitute_entity, string)[0]\n# match = re.match(r'([a-zA-Z_0-9.]+)(.*)', 
'request.write(chunk[, encoding][, callback])')\n# print(match.groups())\n# pm = re.findall(r'([a-zA-Z_0-9.]+\\]?)', match.group(2))\n# print(pm)\n","sub_path":"nodejs.py","file_name":"nodejs.py","file_ext":"py","file_size_in_byte":8951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
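The decodeHtmlentities helper above resolves both named and numeric character references, leaving unknown names untouched; a small worked example (outside Sublime Text this module won't import, so treat it as illustrative):

# '&lt;' is a named entity, '&#38;' a decimal one, '&bogus;' has no mapping.
print(decodeHtmlentities('a &lt; b &#38; c &bogus;'))
# -> 'a < b & c &bogus;'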
+{"seq_id":"96735112","text":"\nimport math\nimport torch\nimport logging\nfrom typing import Tuple, Dict, List\nfrom replay.replay import BufferFields\nfrom agent.models.policy import PolicyNetwork, QNetwork\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass Gaussian:\n '''\n Tanh squashed Gaussian Distribution\n '''\n\n def __init__(self, mean: torch.Tensor, log_std: torch.Tensor, squash: bool=True, eval_mode: bool=False):\n self._mean = mean\n min_log_std = -20\n max_log_std = 2\n self._log_std = torch.clamp(log_std,\n min=min_log_std,\n max=max_log_std)\n if eval_mode:\n self._std = 0\n else:\n self._std = torch.exp(self._log_std)\n\n self._dim = mean.shape[1]\n self._squash = squash\n\n def sample(self) -> Tuple[torch.Tensor, torch.Tensor]:\n '''\n Draw a sample from Gaussian distribution\n '''\n\n noise = self._std * torch.normal(torch.zeros_like(self._mean), torch.ones_like(self._mean))\n sample = self._mean + noise\n\n log_pi = self.loglikelihood(sample)\n\n if self._squash :\n sample = torch.tanh(sample)\n\n return sample, log_pi\n\n def loglikelihood(self, samples: torch.Tensor) -> torch.Tensor:\n '''\n Compute log likelihood of samples\n '''\n EPS = 1e-8\n z = (samples - self._mean) / (self._std + EPS)\n loglikelihood = -(torch.sum(self._log_std + 0.5 * z ** 2, dim=-1, keepdim=True)\n + 0.5 * self._dim * math.log(2 * math.pi))\n\n # because of squash\n if self._squash:\n loglikelihood -= torch.sum(torch.log((1 - torch.tanh(samples) ** 2) + EPS), dim=-1, keepdim=True)\n\n return loglikelihood\n\nclass SACAgent:\n '''\n This class implemented soft actor critic agent which can be used\n in sac algorithm. It includes a policy network and 2 twin Q network.\n '''\n def __init__(self,\n device_id: int,\n world_size: int,\n policy_hidden_size: List,\n q_hidden_size: List,\n model_path: str=None):\n\n self._device_id = device_id\n self._world_size = world_size\n\n self._logger = logging.getLogger()\n\n self._pi = PolicyNetwork(\n state_size=BufferFields['state'],\n action_size=BufferFields['action'],\n hidden_sizes=policy_hidden_size).to(device_id)\n\n q_param = {\n 'state_size': BufferFields['state'],\n 'action_size': BufferFields['action'],\n 'hidden_sizes': q_hidden_size}\n\n self._q1 = QNetwork(**q_param).to(device_id)\n self._q2 = QNetwork(**q_param).to(device_id)\n self._q1_target = QNetwork(**q_param).to(device_id)\n self._q2_target = QNetwork(**q_param).to(device_id)\n\n if model_path:\n self.load_model(model_path)\n self._logger.info(f'load agent model from {model_path}')\n\n # wrap models after init and load\n self._pi = self._ddp_wrap(self._pi)\n self._q1 = self._ddp_wrap(self._q1)\n self._q2 = self._ddp_wrap(self._q2)\n\n self._logger.info(self._pi)\n self._logger.info(self._q1)\n self._logger.info(self._q2)\n self._logger.info(self._q1_target)\n self._logger.info(self._q2_target)\n\n self._eval_mode = False\n self._init_policy_std = 0.75\n\n def eval_mode(self, eval_mode: bool) -> None:\n '''\n Set network as evaluation mode.\n Only works for some type of network architecture\n '''\n self._eval_mode = eval_mode\n if eval_mode:\n self._pi.eval()\n else:\n self._pi.train()\n\n def pi_parameters(self) -> Dict:\n '''\n Return pi net paramenters\n '''\n return self._pi.parameters()\n\n def q1_parameters(self) -> Dict:\n '''\n Return q1 net parameters\n '''\n return self._q1.parameters()\n\n def q2_parameters(self) -> Dict:\n '''\n Return q2 net parameters\n '''\n return self._q2.parameters()\n\n def pi(self, state: torch.Tensor, use_init_std: bool=False) -> 
Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n '''\n Sample an action using policy net and gaussian distribution\n '''\n state = state.to(self._device_id)\n mu, log_std = self._pi(state)\n if use_init_std:\n log_std = torch.ones_like(log_std)*self._init_policy_std\n distribution = Gaussian(mu, log_std, eval_mode=self._eval_mode)\n action, log_pi = distribution.sample()\n return mu, log_std, action, log_pi\n\n def q(self, state: torch.Tensor, action: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n '''\n Return twin q values\n '''\n state = state.to(self._device_id)\n action = action.to(self._device_id)\n q1 = self._q1(state, action)\n q2 = self._q2(state, action)\n return q1, q2\n\n def q_target(self, state: torch.Tensor, action: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n '''\n Return target twin q values\n '''\n state = state.to(self._device_id)\n action = action.to(self._device_id)\n q1_target = self._q1_target(state, action)\n q2_target = self._q2_target(state, action)\n return q1_target, q2_target\n\n def update_q_target(self, update_tau: float) -> None:\n '''\n Soft update target q-network parameters\n '''\n q1_state_dict = self._q1.state_dict()\n q2_state_dict = self._q2.state_dict()\n self._soft_update_state_dict(self._q1_target, q1_state_dict, update_tau)\n self._soft_update_state_dict(self._q2_target, q2_state_dict, update_tau)\n\n def save_model(self, path: str) -> None:\n '''\n Save models without ddp wrapper to the given path\n '''\n param = {}\n param['q1'] = self._strip_ddp_state_dict(self._q1.state_dict())\n param['q2'] = self._strip_ddp_state_dict(self._q2.state_dict())\n param['pi'] = self._strip_ddp_state_dict(self._pi.state_dict())\n torch.save(param, path)\n\n def load_model(self, path: str) -> None:\n '''\n Load model from given path without ddp wrapper,\n and assuming always load cuda:0 model, so we map\n cuda:0 to current assigned device id.\n '''\n map_location = {'cuda:0': f'cuda:{self._device_id}'}\n param = torch.load(path, map_location=map_location)\n\n self._q1.load_state_dict(param['q1'])\n self._q2.load_state_dict(param['q2'])\n self._q1_target.load_state_dict(param['q1'])\n self._q2_target.load_state_dict(param['q2'])\n self._pi.load_state_dict(param['pi'])\n\n def _ddp_wrap(self, model: torch.nn.Module) -> torch.nn.Module:\n '''\n Wrapper network module using DistributedDataParallel object\n if the world size is larger than 1.\n '''\n if self._world_size > 1:\n return DDP(model, device_ids=[self._device_id])\n else:\n return model\n\n def _soft_update_state_dict(self, model: torch.nn.Module, state_dict: Dict, tau: float=1) -> None:\n '''\n Soft update state dict of model given state dict and tau\n '''\n state_dict = self._strip_ddp_state_dict(state_dict)\n if tau == 1:\n model.load_state_dict(state_dict)\n elif tau > 0:\n update_sd = {k: tau * state_dict[k] + (1 - tau) * v for k, v in model.state_dict().items()}\n model.load_state_dict(update_sd)\n\n def _strip_ddp_state_dict(self, state_dict: Dict) -> Dict:\n '''\n DistributedDataParallel prepends 'module.' 
to every key,\n but for the general purpose, we want to save and load\n state dict without prepended key.\n '''\n clean_state_dict = type(state_dict)() \n for k, v in state_dict.items(): \n key = k[7:] if k[:7] == \"module.\" else k \n clean_state_dict[key] = v \n return clean_state_dict\n\n","sub_path":"agent/sac_agent.py","file_name":"sac_agent.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
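The Gaussian class at the top of this record is a tanh-squashed Gaussian: sample() draws u ~ N(mu, sigma) and returns tanh(u), while loglikelihood() applies the change-of-variables correction by subtracting sum(log(1 - tanh(u)**2)) from the Gaussian log-density. A minimal shape-and-range check (values illustrative, not from the source):

import torch

mean = torch.zeros(1, 4)            # batch of one 4-dim action
log_std = torch.full((1, 4), -1.0)
dist = Gaussian(mean, log_std)      # class defined in the record above
action, log_pi = dist.sample()
print(action.shape, log_pi.shape)   # torch.Size([1, 4]) torch.Size([1, 1])
assert action.abs().max() < 1.0     # tanh keeps squashed actions inside (-1, 1)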
+{"seq_id":"23687450","text":"import os, io, json, sys\n\nif sys.platform == 'linux':\n import pwd\nfrom helpers import SpikeGLX_utils\n\nimport numpy as np\n\ndef create_samba_directory(samba_server, samba_share):\n\n if sys.platform == 'linux':\n proc_owner_uid = str(pwd.getpwnam(os.environ['USER']).pw_uid)\n share_string = 'smb-share:server={},share={}'.format(samba_server, samba_share)\n data_dir = os.path.join('/', 'var', 'run', 'user', proc_owner_uid, 'gvfs', share_string)\n else:\n data_dir = r'\\\\' + os.path.join(samba_server, samba_share)\n\n return data_dir\n\ndef createInputJson(output_file, \n npx_directory=None, \n continuous_file = None,\n spikeGLX_data=True,\n input_meta_path=None,\n extracted_data_directory=None,\n kilosort_output_directory=None,\n ks_make_copy=False,\n probe_type='3A',\n catGT_run_name='test',\n gate_string='0',\n trigger_string='0,0',\n probe_string='0',\n depth_est_fig = 0,\n catGT_stream_string = '-ap',\n catGT_car_mode = 'gbldmx',\n catGT_loccar_min_um = 40,\n catGT_loccar_max_um = 160,\n catGT_cmd_string = '-prb_fld -out_prb_fld',\n catGT_maxZ_um = -1,\n noise_template_use_rf = True,\n event_ex_param_str = 'XD=4,1,50',\n tPrime_im_ex_list = 'SY=0,384,6,500',\n tPrime_ni_ex_list = 'XA=0,1,3,500',\n sync_period = 1.0,\n toStream_sync_params = 'SY=0,384,6,500',\n niStream_sync_params = 'XA=0,1,3,500',\n tPrime_3A = False,\n toStream_path_3A = None,\n fromStream_list_3A = None,\n ks_doFilter = 0,\n ks_remDup = 0, \n ks_finalSplits = 1,\n ks_labelGood = 1,\n ks_saveRez = 1,\n ks_copy_fproc = 0,\n ks_minfr_goodchannels = 0.1, \n ks_whiteningRadius_um = 163,\n ks_Th = '[10,4]',\n ks_CSBseed = 1,\n ks_LTseed = 1,\n ks_templateRadius_um = 163,\n ks_nblocks = 5,\n ks_CAR = 0,\n ks_output_tag = 'ks2',\n c_Waves_snr_um = 160,\n wm_spread_thresh = 0.12,\n wm_site_range = 16,\n qm_isi_thresh = 1.5/1000,\n include_pcs = True\n ):\n\n # hard coded paths to code on your computer and system\n ecephys_directory = r'C:\\Users\\colonellj\\Documents\\ecephys_anaconda\\ecephys_spike_sorting\\ecephys_spike_sorting'\n \n # location of kilosor respository and kilosort version\n\n kilosort_repository = r'C:\\Users\\colonellj\\Documents\\KS2_largetemplate\\Kilosort2'\n\n KS2ver = '2.0' # must equal '3.0', '2.5' or '2.0', and match the kiilosort_repository\n \n # KS 3.0 does not yet output pcs.\n if KS2ver == '3.0':\n include_pcs = False # set to false for KS2ver = '3.0'\n \n npy_matlab_repository = r'C:\\Users\\colonellj\\Documents\\npy-matlab-master'\n catGTPath = r'C:\\Users\\colonellj\\Documents\\CatGT-win'\n tPrime_path=r'C:\\Users\\colonellj\\Documents\\TPrime-win'\n cWaves_path=r'C:\\Users\\colonellj\\Documents\\C_Waves-win'\n \n \n # for config files and kilosort working space\n kilosort_output_tmp = r'C:\\kilosort_datatemp' \n \n \n # derived directory names\n \n modules_directory = os.path.join(ecephys_directory,'modules')\n \n if kilosort_output_directory is None \\\n and extracted_data_directory is None \\\n and npx_directory is None:\n raise Exception('Must specify at least one output directory')\n\n\n #default ephys params. 
For spikeGLX, these get replaced by values read from metadata\n sample_rate = 30000\n num_channels = 385 \n reference_channels = [191]\n uVPerBit = 2.34375\n acq_system = 'PXI'\n \n \n if spikeGLX_data:\n # location of the raw data is the continuous file passed from script\n # metadata file should be located in same directory\n # \n # kilosort output will be put in the same directory as the input raw data,\n # set in kilosort_output_directory passed from script\n # kilososrt postprocessing (duplicate removal) and identification of noise\n # clusters will act on phy output in the kilosort output directory\n #\n # \n if input_meta_path is not None:\n probe_type, sample_rate, num_channels, reference_channels, \\\n uVPerBit, useGeom = SpikeGLX_utils.EphysParams(input_meta_path) \n \n print('SpikeGLX params read from meta')\n print('probe type: {:s}, sample_rate: {:.5f}, num_channels: {:d}, uVPerBit: {:.4f}'.format\\\n (probe_type, sample_rate, num_channels, uVPerBit))\n print('reference channels: ' + repr(reference_channels))\n \n #print('kilosort output directory: ' + kilosort_output_directory )\n\n \n else:\n print('using default values for probe params')\n \n\n \n\n # geometry params by probe type. expand the dictoionaries to add types\n # vertical probe pitch vs probe type\n vpitch = {'3A': 20, 'NP1': 20, 'NP21': 15, 'NP24': 15, 'NP1100': 6, 'NP1300':20} \n hpitch = {'3A': 32, 'NP1': 32, 'NP21': 32, 'NP24': 32, 'NP1100': 6, 'NP1300':48} \n nColumn = {'3A': 2, 'NP1': 2, 'NP21': 2, 'NP24': 2, 'NP1100': 8,'NP1300':2} \n \n \n # CatGT needs the inner and outer redii for local common average referencing\n # specified in sites\n\n catGT_loccar_min_sites = int(round(catGT_loccar_min_um/vpitch.get(probe_type)))\n catGT_loccar_max_sites = int(round(catGT_loccar_max_um/vpitch.get(probe_type)))\n # print('loccar min: ' + repr(catGT_loccar_min_sites))\n \n # whiteningRange is the number of sites used for whitening in KIlosort\n # preprocessing. 
Calculate the number of sites within the user-specified\n # whitening radius for this probe geometery\n # for a Np 1.0 probe, 163 um => 32 sites\n nrows = np.sqrt((np.square(ks_whiteningRadius_um) - np.square(hpitch.get(probe_type))))/vpitch.get(probe_type)\n ks_whiteningRange = int(round(2*nrows*nColumn.get(probe_type)))\n if ks_whiteningRange > 384:\n ks_whiteningRange = 384\n \n # nNeighbors is the number of sites kilosort includes in a template.\n # Calculate the number of sites within that radisu.\n maxNeighbors = 64 # 64 for standard build of KS\n nrows = np.sqrt((np.square(ks_templateRadius_um) - np.square(hpitch.get(probe_type))))/vpitch.get(probe_type)\n ks_nNeighbors = int(round(2*nrows*nColumn.get(probe_type)))\n if ks_nNeighbors > maxNeighbors:\n ks_nNeighbors = maxNeighbors \n print('ks_nNeighbors: ' + repr(ks_nNeighbors))\n \n c_waves_radius_sites = int(round(c_Waves_snr_um/vpitch.get(probe_type)))\n\n # Create string designating temporary output file for KS2 (gets inserted into KS2 config.m file)\n fproc = os.path.join(kilosort_output_tmp,'temp_wh.dat') # full path for temp whitened data file\n fproc_forward_slash = fproc.replace('\\\\','/')\n fproc_str = \"'\" + fproc_forward_slash + \"'\"\n \n # Deduce sort outptut tag from kilosort_output_directory\n \n \n dictionary = \\\n {\n\n \"directories\": {\n \"ecephys_directory\":ecephys_directory,\n \"npx_directory\": npx_directory,\n \"extracted_data_directory\": extracted_data_directory,\n \"kilosort_output_directory\": kilosort_output_directory,\n \"kilosort_output_tmp\": kilosort_output_tmp\n },\n\n \"common_files\": {\n \"settings_json\" : npx_directory,\n \"probe_json\" : os.path.join(extracted_data_directory,'probe_json.json')\n },\n\n \"waveform_metrics\" : {\n \"waveform_metrics_file\" : os.path.join(kilosort_output_directory, 'waveform_metrics.csv')\n },\n \n \"cluster_metrics\" : {\n \"cluster_metrics_file\" : os.path.join(kilosort_output_directory, 'metrics.csv')\n },\n\n \"ephys_params\": {\n \"probe_type\" : probe_type,\n \"sample_rate\" : sample_rate,\n \"lfp_sample_rate\" : 2500,\n \"bit_volts\" : uVPerBit,\n \"num_channels\" : num_channels,\n \"reference_channels\" : reference_channels,\n \"vertical_site_spacing\" : 10e-6,\n \"ap_band_file\" : continuous_file,\n \"lfp_band_file\" : continuous_file.replace('.ap.bin', '.lf.bin'),\n \"reorder_lfp_channels\" : False,\n \"cluster_group_file_name\" : 'cluster_group.tsv'\n }, \n\n \"extract_from_npx_params\" : {\n \"npx_directory\": npx_directory,\n \"settings_xml\": npx_directory,\n \"npx_extractor_executable\": r\"C:\\Users\\svc_neuropix\\Documents\\GitHub\\npxextractor\\Release\\NpxExtractor.exe\",\n \"npx_extractor_repo\": r\"C:\\Users\\svc_neuropix\\Documents\\GitHub\\npxextractor\"\n },\n \n \"depth_estimation_params\" : {\n \"hi_noise_thresh\" : 50.0,\n \"lo_noise_thresh\" : 3.0,\n \"save_figure\" : depth_est_fig,\n \"figure_location\" : os.path.join(extracted_data_directory, 'probe_depth.png'),\n \"smoothing_amount\" : 5,\n \"power_thresh\" : 2.5,\n \"diff_thresh\" : -0.06,\n \"freq_range\" : [0, 10],\n \"max_freq\" : 150,\n \"saline_range_um\" : [3700, 3800],\n \"n_passes\" : 10,\n \"air_gap_um\" : 1000,\n \"time_interval\" : 5,\n \"skip_s_per_pass\" : 10,\n \"start_time\" : 10\n }, \n\n \"median_subtraction_params\" : {\n \"median_subtraction_executable\": \"C:\\\\Users\\\\svc_neuropix\\\\Documents\\\\GitHub\\\\spikebandmediansubtraction\\\\Builds\\\\VisualStudio2013\\\\Release\\\\SpikeBandMedianSubtraction.exe\",\n \"median_subtraction_repo\": 
\"C:\\\\Users\\\\svc_neuropix\\\\Documents\\\\GitHub\\\\spikebandmediansubtraction\\\\\",\n },\n\n \"kilosort_helper_params\" : {\n\n \"matlab_home_directory\": kilosort_output_tmp,\n \"kilosort_repository\" : kilosort_repository,\n \"npy_matlab_repository\" : npy_matlab_repository,\n \"kilosort_version\" : 2,\n \"spikeGLX_data\" : True,\n \"ks_make_copy\": ks_make_copy,\n \"surface_channel_buffer\" : 15,\n\n \"kilosort2_params\" :\n {\n \"KSver\" : KS2ver,\n \"remDup\" : ks_remDup, #these are expressed as int rather than Bool for matlab compatability\n \"finalSplits\" : ks_finalSplits,\n \"labelGood\" : ks_labelGood,\n \"saveRez\" : ks_saveRez,\n \"copy_fproc\" : ks_copy_fproc,\n \"fproc\" : fproc_str,\n \"chanMap\" : \"'chanMap.mat'\",\n \"doFilter\" : ks_doFilter,\n \"fshigh\" : 150,\n \"minfr_goodchannels\" : ks_minfr_goodchannels,\n \"Th\" : ks_Th,\n \"lam\" : 10,\n \"AUCsplit\" : 0.9,\n \"minFR\" : 1/50.,\n \"momentum\" : '[20 400]',\n \"sigmaMask\" : 30,\n \"ThPre\" : 8,\n \"gain\" : uVPerBit,\n \"CSBseed\" : ks_CSBseed,\n \"LTseed\" : ks_LTseed,\n \"whiteningRange\" : ks_whiteningRange,\n \"nNeighbors\" : ks_nNeighbors,\n \"CAR\" : ks_CAR,\n \"nblocks\" : ks_nblocks\n }\n },\n \n \"pykilosort_helper_params\" : {\n \"preprocessing_function\" : 'kilosort2', \n \"copy_fproc\" : ks_copy_fproc,\n \"fproc\" : fproc_str,\n \"seed\" : ks_LTseed,\n \"ks2_mode\" : False,\n \"perform_drift_registration\" : True,\n \"car\" : ks_CAR,\n \"Th\" : ks_Th,\n \"ThPre\" : 8,\n \"lam\" : 10,\n \"AUCsplit\" : 0.9,\n \"minFR\" : 1/50.,\n \"momentum\" : '[20 400]',\n \"sig_datashift\" : 20,\n \"sigmaMask\" : 30,\n \"fshigh\" : 300,\n \"fslow\" : 10000,\n \"minfr_goodchannels\" : 0,\n \"whiteningRange\" : ks_whiteningRange, \n \"deterministic_mode\" : True, \n \"nblocks\" : ks_nblocks,\n \"doFilter\" : ks_doFilter\n\n },\n \n\n \"ks_postprocessing_params\" : {\n \"align_avg_waveform\" : False, \n \"remove_duplicates\" : True,\n \"cWaves_path\" : cWaves_path,\n \"within_unit_overlap_window\" : 0.00017,\n \"between_unit_overlap_window\" : 0.00041,\n \"between_unit_dist_um\" : 66,\n \"deletion_mode\" : 'lowAmpCluster',\n \"include_pcs\" : include_pcs\n },\n\n \"mean_waveform_params\" : { \n \"mean_waveforms_file\" : os.path.join(kilosort_output_directory, 'mean_waveforms.npy'),\n \"samples_per_spike\" : 82,\n \"pre_samples\" : 20,\n \"num_epochs\" : 1, #epochs not implemented for c_waves\n \"spikes_per_epoch\" : 1000,\n \"spread_threshold\" : wm_spread_thresh,\n \"site_range\" : wm_site_range, \n \"cWaves_path\" : cWaves_path,\n \"use_C_Waves\" : True,\n \"snr_radius\" : c_waves_radius_sites,\n \"snr_radius_um\" : c_Waves_snr_um\n },\n \n\n \"noise_waveform_params\" : {\n \"classifier_path\" : os.path.join(modules_directory, 'noise_templates', 'rf_classifier.pkl'),\n \"multiprocessing_worker_count\" : 10,\n \"use_random_forest\" : noise_template_use_rf\n },\n\n \"quality_metrics_params\" : {\n \"isi_threshold\" : qm_isi_thresh,\n \"min_isi\" : 0.000166,\n \"tbin_sec\" : 0.001,\n \"max_radius_um\" : 68,\n \"max_spikes_for_unit\" : 500,\n \"max_spikes_for_nn\" : 10000,\n \"n_neighbors\" : 4,\n 'n_silhouette' : 10000,\n \"drift_metrics_interval_s\" : 51,\n \"drift_metrics_min_spikes_per_interval\" : 10,\n \"include_pcs\" : include_pcs\n },\n \n \"catGT_helper_params\" : {\n \"run_name\" : catGT_run_name,\n \"gate_string\" : gate_string,\n \"probe_string\" : probe_string,\n \"trigger_string\": trigger_string,\n \"stream_string\" : catGT_stream_string,\n \"car_mode\" : catGT_car_mode,\n \"loccar_inner\" : 
catGT_loccar_min_sites,\n \"loccar_outer\": catGT_loccar_max_sites,\n \"loccar_inner_um\" : catGT_loccar_min_um,\n \"loccar_outer_um\" : catGT_loccar_max_um,\n \"maxZ_um\" : catGT_maxZ_um,\n 'useGeom' : useGeom,\n \"cmdStr\" : catGT_cmd_string,\n \"catGTPath\" : catGTPath\n },\n\n \"tPrime_helper_params\" : {\n \"tPrime_path\" : tPrime_path,\n \"im_ex_list\" : tPrime_im_ex_list,\n \"ni_ex_list\" : tPrime_ni_ex_list,\n \"sync_period\" : sync_period,\n \"toStream_sync_params\" : toStream_sync_params,\n \"ni_sync_params\" : niStream_sync_params,\n \"tPrime_3A\" : tPrime_3A,\n \"toStream_path_3A\" : toStream_path_3A,\n \"fromStream_list_3A\" : fromStream_list_3A,\n \"psth_ex_str\": event_ex_param_str,\n \"sort_out_tag\": ks_output_tag\n }, \n \n \"psth_events\": {\n \"event_ex_param_str\": event_ex_param_str\n }\n \n }\n\n with io.open(output_file, 'w', encoding='utf-8') as f:\n f.write(json.dumps(dictionary, ensure_ascii=False, sort_keys=True, indent=4))\n\n return dictionary","sub_path":"ecephys_spike_sorting/scripts/create_input_json.py","file_name":"create_input_json.py","file_ext":"py","file_size_in_byte":16060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
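A hedged example of calling createInputJson from a run script; every path below is a placeholder, and the tool paths hard-coded inside the function still have to be edited for the local machine. Note that input_meta_path looks effectively required in practice: useGeom is only bound inside the metadata branch, yet the returned dictionary references it.

info = createInputJson(
    r'D:\analysis\run1\run1_imec0_input.json',   # hypothetical output_file
    npx_directory=r'D:\data\run1',
    continuous_file=r'D:\data\run1\run1_g0_t0.imec0.ap.bin',
    input_meta_path=r'D:\data\run1\run1_g0_t0.imec0.ap.meta',
    extracted_data_directory=r'D:\analysis\run1',
    kilosort_output_directory=r'D:\analysis\run1\imec0_ks2')
print(info['ephys_params']['sample_rate'])       # value read from the .meta file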
+{"seq_id":"123306271","text":"from torch.utils.data import DataLoader\nimport torch\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\n\ndef face_gan_man(img):\n ###### Definition of variables ######\n batchSize=1\n input_nc=3\n output_nc=3\n size= 224\n device= 'cuda' if torch.cuda.is_available() else 'cpu'\n n_cpu=8\n generator_B2A='/app/netG_B2A10.pth'\n \n # Load image\n img = Image.open(img)\n width, height = img.size\n ratio = width / height \n\n\n # Network\n netG_B2A = Generator(output_nc, input_nc)\n netG_B2A.to(device)\n\n # Load state dicts\n netG_B2A.load_state_dict(torch.load(generator_B2A,map_location=torch.device(device)))\n\n # Set model's test mode\n netG_B2A.eval()\n\n # Dataset loader\n transforms_ = [ transforms.Resize([size,size]),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]\n dataloader = DataLoader(ImageDataset(img, transforms_=transforms_,),\n batch_size=batchSize, shuffle=False, num_workers=n_cpu)\n\n for batch in dataloader:\n # Set model input\n real_B = batch.to(device)\n\n # Generate output\n fake_A = 0.5*(netG_B2A(real_B).data + 1.0)\n\n return fake_A, ratio\n\nclass ImageDataset(Dataset):\n def __init__(self, img, transforms_=None,):\n self.transform = transforms.Compose(transforms_)\n self.files_A = img\n\n def __getitem__(self, index):\n item_A = self.transform(self.files_A)\n return item_A\n\n def __len__(self):\n return 1\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_features):\n super(ResidualBlock, self).__init__()\n\n conv_block = [ nn.ReflectionPad2d(1),\n nn.Conv2d(in_features, in_features, 3),\n nn.InstanceNorm2d(in_features),\n nn.ReLU(inplace=True),\n nn.ReflectionPad2d(1),\n nn.Conv2d(in_features, in_features, 3),\n nn.InstanceNorm2d(in_features) ]\n\n self.conv_block = nn.Sequential(*conv_block)\n\n def forward(self, x):\n return x + self.conv_block(x)\n\nclass Generator(nn.Module):\n def __init__(self, input_nc, output_nc, n_residual_blocks=9):\n super(Generator, self).__init__()\n\n # Initial convolution block\n model = [ nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, 64, 7),\n nn.InstanceNorm2d(64),\n nn.ReLU(inplace=True) ]\n\n # Downsampling\n in_features = 64\n out_features = in_features*2\n for _ in range(2):\n model += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.ReLU(inplace=True) ]\n in_features = out_features\n out_features = in_features*2\n\n # Residual blocks\n for _ in range(n_residual_blocks):\n model += [ResidualBlock(in_features)]\n\n # Upsampling\n out_features = in_features//2\n for _ in range(2):\n model += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),\n nn.InstanceNorm2d(out_features),\n nn.ReLU(inplace=True) ]\n in_features = out_features\n out_features = in_features//2\n\n # Output layer\n model += [ nn.ReflectionPad2d(3),\n nn.Conv2d(64, output_nc, 7),\n nn.Tanh() ]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n return self.model(x)\n","sub_path":"face_GAN_man.py","file_name":"face_GAN_man.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"116009665","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport copy\nimport csv\nimport os\nimport runge_kutta4\nimport mplcursors\n\n#Xのデータの入っているout.csvまでのパス\npath = '../out.csv'\n# プロットするデータのinput\nD = np.loadtxt(path, delimiter=',', dtype='float32')\n#ルンゲクッタの計算ステップ回数\nN = 20\n\n#errorは各Xとその時間発展事の誤差の大きさを集めるリスト\n#176=(900-20)/5\nerror1 = [[] for i in range(176)]#微小時間h=0.01のときのため\nerror2 = [[] for i in range(176)]#微小時間h=0.02のときのため\nerror3 = [[] for i in range(176)]#微小時間h=0.03のときのため\nerror4 = [[] for i in range(176)]#微小時間h=0.04のときのため\n\n#range(20,5,1000)だと何回目の計算か分かりにくいので別にcountを定義してカウントする\ncount = 0\n\n#ホフメラー図を確認したところ、20番目のXはすでにカオスに移行していたので20番目から5飛ばしで1000まで行う\nfor i in range(20, 900, 5):\n\n # 誤差発達率を導出するための誤差を複数の40次元分用意する\n R = [[] for k in range(100)]\n for j in range(100):\n for k in range(40):\n R[j].append(np.random.randn()/10000)#これで40次元の誤差が100通りできた\n\n #Xのある点の各要素に誤差を加える\n DD = [[] for k in range(100)]#DDの要素はリストで、さらにそのリストは同一の点の100通りの誤差が入る\n for j in range(40):\n for k in range(100):\n DD[j].append( D[i][j] + R[k][j])#ある一つの点に100通りの誤差を加える\n\n #上で誤差を加えたXがルンゲクッタの時間発展とともにどれだけ誤差を大きくしていくかをみる\n for k in range(100):\n DDD1[k] = runge_kutta4.Lorenz96_RK4(DD[k], 0.01, N, 8.0)#微小時間h=0.01のときのため\n DDD2[k] = runge_kutta4.Lorenz96_RK4(DD[k], 0.02, N, 8.0)#微小時間h=0.02のときのため\n DDD3[k] = runge_kutta4.Lorenz96_RK4(DD[k], 0.03, N, 8.0)#微小時間h=0.03のときのため\n DDD4[k] = runge_kutta4.Lorenz96_RK4(DD[k], 0.04, N, 8.0)#微小時間h=0.04のときのため\n\n #ルンゲクッタでの時間発展の各ステップ段階での誤差をerrorリストに代入する\n for j in range(N):\n #deltaDはあるXの各要素の誤差を集めたリスト\n deltaD1 = [[] for k in range(176)]#微小時間h=0.01のときのため\n deltaD2 = [[] for k in range(176)]#微小時間h=0.02のときのため\n deltaD3 = [[] for k in range(176)]#微小時間h=0.03のときのため\n deltaD4 = [[] for k in range(176)]#微小時間h=0.04のときのため\n\n for k in range(100):\n deltaD1[k].append( D[i + (j+1)] - DDD1[k][j] ) #D[i + (j+1)]は、out.csvにある40次元Xの各要素のタイムステップあたりの真の値で、DDD[j]は誤差を含めて開始した各微小時間hごとに計算した値\n deltaD2[k].append( D[i + 2*(j+1)-1] - DDD2[k][j] )#以下jを二倍三倍四倍しているのは、元データのout.csvのタイムステップが0.01であり、それに合わせて時間の刻み幅を考える必要があるから\n deltaD3[k].append( D[i + 3*(j+1)-2] - DDD3[k][j] )\n deltaD4[k].append( D[i + 4*(j+1)-3] - DDD4[k][j] )\n #誤差の大きさ(ノルム)\n for j in range(176):\n for k in range(100):\n error1[j][k].append(np.linalg.norm(deltaD1[k]))#微小時間h=0.01のときのため\n error2[j][k].append(np.linalg.norm(deltaD2[k]))#微小時間h=0.02のときのため\n error3[j][k].append(np.linalg.norm(deltaD3[k]))#微小時間h=0.03のときのため\n error4[j][k].append(np.linalg.norm(deltaD4[k]))#微小時間h=0.04のときのため\n \n count+=1\n\n#AERは平均誤差発達率\nAER1 = []\nAER2 = []\nAER3 = []\nAER4 = []\n#erは各タイムステップごとの、アトラクタ上のある一点の100通りの誤差の平均を、さらにアトラクタ上の他の147通りで平均を取ったもののリスト\ner1 = []#h=0.01のとき\ner2 = []#h=0.02のとき\ner3 = []#h=0.03のとき\ner4 = []#h=0.04のとき\n#各微小時間ごとの、さらに時間ステップごとの誤差をリストに付け加えていく\nfor i in range(N):\n for j in range(176):\n er1.append(np.mean(error1[j][k] for k in range(100)))\n er2.append(np.mean(error2[j][k] for k in range(100)))\n er3.append(np.mean(error3[j][k] for k in range(100)))\n er4.append(np.mean(error4[j][k] for k in range(100)))\n AER1.append(np.mean(er1))\n AER2.append(np.mean(er2))\n AER3.append(np.mean(er3))\n AER4.append(np.mean(er4))\n \"\"\"\n AER1.append(np.mean([error1[k][i] for k in range(176)]))#kで繰り返しを行うことは、アトラクター上から採ってきたサンプルごとに考えているということ\n AER2.append(np.mean([error2[k][i] for k in range(176)]))#そして、error[k][i]をすべてのkで足し合わせることは、サンプルから出てきたタイムステップiのときの誤差を足し合わせることとなる\n AER3.append(np.mean([error3[k][i] for k in range(176)]))\n AER4.append(np.mean([error4[k][i] for k in range(176)]))\n 
\"\"\"\n\n#各微小時間ごとの平均誤差発達率をプロットする\nfig = plt.figure( figsize=(11, 5) )\nax1 = fig.add_subplot(121)\nax1.plot(AER1,label=\"h=0.01\")\nax1.plot(AER2,label=\"h=0.02\")\nax1.plot(AER3,label=\"h=0.03\")\nax1.plot(AER4,label=\"h=0.04\")\nax1.set_xlabel(\"タイ���ステップ数\",fontname=\"MS Gothic\")\nax1.set_ylabel(\"平均誤差発達率\",fontname=\"MS Gothic\")\nax1.set_xticks( np.arange(0, N+1, 2))\nax1.legend()\nax1.set_title(\"微小時間hごとの平均誤差発達率\",fontname=\"MS Gothic\")\n\nax2 = fig.add_subplot(122)\nax2.plot(AER1,label=\"h=0.01\")\nax2.set_xlabel(\"タイムステップ数\",fontname=\"MS Gothic\")\nax2.set_ylabel(\"平均誤差発達率\",fontname=\"MS Gothic\")\nax2.set_xticks( np.arange(0, N+1, 2))\nax2.axhline(y=2, color='gray', ls='--')\nax2.legend()\nax2.set_title(\"微小時間hごとの平均誤差発達率\",fontname=\"MS Gothic\")\n#lines = ax2.plot(ax2,'s-')\n#mplcursors.cursor(lines)\n\nplt.show()\n\n#nperror1 = np.array(error1)\n#print(len(nperror1[:,0]))\n\nprint(D[-1])\nprint(len(D[1]))\n#print(len(DDD1[1]))\n","sub_path":"kadai2/kadai2.py","file_name":"kadai2.py","file_ext":"py","file_size_in_byte":6427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"318771345","text":"input_arr = [3, 4, 1, 2, 16, 27, 13]\n\n\ndef quicksort(array):\n if len(array) < 2:\n return array\n else:\n pivot = array[0]\n less = [i for i in array[1:] if i <= pivot]\n greater = [i for i in array[1:] if i > pivot]\n return quicksort(less) + [pivot] + quicksort(greater)\n\n\ndef solve(source):\n odd_arr = [x for x in source if x % 2 == 1]\n even_arr = [x for x in source if x % 2 == 0]\n\n odd_arr = quicksort(odd_arr)\n even_arr = quicksort(even_arr)\n\n odd_arr.reverse()\n\n res = even_arr + odd_arr\n\n return res\n\n\nresult = solve(input_arr)\nprint(result)\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"361376730","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport argparse\nimport contextlib\nfrom os import ( listdir, mkdir )\nimport os.path as p\nimport shutil\nimport sys\nimport tempfile\nimport hashlib\n\nDIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) )\nDIR_OF_THIRD_PARTY = p.join( DIR_OF_THIS_SCRIPT, 'third_party' )\n\n\ndef GetStandardLibraryIndexInSysPath():\n for index, path in enumerate( sys.path ):\n if p.isfile( p.join( path, 'os.py' ) ):\n return index\n raise RuntimeError( 'Could not find standard library path in Python path.' )\n\n\ndef AddRequestDependencies():\n request_dep_root = p.abspath( p.join( DIR_OF_THIRD_PARTY,\n 'requests_deps' ) )\n for path in listdir( request_dep_root ):\n sys.path.insert( 0, p.join( request_dep_root, path ) )\n\n sys.path.insert( 0, p.abspath( p.join( DIR_OF_THIRD_PARTY,\n 'requests_deps',\n 'urllib3',\n 'src' ) ) )\n\n\nsys.path.insert( GetStandardLibraryIndexInSysPath() + 1,\n p.abspath( p.join( DIR_OF_THIRD_PARTY, 'python-future',\n 'src' ) ) )\nAddRequestDependencies()\n\n# Not installing aliases from python-future; it's unreliable and slow.\nfrom builtins import * # noqa\nfrom future.utils import iteritems\nimport requests\n\n\nURL_FORMAT = {\n 'release': ( \"https://github.com/OmniSharp/omnisharp-roslyn/\"\n \"releases/download/{version}/{file_name}\" ),\n 'ci': ( \"https://roslynomnisharp.blob.core.windows.net/\"\n \"releases/{version}/{file_name}\" ),\n}\nFILE_NAME = {\n 'win32': 'omnisharp.http-win-x86.zip',\n 'win64': 'omnisharp.http-win-x64.zip',\n 'macos': 'omnisharp.http-osx.tar.gz',\n 'linux32': 'omnisharp.http-linux-x86.tar.gz',\n 'linux64': 'omnisharp.http-linux-x64.tar.gz',\n}\n\n\n@contextlib.contextmanager\ndef TemporaryDirectory():\n temp_dir = tempfile.mkdtemp()\n try:\n yield temp_dir\n finally:\n shutil.rmtree( temp_dir )\n\n\ndef Download( url ):\n print( 'Downloading {}'.format( url.rsplit( '/', 1 )[ -1 ] ) )\n request = requests.get( url, stream=True )\n request.raise_for_status()\n content = request.content\n request.close()\n return content\n\n\ndef ParseArguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument( 'version', action='store',\n help = 'The Omnisharp version' )\n parser.add_argument( '--cache-dir', action='store',\n help = 'For testing, directory to cache packages.' 
)\n\n  args = parser.parse_args()\n\n  return args\n\n\ndef GetDownloadUrl( version, file_name ):\n  download_url_key = 'ci' if \"-\" in version else 'release'\n\n  return URL_FORMAT[ download_url_key ].format( version = version,\n                                                file_name = file_name )\n\n\ndef FetchAndHash( download_url, output_dir, file_name ):\n  try:\n    archive = p.join( output_dir, file_name )\n    if not p.exists( archive ):\n      compressed_data = Download( download_url )\n      with open( archive, 'wb' ) as f:\n        f.write( compressed_data )\n  except requests.exceptions.HTTPError as error:\n    if error.response.status_code != 404:\n      raise\n    print( 'Cannot download {}'.format( file_name ) )\n    return\n\n  with open( archive, 'rb' ) as f:\n    return hashlib.sha256( f.read() ).hexdigest()\n\n\ndef Process( output_dir, version ):\n  result = {}\n\n  for os_name, file_name in iteritems( FILE_NAME ):\n    download_url = GetDownloadUrl( version, file_name )\n    result[ os_name ] = {\n      'version': version,\n      'download_url': download_url,\n      'file_name': file_name,\n      'check_sum': FetchAndHash( download_url, output_dir, file_name )\n    }\n\n  return result\n\n\ndef MkDirIfMissing( dir ):\n  try:\n    mkdir( dir )\n  except OSError:\n    pass\n\n\ndef Main():\n  args = ParseArguments()\n  version = args.version\n\n  if args.cache_dir:\n    MkDirIfMissing( args.cache_dir )\n    cache_dir = p.join( args.cache_dir, version )\n    MkDirIfMissing( cache_dir )\n    output = Process( cache_dir, version )\n  else:\n    with TemporaryDirectory() as temp_dir:\n      output = Process( temp_dir, version )\n\n  print( \"Omnisharp configuration for {} is:\".format( version ) )\n  for os_name, os_data in iteritems( output ):\n    print( \"  {}: {{\".format( repr( os_name ) ) )\n    for key, value in iteritems( os_data ):\n      line = \"    {}: {},\".format( repr( key ), repr( value ) )\n      if len( line ) > 80:\n        line = \"    {}: ( {} ),\".format( repr( key ), repr( value ) )\n        format_index = line.index( '(' ) + 2\n        while len( line ) > 80:\n          print( line[ 0:78 ] + \"'\" )\n          line = ( ' ' * format_index ) + \"'\" + line[ 78: ]\n      print( line )\n    print( \"  },\" )\n\n\nif __name__ == \"__main__\":\n  Main()\n","sub_path":"my_plugins/YouCompleteMe/third_party/ycmd/update_omnisharp.py","file_name":"update_omnisharp.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
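GetDownloadUrl above switches on a '-' in the version string: plain release tags resolve to GitHub release assets, while suffixed CI versions resolve to the Azure blob store. A quick sketch (the version strings are illustrative):

print(GetDownloadUrl('v1.32.8', 'omnisharp.http-osx.tar.gz'))
# https://github.com/OmniSharp/omnisharp-roslyn/releases/download/v1.32.8/omnisharp.http-osx.tar.gz
print(GetDownloadUrl('1.32.8-beta52', 'omnisharp.http-osx.tar.gz'))
# https://roslynomnisharp.blob.core.windows.net/releases/1.32.8-beta52/omnisharp.http-osx.tar.gz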
+{"seq_id":"326570007","text":"\"\"\"\nLauncher of telegram messages handler module\n\nCreated on 7/1/2015 by rdvlip.\n\"\"\"\n\n__author__ = 'rdvlip'\n\nimport os\nimport logging\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport redis\nimport tornado.ioloop\n\nfrom hs_telegram.api import Api\nfrom hs_telegram import message_router\n\nlogging.basicConfig(level=logging.INFO, format=\"[%(levelname)s] %(asctime)s [%(name)s] %(message)s\")\nlogging.getLogger('urllib3').setLevel(logging.WARN)\n\nlogger = logging.getLogger(\"hs_telegram.main\")\n\n\nclass Main:\n def __init__(self):\n self.router = None\n self.api = None\n self.token = None\n self.pubsub = None\n\n self.thread_pool = ThreadPoolExecutor(1)\n self.ioloop = tornado.ioloop.IOLoop.instance()\n\n def _prepare_api(self):\n self.token = os.environ['TELEGRAM_TOKEN']\n self.api = Api(self.token)\n self.api.check_token()\n\n def _get_message(self):\n message = self.pubsub.get_message(ignore_subscribe_messages=True)\n if message:\n self.router.process_redis_message(message)\n self.ioloop.add_callback(self._get_message)\n\n def run(self):\n logger.info('Initializing...')\n self._run()\n logger.info('Initialization completed')\n self.ioloop.start()\n\n def _run(self):\n self._prepare_api()\n self.router = message_router.MessageRouter(self.api)\n\n r = redis.StrictRedis(host='redis', port=6379, db=0)\n p = self.pubsub = r.pubsub()\n\n for subscr_pattern in self.router.redis_mapping.keys():\n p.psubscribe(subscr_pattern)\n\n self._get_message()\n\n\n\n","sub_path":"hs_telegram/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"79921519","text":"# Copyright 2014 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Interface to OpenHTF configuration files.\n\nOpenHTF configuration files contain values which are specific to an individual\nstation. Any values which apply to all stations of a given type should be\nhandled by FLAGS or another mechanism.\n\nConfig keys must be declared as in the following example:\n\nconf.Declare('antimatter_intermix_constant',\n description='Intermix constant calibrated for our warp core.')\n\nDeclared keys can later be accessed by instantiating a Config object:\n\n...\nconfig = conf.Config()\nwarp_core.SetIntermixConstant(config.antimatter_intermix_constant)\n\"\"\"\n\nimport copy\nimport functools\nimport inspect\nimport logging\nimport threading\nimport yaml\n\nimport gflags\nimport mutablerecords\n\nfrom openhtf.util import threads\n\n\nFLAGS = gflags.FLAGS\n\ngflags.DEFINE_string('config',\n '/usr/local/openhtf_client/config/clientfoo.yaml',\n 'The OpenHTF configuration file for this tester')\n\ngflags.DEFINE_multistring(\n 'config_value', [], 'Allows specifying a configuration key=value '\n 'on the command line. The format should be --config_value key=value. '\n 'This value will override any existing config value at config load time '\n 'and will be a string')\n\nConfigurationDeclaration = ( # pylint: disable=invalid-name\n mutablerecords.Record(\n 'ConfigurationDeclaration',\n ['name'],\n {'description': None, 'default_value': None, 'optional': True}))\n\n_LOG = logging.getLogger(__name__)\n\nclass ConfigurationNotLoadedError(Exception):\n \"\"\"Raised if a configuration variable is accessed before it is loaded.\n\n This helps protect against a class of errors where people try to access the\n configuration at import time when it hasn't been loaded by the main function\n yet.\n \"\"\"\n\n\nclass ConfigurationMissingError(Exception):\n \"\"\"Indicates the configuration file could not be read.\"\"\"\n\n\nclass ConfigurationInvalidError(Exception):\n \"\"\"Indicates the configuration format was invalid.\"\"\"\n\n\nclass ConfigurationAlreadyDeclared(Exception):\n \"\"\"Indicates that a configuration key was already declared.\"\"\"\n\n\nclass MissingRequiredConfigurationKeyError(Exception):\n \"\"\"Indicates a required configuration key is missing.\"\"\"\n\n\nclass UndeclaredKeyAccessError(Exception):\n \"\"\"Indicates that a key was required but not predeclared.\"\"\"\n\n\nclass ConfigurationValidationError(Exception):\n \"\"\"If a configuration value could not be validated as its expected type.\"\"\"\n\n def __init__(self, name, declaration, value):\n super(ConfigurationValidationError, self).__init__(\n name, declaration, value)\n self.name = name\n self.declaration = declaration\n self.value = value\n\n def __str__(self):\n return ('<%s: (Configuration error on key: %s (type: %s, value: %s))>' %\n (type(self).__name__, self.name, self.declaration.type.name, self.value))\n\n\nclass _DeclaredKeys(object):\n \"\"\"An object which 
manages config declarations.\n\n This object is a helper for Config. It provides locked access to a map of\n declarations and processes configuration values against the declaration. It\n must be guarded since declarations are updated at import time and if something\n is lazily imported we could race between updating the declarations map and\n reading it to check a config value.\n\n Not thread-safe, requires an external lock!\n \"\"\"\n\n def __init__(self):\n self._declared = {}\n\n def Declare(self, name, declaration):\n \"\"\"Adds a declared key to this list of Declared Keys.\n\n Args:\n name: The name of this value.\n declaration: A _DeclaredKeys.DECLARATION object.\n\n Raises:\n ConfigurationAlreadyDeclared: If a declaration already exists for\n this key.\n \"\"\"\n if name in self._declared:\n raise ConfigurationAlreadyDeclared(name)\n self._declared[name] = declaration\n\n def CheckValueAgainstDeclaration(self, name, value):\n \"\"\"Checks that the provided value is valid given its declaration.\n\n Args:\n name: Name of configuration key.\n value: Value from configuration.\n\n Returns:\n The config value if provided, or the default_value if given.\n\n Raises:\n UndeclaredKeyAccessError: If key 'name' is undeclared.\n MissingRequiredConfigurationKeyError: If key is required and value\n is None\n \"\"\"\n declaration = self._declared.get(name, None)\n\n if not declaration:\n raise UndeclaredKeyAccessError(name)\n\n if (value is None\n and declaration.default_value is None\n and not declaration.optional):\n raise MissingRequiredConfigurationKeyError(\n declaration.name, declaration.description)\n\n if value is None:\n return declaration.default_value\n return value\n\n def __contains__(self, name): # pylint: disable=invalid-name\n return name in self._declared\n\n def __getitem__(self, name): # pylint: disable=invalid-name\n return self._declared[name]\n\n def __copy__(self): # pylint: disable=invalid-name\n self_copy = type(self)()\n for name, declaration in self._declared.iteritems():\n self_copy.Declare(name, declaration)\n return self_copy\n\n\nclass ConfigModel(object):\n \"\"\"A model that holds the underlying config keys and their values.\n\n By isolating the underlying model it provides a way to lock access to the\n dictionary so we can reload it on demand or otherwise poke it.\n \"\"\"\n\n def __init__(self, state=None, declarations=None):\n \"\"\"Initializes the model.\n\n Args:\n state: A dictionary containing configuration key, values. By default a\n new one is created. 
If one is provided the model is marked as\n loaded.\n declarations: An object which tracks declared keys, if not provided\n a new one is constructed.\n \"\"\"\n self._state = state if state is not None else {}\n self._declarations = declarations or _DeclaredKeys()\n self._loaded = state is not None\n self.lock = threading.Lock()\n\n # pylint: disable=missing-docstring\n @property\n @threads.Synchronized\n def loaded(self):\n return self._loaded\n\n @property\n @threads.Synchronized\n def state(self):\n return self._state.copy()\n\n @property\n @threads.Synchronized\n def declarations(self):\n return copy.copy(self._declarations)\n\n @threads.Synchronized\n def Items(self):\n return self._state.items()\n\n @threads.Synchronized\n def GetValue(self, name, default=None):\n value = self._state.get(name, default)\n return self._declarations.CheckValueAgainstDeclaration(name, value)\n\n @threads.Synchronized\n def ContainsKey(self, name):\n return name in self._state\n\n # pylint: enable=missing-docstring\n\n @threads.Synchronized\n def Load(self, config_file=None, force_reload=False,\n config_loader=lambda fname: open(fname, 'r')):\n \"\"\"Loads the configuration file from disk.\n\n Args:\n config_file: The file name to load configuration from.\n Defaults to FLAGS.config.\n force_reload: If true this method will ignore the loaded state and reload\n the config from disk.\n config_loader: A callable which returns a file object when given a\n filename, defaults to open.\n\n Returns:\n True if configuration was loaded, False if already loaded.\n Raises:\n ConfigurationMissingError: If configuration file could not be read\n ConfigurationInvalidError: If configuration file is not valid yaml\n \"\"\"\n if not force_reload and self._loaded:\n return False\n\n try:\n filename = config_file or FLAGS.config\n _LOG.info('Loading from config: %s', filename)\n\n with config_loader(filename) as config_file:\n data = yaml.safe_load(config_file)\n if not data:\n raise ConfigurationInvalidError('No data', config_file)\n self._state.clear()\n self._state.update(data)\n\n # Load string values from flags\n for keyval in FLAGS.config_value:\n key, val = keyval.split('=')\n self._state[key] = val\n\n self._loaded = True\n _LOG.debug('Configuration loaded: %s', self._state)\n except yaml.YAMLError as exception:\n _LOG.exception('Failed to load yaml file: %s', filename)\n raise ConfigurationInvalidError(filename, exception)\n except IOError as exception:\n _LOG.exception('Configuration failed loaded: %s', filename)\n raise ConfigurationMissingError(filename, exception)\n\n return True\n\n @threads.Synchronized\n def LoadFromDict(self, dictionary, force_reload=False):\n \"\"\"Loads the config with values from a dictionary instead of a file.\n\n This is meant for testing and bin purposes and shouldn't be used in most\n applications.\n\n Args:\n dictionary: The dictionary to update.\n force_reload: True to force a load if the config is already loaded.\n Returns:\n True if successful.\n \"\"\"\n if not force_reload and self._loaded:\n return False\n\n self._state.clear()\n self._state.update(dictionary)\n self._loaded = True\n return True\n\n @threads.Synchronized\n def LoadMissingFromDict(self, config_dict):\n \"\"\"Update any missing configurations from the given dictionary.\n\n This is similar to dict.update, except that instead of the given\n dictionary's values overriding the already set values, this function doesn't\n override. 
This is due to the fact that these configs can only be retrieved\n after we've already loaded the authoritative values.\n\n Args:\n config_dict: Dictionary from which to load configuration keys and values.\n\n Raises:\n ConfigurationNotLoadedError: Raised when updating empty config values.\n \"\"\"\n # Can't update only missing when it's all missing.\n if not self._loaded:\n raise ConfigurationNotLoadedError(\n 'Load configuration before updating missing keys.')\n\n for key, value in config_dict.items():\n if key in self._state:\n continue\n self._state[key] = value\n\n @threads.Synchronized\n def Reset(self):\n \"\"\"Resets the configuration, removing any state, useful for testing.\n\n Careful calling this, the reason we get away with not locking the dict is\n because we never call this in practice. If that changes then we need to\n guard it with a lock.\n \"\"\"\n self._state.clear()\n self._loaded = False\n\n @threads.Synchronized\n def Declare(self, name, description=None, **kwargs):\n \"\"\"Declares the use of a configuration variable.\n\n Currently all configuration variables must be declared. If a key is\n accessed in the config without being declared then chaos will ensue. If a\n file wants to access a key another module has declared they are\n encouraged to use extern.\n\n Args:\n name: The name of the key.\n description: Docstring for the key, if any.\n **kwargs: See ConfigurationDeclaration's fields.\n \"\"\"\n declaration = ConfigurationDeclaration(\n name, description=description, **kwargs)\n self._declarations.Declare(name, declaration)\n\n\nclass Config(object):\n \"\"\"The configuration read from a config file, or populated directly.\n\n This class uses the borg design pattern so all instances share the same\n state. This is fine since the load only occurs on the main thread and from\n then on out the class is effectively read only.\n\n Example Usage:\n configuration.Load() # called once early\n\n # Can be done anywhere and in multiple places without worrying about loading\n config = Config()\n if config.url:\n print config.url\n \"\"\"\n model = ConfigModel()\n\n def __init__(self, model=None):\n \"\"\"Initializes the configuration object with its shared state.\n\n Args:\n model: The data model to use, defaults to the one shared amongst all config\n objects.\n \"\"\"\n self.model = model or Config.model\n\n # pylint: disable=missing-docstring\n @property\n def dictionary(self):\n if not self.loaded:\n raise ConfigurationNotLoadedError()\n return self.model.state\n\n @property\n def loaded(self):\n return self.model.loaded\n\n # pylint: enable=missing-docstring\n\n def __getattr__(self, name): # pylint: disable=invalid-name\n \"\"\"Searches for the value in our config, returning it if found.\n\n Args:\n name: name of attribute\n Returns:\n None if key not found and is not required, otherwise the value.\n Raises:\n MissingRequiredConfigurationKeyError: If the key was declared required and\n is not found.\n UndeclaredKeyAccessError: If the key being accessed was not\n declared.\n ConfigurationNotLoadedError: If the config file has not been loaded, this\n typically means you accessed the config at import time.\n \"\"\"\n if not self.model.loaded:\n raise ConfigurationNotLoadedError(name)\n return self.model.GetValue(name)\n\n def __contains__(self, name): # pylint: disable=invalid-name\n \"\"\"Provides the ability to quickly check if a config key is declared.\"\"\"\n return self.model.ContainsKey(name)\n\n def __getitem__(self, key): # pylint: disable=invalid-name\n \"\"\"Allows access to 
config items via an indexer.\"\"\"\n return self.__getattr__(key)\n\n def __repr__(self):\n return '<Config: (loaded: %s: 0x%x)>' % (self.model.loaded, id(self))\n\n def CreateStackedConfig(self, model):\n \"\"\"Stacks a new model onto the current model, creating a new config.\n\n Args:\n model: A ConfigModel instance or a dict of values that can be converted\n into a ConfigModel instance. If a dict, the declarations of this\n object will be used.\n Returns:\n A new StackedConfig instance with model superseding the current model.\n \"\"\"\n if not isinstance(model, ConfigModel):\n model = ConfigModel(state=model, declarations=self.model.declarations)\n return StackedConfig([self.model, model])\n\n\nclass StackedConfig(Config):\n \"\"\"Stacked version of Config.\n\n This is a layered (or stacked) Config that allows users to make one set of\n config values supersede another set.\n \"\"\"\n\n # pylint: disable=super-init-not-called\n def __init__(self, models=(Config.model,)):\n self._models = list(models)\n\n def CreateStackedConfig(self, model):\n \"\"\"Stacks a new model onto the current models, creating a new config.\n\n Args:\n model: A ConfigModel instance or a dict of values that can be converted\n into a ConfigModel instance. If a dict, the declarations of the top of\n the stack will be used.\n Returns:\n A new StackedConfig instance with model superseding the current models.\n \"\"\"\n if not isinstance(model, ConfigModel):\n model = ConfigModel(\n state=model, declarations=self._models[0].declarations)\n return StackedConfig(self._models + [model])\n\n @property\n def dictionary(self):\n if not self.loaded:\n raise ConfigurationNotLoadedError()\n results = {}\n for model in self._models:\n results.update(model.state)\n return results\n\n @property\n def loaded(self):\n return any(model.loaded for model in self._models)\n\n def __getattr__(self, name):\n if not self.loaded:\n raise ConfigurationNotLoadedError(name)\n for model in self._models:\n if model.ContainsKey(name):\n return model.GetValue(name)\n return self._models[-1].GetValue(name)\n\n def __contains__(self, name):\n return any(model.ContainsKey(name) for model in self._models)\n\n def __str__(self):\n return '<%s: (loaded: %s: 0x%x)>' % (type(self).__name__, self.loaded, id(self))\n __repr__ = __str__\n\n\nclass ConfigValue(object): # pylint: disable=too-few-public-methods\n \"\"\"A thin wrapper which may be used to pass around a config value.\n\n This is useful when things require a value at import time yet config values\n are not available until runtime. By wrapping the key you want in this object,\n other objects which are aware of it can call it to retrieve the value at a\n later time (i.e. runtime). This is not a magic bullet, whatever you're\n calling must be ready for a ConfigValue or similar to be provided.\n\n The value_fn parameter allows a function to be specified at import time which\n will be performed on the retrieved config value at runtime. 
This is useful for\n retrieving an inner-value of a config value, such as indexing into an\n array/dict config value.\n \"\"\"\n\n def __init__(self, config_key, config=None, value_fn=None):\n self.config = config or Config()\n self.config_key = config_key\n self.value_fn = value_fn\n\n @property\n def value(self):\n \"\"\"Resolves the value returning the config value.\"\"\"\n if self.value_fn is None:\n return self.config[self.config_key]\n else:\n return self.value_fn(self.config[self.config_key])\n\n def __call__(self): # pylint: disable=invalid-name\n \"\"\"Returns the config value.\"\"\"\n return self.value\n\n def __str__(self):\n return '<%s: (ConfigKey: %s)>' % (type(self).__name__, self.config_key)\n __repr__ = __str__\n\n\ndef Extern(dummy_name): # pylint: disable=invalid-name\n \"\"\"Declares that a module uses a key declared elsewhere.\n\n This function does nothing but serve as a marker at the top of your file that\n you're using a config key, which improves readability greatly. You're\n encouraged to use this. That said, since declaration of keys isn't checked\n until a key is used, and since this function does nothing, everything will still\n work without it.\n\n Args:\n dummy_name: The name of the key.\n \"\"\"\n\n\ndef InjectPositionalArgs(method): # pylint: disable=invalid-name\n \"\"\"Decorator for injecting positional arguments from the configuration.\n\n This decorator wraps the given method, so that any positional arguments are\n passed with corresponding values from the configuration. The name of the\n positional argument must match the configuration key. Keyword arguments are\n not modified, but should not be named such that they match configuration keys\n anyway (this will result in a warning message).\n\n Additional positional arguments may be used that do not appear in the\n configuration, but those arguments *must* be specified as keyword arguments\n upon invocation of the method. This is to avoid ambiguity in which\n positional arguments are getting which values.\n\n Args:\n method: The method to wrap.\n\n Returns:\n A wrapper that, when invoked, will call the wrapped method, passing in\n configuration values for positional arguments.\n \"\"\"\n argspec = inspect.getargspec(method)\n\n # Index in argspec.args of the first keyword argument. This index is a\n # negative number if there are any kwargs, or 0 if there are no kwargs.\n keyword_arg_index = -1 * len(argspec.defaults or [])\n arg_names = argspec.args[:keyword_arg_index or None]\n kwarg_names = argspec.args[len(arg_names):]\n\n # Create the actual method wrapper, all we do is update kwargs. Note we don't\n # pass any *args through because there can't be any - we've filled them all in\n # with values from the configuration. 
Any positional args that are missing\n # from the configuration *must* be explicitly specified as kwargs.\n @functools.wraps(method)\n def method_wrapper(**kwargs):\n \"\"\"Wrapper that pulls values from the Config().\"\"\"\n config = Config()\n\n # Check for keyword args with names that are in the config so we can warn.\n for bad_name in set(kwarg_names) & set(config.dictionary.keys()):\n _LOG.warning('Keyword arg %s not set from configuration, but is a '\n 'configuration key', bad_name)\n\n # Set positional args from configuration values.\n config_args = {name: config[name] for name in arg_names if name in config}\n\n for overridden in set(kwargs) & set(config_args):\n _LOG.warning('Overriding provided kwarg %s=%s with value %s from '\n 'configuration', overridden, kwargs[overridden],\n config_args[overridden])\n kwargs.update(config_args)\n _LOG.info('Invoking %s with %s', method.__name__, kwargs)\n return method(**kwargs)\n\n # We have to check for a 'self' parameter explicitly because Python doesn't\n # pass it as a keyword arg, it passes it as the first positional arg.\n if 'self' == argspec.args[0]:\n @functools.wraps(method)\n def SelfWrapper(self, **kwargs): # pylint: disable=invalid-name,missing-docstring\n kwargs['self'] = self\n return method_wrapper(**kwargs)\n return SelfWrapper\n return method_wrapper\n\n\n# pylint: disable=invalid-name\nDeclare = Config().model.Declare\nLoad = Config().model.Load\nLoadMissingFromDict = Config().model.LoadMissingFromDict\nLoadFromDict = Config().model.LoadFromDict\nReset = Config().model.Reset\n\n# Everywhere that uses configuration uses this, so we just declare it here.\nDeclare('station_id', 'The name of this tester')\n","sub_path":"openhtf/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":21116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"602072057","text":"# coding=utf-8\nimport time\nimport abc\nfrom collections import OrderedDict\n\nfrom ruamel.yaml import dump as ydump, load as yload, RoundTripDumper, resolver, add_constructor, add_representer\n\nfrom src.meta.abstract import AbstractMeta\nfrom utils.custom_logging import make_logger\nfrom utils.custom_path import Path\n\nlogger = make_logger(__name__)\n\n\n_yaml_mapping = resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n\ndef odict_represent(dumper, data):\n return dumper.represent_dict(data.iteritems())\n\n\ndef odict_construct(loader, node):\n return OrderedDict(loader.construct_pairs(node))\n\n\nadd_representer(OrderedDict, odict_represent)\nadd_constructor(_yaml_mapping, odict_construct)\n\n\nclass Meta(AbstractMeta):\n\n @property\n @abc.abstractmethod\n def meta_header(self):\n \"\"\"\"\"\"\n\n @property\n @abc.abstractmethod\n def meta_version(self):\n \"\"\"\"\"\"\n\n @abc.abstractmethod\n def meta_version_upgrade(self, from_version):\n \"\"\"\"\"\"\n\n def __init__(self, path: str or Path, init_dict: OrderedDict = None, auto_read=True, encrypted=False):\n self.free = True\n self.encrypt = encrypted\n\n if init_dict is None:\n self._data = OrderedDict()\n\n else:\n\n if not isinstance(init_dict, OrderedDict):\n raise TypeError('expected a OrderedDict, got \"{}\"'.format(type(init_dict)))\n\n self._data = init_dict\n\n self._values, self._keys, self._items = None, None, None\n self._init_views()\n\n if isinstance(path, Path):\n pass\n\n elif isinstance(path, str):\n path = Path(path)\n\n else:\n raise TypeError('expected a Path or a str, got: {}'.format(type(path)))\n\n self._path = path\n\n if auto_read:\n self.read()\n\n @property\n def path(self) -> Path:\n return self._path\n\n @path.setter\n def path(self, value: str or Path):\n\n if isinstance(value, Path):\n pass\n\n elif isinstance(value, str):\n value = Path(value)\n\n else:\n raise TypeError('expected Path or str, got: {}'.format(type(value)))\n\n self._path = value\n\n # noinspection PyArgumentList\n def _init_views(self):\n self._values = self._data.values()\n self._keys = self._data.keys()\n self._items = self._data.items()\n\n @property\n def data(self):\n return self._data\n\n def get_context(self):\n return self.data\n\n @data.setter\n def data(self, value: OrderedDict):\n\n if not isinstance(value, OrderedDict):\n raise TypeError('expected a OrderedDict, got \"{}\"'.format(type(value)))\n\n self._data = value\n self._init_views()\n\n def __len__(self):\n # noinspection PyTypeChecker\n return len(self.data)\n\n def __iter__(self):\n for k in self.keys():\n yield k\n\n def __contains__(self, x):\n # noinspection PyArgumentList\n return self._data.__contains__(x)\n\n def __delitem__(self, key, _write=False):\n del self.data[key]\n\n if _write:\n self.write()\n\n def __setitem__(self, key, value, _write=False):\n self.data[key] = value\n\n if _write:\n self.write()\n\n def __getitem__(self, key):\n return self._data.get(key, None)\n\n def __str__(self):\n # noinspection PyArgumentList\n return self.data.__str__()\n\n def __repr__(self):\n return '{}: {}'.format(self.__class__.__name__, self.data.__repr__())\n\n def get(self, key, default=None):\n return self._data.get(key, default)\n\n def keys(self):\n return self._keys\n\n def values(self):\n return self._values\n\n def items(self):\n return self._items\n\n def debug(self, txt: str):\n logger.debug('{}: {}'.format(self.path.abspath(), txt))\n\n def exception(self, txt: str):\n logger.debug('{}: {}'.format(self.path.abspath(), txt))\n\n def 
dump(self):\n return ydump(self.data, Dumper=RoundTripDumper, default_flow_style=False)\n\n def load(self, data):\n self.data = yload(data)\n\n def read(self):\n\n self.wait_for_lock()\n\n meta_updated = False\n\n try:\n\n if self.path.exists():\n\n if self.path.getsize() == 0:\n self.debug('{}: removing existing empty file: {}'.format(self.__class__.__name__, self.path))\n self.path.remove()\n\n return\n\n try:\n\n if self.encrypt:\n self.load(self.path.bytes())\n\n else:\n self.load(self.path.text(encoding='utf8'))\n\n except ValueError:\n raise ValueError('{}: metadata file corrupted'.format(self.path.abspath()))\n\n else:\n try:\n if not self.data['meta_header'] == self.meta_header:\n raise TypeError('meta header mismatch, expected: \"{}\", got: \"{}\" on file: {}'.format(\n self.meta_header, self.data['meta_header'], self.path.abspath()\n ))\n else:\n del self.data['meta_header']\n\n except KeyError:\n pass\n\n meta_updated = self.data['meta_version'] < self.meta_version\n\n while self.data['meta_version'] < self.meta_version:\n current_version = self.data['meta_version']\n next_version = self.data['meta_version'] + 1\n logger.debug('upgrading meta from version \"{}\"'.format(current_version))\n\n if not self.meta_version_upgrade(current_version):\n raise RuntimeError('failed to upgrade metadata to version \"{}\"'.format(next_version))\n\n else:\n logger.debug('successfully upgraded meta to version \"{}\"'.format(next_version))\n\n self.data['meta_version'] = next_version\n\n except OSError:\n self.exception('error while reading metadata file')\n\n finally:\n self.free = True\n\n if meta_updated:\n self.write()\n\n def write(self):\n # noinspection PyTypeChecker\n if len(self._data) == 0:\n raise ValueError('no data to write')\n\n self.wait_for_lock()\n self.data['meta_header'] = self.meta_header\n self.data['meta_version'] = self.meta_version\n\n try:\n\n if self.encrypt:\n self.path.write_bytes(self.dump().encode('utf8'))\n\n else:\n self.path.write_text(self.dump(), encoding='utf8')\n\n except OSError:\n self.exception('error while writing metadata to file')\n\n finally:\n self.free = True\n\n def wait_for_lock(self):\n i = 0\n\n while not self.free:\n time.sleep(0.1)\n i += 1\n\n if i == 10:\n self.debug('waiting for resource lock')\n i = 0\n\n self.free = False\n\n @staticmethod\n def read_header(path):\n\n path = Path(path)\n data = yload(path.text(encoding='utf8'))\n\n return data['meta_header']\n\n\ndef read_meta_header(meta_file_path: Path or str):\n return Meta.read_header(meta_file_path)\n","sub_path":"src/meta/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":7384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"390502970","text":"# This file is part of PyOP2\n#\n# PyOP2 is Copyright (c) 2012, Imperial College London and\n# others. Please see the AUTHORS file in the main source directory for\n# a full list of copyright holders. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * The name of Imperial College London or that of other\n# contributors may not be used to endorse or promote products\n# derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS\n# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\n# OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Transform the kernel's AST according to the backend we are running over.\"\"\"\n\nfrom ast_base import *\nfrom ast_optimizer import LoopOptimiser\nfrom ast_vectorizer import init_vectorizer, LoopVectoriser, vectorizer_init\n\n# Possibile optimizations\nAUTOVECT = 1 # Auto-vectorization\nV_OP_PADONLY = 2 # Outer-product vectorization + extra operations\nV_OP_PEEL = 3 # Outer-product vectorization + peeling\nV_OP_UAJ = 4 # Outer-product vectorization + unroll-and-jam\nV_OP_UAJ_EXTRA = 5 # Outer-product vectorization + unroll-and-jam + extra iters\n\n# Track the scope of a variable in the kernel\nLOCAL_VAR = 0 # Variable declared and used within the kernel\nPARAM_VAR = 1 # Variable is a kernel parameter (ie declared in the signature)\n\n\nclass ASTKernel(object):\n\n \"\"\"Manipulate the kernel's Abstract Syntax Tree.\n\n The single functionality present at the moment is provided by the plan_gpu\n method, which transforms the AST for GPU execution.\n \"\"\"\n\n def __init__(self, ast):\n self.ast = ast\n self.decls, self.fors = self._visit_ast(ast, fors=[], decls={})\n\n def _visit_ast(self, node, parent=None, fors=None, decls=None):\n \"\"\"Return lists of:\n - Declarations within the kernel\n - Loop nests\n - Dense Linear Algebra Blocks\n that will be exploited at plan creation time.\"\"\"\n\n if isinstance(node, Decl):\n decls[node.sym.symbol] = (node, LOCAL_VAR)\n return (decls, fors)\n elif isinstance(node, For):\n fors.append((node, parent))\n return (decls, fors)\n elif isinstance(node, FunDecl):\n self.fundecl = node\n for d in node.args:\n decls[d.sym.symbol] = (d, PARAM_VAR)\n elif isinstance(node, (FlatBlock, PreprocessNode, Symbol)):\n return (decls, fors)\n\n for c in node.children:\n self._visit_ast(c, node, fors, decls)\n\n return (decls, fors)\n\n 
def plan_gpu(self):\n \"\"\"Transform the kernel suitably for GPU execution.\n\n Loops decorated with a \"pragma pyop2 itspace\" are hoisted out of\n the kernel. The list of arguments in the function signature is\n enriched by adding iteration variables of hoisted loops. Sizes of the\n kernel's non-constant tensors modified in hoisted loops are adjusted\n accordingly.\n\n For example, consider the following function:\n\n void foo (int A[3]) {\n int B[3] = {...};\n #pragma pyop2 itspace\n for (int i = 0; i < 3; i++)\n A[i] = B[i];\n }\n\n plan_gpu modifies its AST such that the resulting output code is\n\n void foo(int A[1], int i) {\n A[0] = B[i];\n }\n \"\"\"\n\n lo = [LoopOptimiser(l, pre_l, self.decls) for l, pre_l in self.fors]\n for nest in lo:\n itspace_vrs, accessed_vrs = nest.extract_itspace()\n\n for v in accessed_vrs:\n # Change declaration of non-constant iteration space-dependent\n # parameters by shrinking the size of the iteration space\n # dimension to 1\n decl = set(\n [d for d in self.fundecl.args if d.sym.symbol == v.symbol])\n dsym = decl.pop().sym if len(decl) > 0 else None\n if dsym and dsym.rank:\n dsym.rank = tuple([1 if i in itspace_vrs else j\n for i, j in zip(v.rank, dsym.rank)])\n\n # Remove indices of all iteration space-dependent and\n # kernel-dependent variables that are accessed in an itspace\n v.rank = tuple([0 if i in itspace_vrs and dsym else i\n for i in v.rank])\n\n # Add iteration space arguments\n self.fundecl.args.extend([Decl(\"int\", c_sym(\"%s\" % i))\n for i in itspace_vrs])\n\n # Clean up the kernel removing variable qualifiers like 'static'\n for decl in self.decls.values():\n d, place = decl\n d.qual = [q for q in d.qual if q not in ['static', 'const']]\n\n if hasattr(self, 'fundecl'):\n self.fundecl.pred = [q for q in self.fundecl.pred\n if q not in ['static', 'inline']]\n\n def plan_cpu(self, opts):\n \"\"\"Transform and optimize the kernel suitably for CPU execution.\"\"\"\n\n # Fetch user-provided options/hints on how to transform the kernel\n licm = opts.get('licm')\n tile = opts.get('tile')\n vect = opts.get('vect')\n ap = opts.get('ap')\n\n v_type, v_param = vect if vect else (None, None)\n tile_opt, tile_sz = tile if tile else (False, -1)\n\n lo = [LoopOptimiser(l, pre_l, self.decls) for l, pre_l in self.fors]\n for nest in lo:\n # 1) Loop-invariant code motion\n inv_outer_loops = []\n if licm:\n inv_outer_loops = nest.op_licm() # noqa\n self.decls.update(nest.decls)\n\n # 2) Register tiling\n if tile_opt and v_type == AUTOVECT:\n nest.op_tiling(tile_sz)\n\n # 3) Vectorization\n if vectorizer_init:\n vect = LoopVectoriser(nest)\n if ap:\n vect.align_and_pad(self.decls)\n if v_type != AUTOVECT:\n vect.outer_product(v_type, v_param)\n\n\ndef init_ir(isa, compiler):\n \"\"\"Initialize the Intermediate Representation engine.\"\"\"\n\n init_vectorizer(isa, compiler)\n","sub_path":"pyop2/ir/ast_plan.py","file_name":"ast_plan.py","file_ext":"py","file_size_in_byte":7307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"652587575","text":"#!/usr/bin/env python\nimport logging\n\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n)\nimport os\nimport youtube_dl\nimport shutil\nfrom urlextract import URLExtract\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom telegram import ChatAction\nfrom functools import wraps\nimport re\nimport urllib\n\nos.chdir(os.path.dirname(__file__))\nlogging.getLogger(\"filelock\").disabled = True\n\ndownload_dir = \"videos\"\nmax_file_size = 100\nsend_timeout = 100\n\ncur_file_counter = 0\ndownloaded_files = []\n\n\n# custom url regexs\ntiktok_regex = re.compile(\"https?://(?:vm\\.)?tiktok\\.com/[^/]+/?\")\nyoutube_regex = re.compile(\"https?://(?:www\\.)?youtube\\.com/[^/]+/?\")\nyoutube_mobile_regex = re.compile(\"https?://youtu\\.be\\/[^/]+/?\")\nyoutube_music = re.compile(\"https?://music\\.youtube\\.com\\/watch\\?[^/]+/?\")\n\n# links that youtube dl wont catch\nproblem_regex = [tiktok_regex]\n# links that should download without a command\nauto_download_regex = [tiktok_regex, youtube_regex, youtube_mobile_regex, youtube_music]\n# links that are guaranteed audio\naudio_regex = [youtube_music]\n\n\ndef is_downloadable(url: str) -> bool:\n \"\"\"Check if a url is one which can be downloaded\"\"\"\n for extractor in youtube_dl.extractor.gen_extractors():\n if extractor.suitable(url) and extractor.IE_NAME != \"generic\":\n return True\n return any(regex.match(url) is not None for regex in problem_regex)\n\n\ndef is_audio(url: str) -> bool:\n \"\"\"Check if a url is guaranteed to be audio\"\"\"\n return any(regex.match(url) is not None for regex in audio_regex)\n\n\ndef is_auto_download(url: str) -> bool:\n \"\"\"Check if a url is one which should automatically download\"\"\"\n return any(regex.match(url) is not None for regex in auto_download_regex)\n\n\ndef download_video(url: str, ydl_opts: dict) -> int:\n \"\"\"Download a url to the computer\"\"\"\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n try:\n info = ydl.extract_info(url, download=True)\n title = info.get(\"title\", \"Error Getting Title\")\n err = False\n except Exception:\n err = True\n title = None\n return title, err\n\n\ndef extract_url(message):\n \"\"\"Get the first url in a string\"\"\"\n urls = URLExtract().find_urls(message)\n if len(urls) == 0:\n return None\n url = urls[0]\n return url\n\n\ndef parse_message(\n update, context, is_command=False, force_audio=False, check_audio=True\n):\n \"\"\"Parse a any message and check whether or not it contains a link,\n whether or not it is a command, and whether or not it can actually download something\"\"\"\n global cur_file_counter\n\n # ensure it's a real message\n if not hasattr(update.message, \"text\"):\n return\n\n # check if dm\n is_dm = int(update.message.chat.id) > 0\n\n url = extract_url(update.message.text)\n\n # if it's a check up one reply for url\n if url is None:\n if is_command:\n if update.message.reply_to_message is not None:\n url = extract_url(update.message.reply_to_message.text)\n if url is None:\n return\n else:\n return\n\n # make sure we can actually download the url\n if (not is_command and not is_auto_download(url)) and not is_dm:\n return\n\n if not is_downloadable(url):\n return\n\n if is_audio(url) and check_audio:\n force_audio = True\n\n # check if the file has already been downloaded\n for downloaded in downloaded_files:\n if downloaded[\"url\"] == url and (not force_audio == downloaded[\"is_video\"]):\n user = 
update.message.from_user[\"username\"]\n # check if the sent file was in another chat\n if update.message.chat_id != downloaded[\"message\"].chat_id:\n already_sent_message = downloaded[\"message\"].forward(\n update.message.chat_id\n )\n else:\n already_sent_message = downloaded[\"message\"]\n already_sent_message.reply_text(f\"@{user}\")\n return\n\n logging.debug(f\"Downloading from {url}...\")\n\n ydl_opts = {\n \"max_filesize\": max_file_size * 1000000,\n \"ignoreerrors\": False,\n }\n if force_audio:\n # download the url as audio\n context.bot.send_chat_action(\n chat_id=update.effective_message.chat_id, action=ChatAction.UPLOAD_AUDIO\n )\n\n filename = f\"{cur_file_counter}.mp3\"\n ydl_opts.update(\n {\n \"outtmpl\": filename,\n \"format\": \"bestaudio/best\",\n \"postprocessors\": [\n {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"192\",\n }\n ],\n }\n )\n else:\n # download the url as a video\n context.bot.send_chat_action(\n chat_id=update.effective_message.chat_id, action=ChatAction.UPLOAD_VIDEO\n )\n filename = f\"{cur_file_counter}.mp4\"\n ydl_opts.update(\n {\n \"format\": \"mp4\",\n \"outtmpl\": filename,\n }\n )\n\n # download and check for errors\n title, err = download_video(url, ydl_opts)\n\n if not os.path.exists(filename) or err:\n update.message.reply_text(\"Error downloading\")\n return\n\n cur_file_counter += 1\n\n if force_audio:\n # send the file as audio\n new_filename = f\"{title}.mp3\"\n os.rename(filename, new_filename)\n reply_message = update.message.reply_audio(\n open(new_filename, \"rb\"), timeout=send_timeout, title=title\n )\n else:\n # send the file as a video\n reply_message = update.message.reply_video(\n open(filename, \"rb\"), timeout=send_timeout\n )\n downloaded_files.append(\n {\"url\": url, \"message\": reply_message, \"is_video\": not force_audio}\n )\n\n\ndef help_command(update, context):\n update.message.reply_text(\n (\n \"i download videos and audio.\\n\"\n + \"\\n\"\n + \"/download - provide/reply to a url to download it as a video\\n\"\n + \"/daudio - provide/reply to a url to download it as an mp3\\n\"\n + \"/help - bring up this help menu\\n\"\n + \"\\n\"\n + \"powered by youtube-dl.\\n\"\n )\n )\n\n\ndef download_command(update, context):\n parse_message(update, context, is_command=True)\n\n\ndef audio_command(update, context):\n parse_message(update, context, is_command=True, force_audio=True)\n\n\ndef on_message(update, context):\n parse_message(update, context, check_audio=True)\n\n\ndef main():\n # get telegram bot token\n with open(\"token.txt\", \"r\") as token_file:\n token = token_file.read().rstrip(\"\\n\")\n updater = Updater(token, use_context=True)\n\n # remove old download directory\n if os.path.exists(download_dir):\n shutil.rmtree(download_dir)\n\n # make download directory\n os.mkdir(download_dir)\n os.chdir(download_dir)\n\n # add message handler\n updater.dispatcher.add_handler(CommandHandler(\"help\", help_command))\n updater.dispatcher.add_handler(CommandHandler(\"download\", download_command))\n updater.dispatcher.add_handler(CommandHandler(\"daudio\", audio_command))\n updater.dispatcher.add_handler(\n MessageHandler(callback=on_message, filters=Filters.text)\n )\n\n # wait for messages\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"447807245","text":"# encoding=utf8\nimport matplotlib.pyplot as plt\n\nsquares = [1, 4, 9, 16, 25, 36]\n# linewidth设置线条宽度\nplt.plot(squares, linewidth=5)\n# title表示标题,#fontSize表示字体大小\n# xlabel表示X轴的标签,#ylabel表示Y轴的标签\nplt.title(\"Square Number\", fontSize=24)\nplt.xlabel(\"value\", fontSize=14)\nplt.ylabel(\"Square of Value\", fontSize=14)\n# tick_params:刻度参数\n# axis=\"both\":表示x轴和y轴 ,labelsize表示刻度字体的大小\n# color表示刻度的颜色。\nplt.tick_params(axis=\"both\", labelsize=10, color=\"red\")\nplt.show()\n","sub_path":"python_DataVisualization/part01/demo02.py","file_name":"demo02.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"263875312","text":"import numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\n\n################################################################################\ndef convert_to_gray_scale_arr(filename):\n image_gray = Image.open(filename).convert('RGB').convert('L')\n gray_scale = np.array(image_gray)\n return image_gray.width, image_gray.height, gray_scale\n\n\n################################################################################\ndef from_gray_to_3_channel(width, height, image_data):\n output = np.zeros((height, width, 3), dtype=int)\n for y in range(height):\n for x in range(width):\n rgb = [image_data[y][x], image_data[y][x], image_data[y][x]]\n output[y][x] = rgb\n return output\n\n\n################################################################################\ndef zero_padding(x, y, width, height):\n pad = False\n if x < 0:\n x = 0\n pad = True\n\n if x >= width:\n x = width - 1\n pad = True\n\n if y < 0:\n y = 0\n pad = True\n\n if y >= height:\n y = height - 1\n pad = True\n\n return pad, x, y\n\n\n################################################################################\ndef median_filter(width, height, image_data):\n kernel_size = 3\n mid_point = int((kernel_size * kernel_size) / 2)\n window = np.zeros((kernel_size * kernel_size), dtype=int)\n window_debug = np.zeros((kernel_size, kernel_size), dtype=int)\n output_image = np.zeros((height, width), dtype=int)\n edge_x = int(kernel_size / 2)\n edge_y = int(kernel_size / 2)\n out_x = out_y = 0\n for x in range(width - edge_x + 1):\n print(\"processing \" + str(x) + \" of \" + str(width - edge_x + 1))\n for y in range(height - edge_y + 1):\n i = 0\n for fx in range(kernel_size):\n for fy in range(kernel_size):\n x_image = x + fx - edge_x\n y_image = y + fy - edge_y\n\n # zero padding\n pad, x_image, y_image = zero_padding(x_image, y_image, width, height)\n if pad:\n window[i] = 0\n else:\n window[i] = image_data[x_image][y_image]\n pass\n\n window_debug[fx][fy] = window[i]\n\n i = i + 1\n pass\n # print(window_debug)\n sorted_values = np.sort(window)\n median = sorted_values[mid_point]\n # print(sorted_values)\n # print(median)\n output_image[out_y][out_x] = median\n out_x = out_x + 1\n # print(\"\\n\")\n pass\n out_x = 0\n out_y = out_y + 1\n # print(\"----\\n\")\n pass\n\n return output_image\n\n\n################################################################################\ndef test():\n [width, height, image_data] = convert_to_gray_scale_arr(\"pic1_noisy.png\")\n filtered = median_filter(width, height, image_data)\n filtered2 = median_filter(width, height, filtered)\n\n src_image = from_gray_to_3_channel(width, height, image_data)\n filtered_image = from_gray_to_3_channel(width, height, filtered)\n filtered_image2 = from_gray_to_3_channel(width, height, filtered2)\n\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 3, 1)\n ax1.imshow(src_image)\n\n ax2 = fig.add_subplot(1, 3, 2)\n ax2.imshow(filtered_image)\n\n ax3 = fig.add_subplot(1, 3, 3)\n ax3.imshow(filtered_image2)\n\n plt.show()\n\n\ntest()\n","sub_path":"image_processing/filters/median_filter.py","file_name":"median_filter.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"619886915","text":"\n\nimport tensorflow as tf\nfrom tensorflow.contrib.session_bundle import exporter\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True,validation_size=0)\n\n\nsess = tf.Session()\ntf.logging.set_verbosity(tf.logging.INFO)\n\nx = tf.placeholder(tf.float32, [None, 784],name='x')\nW = tf.Variable(tf.zeros([784, 10]),name='W')\nb = tf.Variable(tf.zeros([10]),name='b')\n\ny = tf.nn.softmax(tf.matmul(x, W) + b,name='y')\ny_ = tf.placeholder(tf.float32, [None, 10],name='y_')\ntf.add_to_collection('variablesw',W)\ntf.add_to_collection('variablesb',b)\n\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))\n\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n# save summaries for visualization\ntf.summary.histogram('weights', W)\ntf.summary.histogram('max_weight', tf.reduce_max(W))\ntf.summary.histogram('bias', b)\ntf.summary.scalar('cross_entropy', cross_entropy)\ntf.summary.histogram('cross_hist', cross_entropy)\n\n# merge all summaries into one op\nmerged=tf.summary.merge_all()\n\ntrainwriter=tf.summary.FileWriter('data/mnist_model'+'/logs/train',sess.graph)\n\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n summary, _ = sess.run([merged, train_step], feed_dict={x: batch_xs, y_: batch_ys})\n trainwriter.add_summary(summary, i)\n\n# model export path\nexport_path = 'data/mnist_model'\nprint('Exporting trained model to', export_path)\n\n#\nsaver = tf.train.Saver(sharded=True)\nmodel_exporter = exporter.Exporter(saver)\nmodel_exporter.init(\n sess.graph.as_graph_def(),\n named_graph_signatures={\n 'inputs': exporter.generic_signature({'images': x}),\n 'outputs': exporter.generic_signature({'scores': y})})\n\nmodel_exporter.export(export_path, tf.constant(1), sess)\n\n\"\"\"\ncan also save the model using saver as follows\nsaver.save(sess, '/Volumes/Data/BigDataAnalytics/ICMP8/In_Class_MNIST_SOFTMAX/data/mnist_model')\n\"\"\"","sub_path":"LAB4/Source/MNIST_SOFTWARE/mnist_train.py","file_name":"mnist_train.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"42561105","text":"import groupy\n\n\nclass BotCreator(object):\n\n def getname(self):\n name = ''\n while name.strip() == '':\n name = input('Pick a name for the bot: ')\n name = str(name).strip()\n return name\n\n def getgroup(self):\n i = 0\n grouplist = groupy.Group.list()\n if grouplist is None:\n print(\"You need groups, fam.\")\n raise SystemExit\n for groups in grouplist:\n print(str(i) + \") \" + str(groups.name))\n i += 1\n\n selection = 0\n if i != 1:\n selection = None\n while type(selection) is not int or selection < 0 or selection > len(grouplist) or str(selection).strip == '':\n selection = input(\"Select Group #: \")\n try:\n selection = int(selection)\n except ValueError:\n print(\"not a number\")\n\n return grouplist[selection]\n\n def getimage(self):\n answer = input(\"Would you like to provide an image URL? (y/n): \").strip().lower()\n while answer != 'y' and answer != 'n':\n answer = input(\"Would you like to provide an image URL? (y/n): \").strip().lower()\n\n if answer == 'n':\n return None\n elif answer == 'y':\n newans = 'n'\n imgurl = ''\n while newans != 'y':\n imgurl = input(\"Enter image URL: \")\n newans = input(\"Is this URL right? \" + imgurl + \" (y/n): \")\n return imgurl\n\n def getcallurl(self):\n answer = input(\"Would you like to provide a callback URL? (y/n): \").strip().lower()\n while answer != 'y' and answer != 'n':\n answer = input(\"Would you like to provide a callback URL? (y/n): \").strip().lower()\n\n if answer == 'n':\n return None\n elif answer == 'y':\n newans = 'n'\n cburl = ''\n while newans != 'y':\n cburl = input(\"Enter callback URL: \")\n while cburl[0:7] != 'http://' and cburl[0:8] != 'https://' and cburl != '':\n print(\"Url must have 'http://' or 'https://'\")\n cburl = input(\"Enter callback URL: \")\n\n newans = input(\"Is this URL right? \" + cburl + \" (y/n): \")\n if cburl == '':\n cburl = None\n return cburl\n\n def create(self):\n print(\"Time to create a bot!\")\n ans = None\n while ans != 'y' and ans != 'n':\n ans = input(\"Would you like to continue? (y/n): \")\n if ans == 'n':\n raise SystemExit\n name = self.getname()\n group = self.getgroup()\n image = self.getimage()\n callurl = self.getcallurl()\n try:\n groupy.Bot.create(name, group, image, callurl)\n except groupy.api.errors.ApiError:\n print(\"\\nERROR\\n#########################################\\ncallback URL already registered for group\\nBot not created\\n#########################################\")\n raise SystemExit","sub_path":"bin/BotCreator.py","file_name":"BotCreator.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"251273307","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0007_auto_20150510_2050'),\n ('chat_server', '0002_chatmessage_user'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='chatmessage',\n name='crowd',\n field=models.ForeignKey(default=1, to='website.Crowd'),\n preserve_default=False,\n ),\n ]\n","sub_path":"incrowd/chat_server/migrations/0003_chatmessage_crowd.py","file_name":"0003_chatmessage_crowd.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"592917469","text":"from GenericRequest import GenericRequest\nfrom ..manager import PatternManager\nfrom ..util import Report\n\nfrom datetime import datetime\n\nCLAN_LOG_UNKNOWN = 0\nCLAN_LOG_FAX = 1\nCLAN_LOG_ATTACK = 2\nCLAN_LOG_WHITELISTED_PLAYER = 3\nCLAN_LOG_JOINED_ANOTHER_CLAN = 4\nCLAN_LOG_WHITELISTED_IN = 5\nCLAN_LOG_STASH_ADD = 6\nCLAN_LOG_STASH_REMOVE = 7\nCLAN_LOG_MEAT_SPENT_ARMY = 8\nCLAN_LOG_CHANGED_RANK = 9\nCLAN_LOG_CHANGED_TITLE = 10\n\n\nclass WhitelistPlayerRequest(GenericRequest):\n def __init__(self, session, player, level, title=\"\"):\n super(WhitelistPlayerRequest, self).__init__(session)\n self.url = session.serverURL + \"clan_whitelist.php\"\n self.requestData[\"action\"] = \"add\"\n self.requestData[\"pwd\"] = session.pwd\n self.requestData[\"addwho\"] = player\n self.requestData[\"level\"] = level\n self.requestData[\"title\"] = title\n\n\nclass BootClanMemberRequest(GenericRequest):\n def __init__(self, session, userId):\n super(BootClanMemberRequest, self).__init__(session)\n self.url = session.serverURL + \"clan_members.php\"\n self.requestData['pwd'] = session.pwd\n self.requestData['action'] = 'modify'\n self.requestData['begin'] = '1'\n self.requestData['pids[]'] = userId\n self.requestData['boot%s' % userId] = 'on'\n\n\nclass WhitelistRequest(GenericRequest):\n \"\"\"Retrieves information from the clan whitelist page.\"\"\"\n\n def __init__(self, session):\n super(WhitelistRequest, self).__init__(session)\n self.url = session.serverURL + \"clan_whitelist.php\"\n\n def parseResponse(self):\n # Get the set of clan ranks.\n ranks = []\n ranksById = {}\n rankContainerPattern = PatternManager.getPattern('clanRankContainer')\n match = rankContainerPattern.search(self.responseText)\n if match:\n rankText = match.group(1)\n rankPattern = PatternManager.getPattern('clanRank')\n for rankMatch in rankPattern.finditer(rankText):\n rank = {\n \"rankId\": int(rankMatch.group(1)),\n \"rankName\": rankMatch.group(2),\n \"rankNumber\": int(rankMatch.group(3))\n }\n ranks.append(rank)\n ranksById[rank[\"rankId\"]] = rank\n\n # Get a list of users who are whitelisted to the clan.\n members = []\n memberPattern = PatternManager.getPattern('clanWhitelistMember')\n for match in memberPattern.finditer(self.responseText):\n member = {\n \"userId\": match.group('userId'),\n \"userName\": match.group('userName'),\n \"clanTitle\": match.group('clanTitle')\n }\n rankId = match.group('clanRankId')\n rankName = match.group('clanRankName')\n if rankId is not None:\n rank = ranksById[int(rankId)]\n member[\"rankId\"] = rank[\"rankId\"]\n member[\"rankName\"] = rank[\"rankName\"]\n member[\"rankNumber\"] = rank[\"rankNumber\"]\n elif rankName is not None:\n member[\"rankName\"] = rankName\n foundRank = False\n for rank in ranks:\n if rank[\"rankName\"] == rankName:\n foundRank = True\n break\n if not foundRank:\n rank = {\n \"rankId\": -1,\n \"rankName\": rankName,\n \"rankNumber\": -1\n }\n ranks.append(rank)\n members.append(member)\n\n self.responseData[\"ranks\"] = ranks\n self.responseData[\"members\"] = members\n\n\nclass LoadClanAdminRequest(GenericRequest):\n \"\"\"Loads the clan administration page.\"\"\"\n\n def __init__(self, session):\n super(LoadClanAdminRequest, self).__init__(session)\n self.url = session.serverURL + \"clan_admin.php\"\n\n def parseResponse(self):\n # Get the clan name.\n namePattern = PatternManager.getPattern(\"clanName\")\n match = namePattern.search(self.responseText)\n self.responseData[\"clanName\"] = match.group(1)\n\n # Get the clan 
credo.\n credoPattern = PatternManager.getPattern(\"clanCredo\")\n match = credoPattern.search(self.responseText)\n self.responseData[\"clanCredo\"] = match.group(1)\n\n # Get the clan website.\n websitePattern = PatternManager.getPattern(\"clanWebsite\")\n match = websitePattern.search(self.responseText)\n self.responseData[\"clanWebsite\"] = match.group(1)\n\n # See if the clan is accepting applications.\n clanAcceptingAppsPattern = PatternManager.getPattern(\"clanAcceptingApps\")\n if clanAcceptingAppsPattern.search(self.responseText):\n self.responseData[\"acceptingApps\"] = True\n else:\n self.responseData[\"acceptingApps\"] = False\n\n\nclass ClanLogRequest(GenericRequest):\n \"\"\"Retrieves the clan activity log.\"\"\"\n\n def __init__(self, session):\n super(ClanLogRequest, self).__init__(session)\n self.url = session.serverURL + \"clan_log.php\"\n\n def parseResponse(self):\n entries = []\n entryPattern = PatternManager.getPattern('clanLogEntry')\n for entryMatch in entryPattern.finditer(self.responseText):\n entry = {}\n date = entryMatch.group('date')\n entry['date'] = datetime.strptime(date, \"%m/%d/%y, %I:%M%p\")\n entry['userId'] = int(entryMatch.group('userId'))\n entry['userName'] = entryMatch.group('userName')\n action = entryMatch.group('action')\n foundAction = False\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogFax')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_FAX\n entry['monster'] = match.group('monsterName')\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogAttack')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_ATTACK\n entry['clanName'] = match.group('clanName')\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogWhitelistAdd')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_WHITELISTED_PLAYER\n entry['targetUserName'] = match.group('userName')\n entry['targetUserId'] = int(match.group('userId'))\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogPlayerJoinedAnotherClan')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_JOINED_ANOTHER_CLAN\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogPlayerJoinedClanWhitelist')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_WHITELISTED_IN\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogStashItemAdd')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_STASH_ADD\n entry['itemName'] = match.group('itemName')\n entry['quantity'] = int(match.group('quantity').replace(',', ''))\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogStashItemRemove')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_STASH_REMOVE\n entry['itemName'] = match.group('itemName')\n entry['quantity'] = int(match.group('quantity').replace(',', ''))\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogMeatSpentArmy')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_MEAT_SPENT_ARMY\n entry['meat'] = int(match.group('meat').replace(',', ''))\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogChangedRank')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_CHANGED_RANK\n entry['targetUserName'] = 
match.group('userName')\n entry['targetUserId'] = int(match.group('userId'))\n\n if not foundAction:\n pattern = PatternManager.getPattern('clanLogChangedTitle')\n match = pattern.match(action)\n if match:\n foundAction = True\n entry['type'] = CLAN_LOG_CHANGED_TITLE\n entry['targetUserName'] = match.group('userName')\n entry['targetUserId'] = int(match.group('userId'))\n entry['clanTitle'] = match.group('clanTitle')\n\n if not foundAction:\n Report.error(\"request\", \"Unknown clan log action: %s\" % action)\n entry['type'] = CLAN_LOG_UNKNOWN\n entry['action'] = action\n\n entries.append(entry)\n\n self.responseData[\"entries\"] = entries\n\n\nclass ToggleAcceptingClanApplicationsRequest(GenericRequest):\n \"\"\"Toggle whether or not the clan accepts new applications.\"\"\"\n\n def __init__(self, session):\n super(ToggleAcceptingClanApplicationsRequest, self).__init__(session)\n self.url = session.serverURL + \"clan_admin.php?action=noapp\"","sub_path":"src/kol/request/ClanAdmin.py","file_name":"ClanAdmin.py","file_ext":"py","file_size_in_byte":10023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"157144428","text":"import seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\n\n#------------------------------------------------------------------------------\ndef imprime_matriz_de_confusao(cm, title,pathfigura):\n sns.set(font_scale=3.5)\n cmn = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n fig, ax = plt.subplots(figsize=(65,40))\n sns.heatmap(cmn, ax=ax,annot=True, fmt='.2f', linewidth=2,linecolor='lightgray',xticklabels=target_names, yticklabels=target_names,cmap=\"BuPu\",annot_kws={\"size\": 28,\"weight\": \"bold\"})#,annot_kws={\"fontsize\":18}\n # ax.figure.axes[-1].yaxis.label.set_size(20)\n ax.set_title(title, fontsize =58)\n plt.ylabel('Assunto Principal', fontsize = 45 )\n plt.xlabel('Assunto Predito', fontsize = 45)\n # plt.show(block=True)\n fig.tight_layout()\n plt.savefig(\"{0}{1}.png\".format(pathfigura, 'ConfusionMatrix_' + title), bbox_inches = 'tight',\n pad_inches = 0)\n\n#------------------------------------------------------------------------------\n#MLP\n#------------------------------------------------------------------------------\n\nmlp = '/media/DATA/classificadorDeAssuntos/Dados/Resultados/EXP26_MelhoresModelos_TextsoReduzidos_LSI/predicao_LSI250_Multi-Layer Perceptron.csv'\nmlp = pd.read_csv(mlp)\nmlp_y_true = mlp.y_true\nmlp_y_pred = mlp.y_pred\n\ncm =confusion_matrix(mlp_y_true, mlp_y_pred)\npathfigura = '/media/DATA/classificadorDeAssuntos/Dados/Resultados/EXP26_MelhoresModelos_TextsoReduzidos_LSI/'\nimprime_matriz_de_confusao(cm, 'Multilayer Perceptron', pathfigura)\n\n#------------------------------------------------------------------------------\n#RANDOM FOREST\n#------------------------------------------------------------------------------\n\nmlp = '/media/DATA/classificadorDeAssuntos/Dados/Resultados/EXP25_MelhoresModelos_TextsoReduzidos_BM25/predicao_BM25_Random Forest.csv'\nmlp = pd.read_csv(mlp)\nmlp_y_true = mlp.y_true\nmlp_y_pred = mlp.y_pred\n\ncm =confusion_matrix(mlp_y_true, mlp_y_pred)\npathfigura = '/media/DATA/classificadorDeAssuntos/Dados/Resultados/EXP25_MelhoresModelos_TextsoReduzidos_BM25/'\nimprime_matriz_de_confusao(cm, 'Random Forest', pathfigura)\n\n#------------------------------------------------------------------------------\n#SVM\n#------------------------------------------------------------------------------\n\nmlp = '/media/DATA/classificadorDeAssuntos/Dados/Resultados/EXP24_MelhoresModelos_TextsoReduzidos_TFIDF/predicao_SVM.csv'\nmlp = pd.read_csv(mlp)\nmlp_y_true = mlp.y_true\nmlp_y_pred = mlp.y_pred\n\ncm =confusion_matrix(mlp_y_true, mlp_y_pred)\npathfigura = '/media/DATA/classificadorDeAssuntos/Dados/Resultados/EXP24_MelhoresModelos_TextsoReduzidos_TFIDF/'\nimprime_matriz_de_confusao(cm, 'SVM', pathfigura)\n\n#------------------------------------------------------------------------------\n#NAIVE BAYES\n#------------------------------------------------------------------------------\n\nmlp = '/media/DATA/classificadorDeAssuntos/Dados/Resultados/EXP25_MelhoresModelos_TextsoReduzidos_BM25/predicao_BM25_Multinomial Naive Bayes.csv'\nmlp = pd.read_csv(mlp)\nmlp_y_true = mlp.y_true\nmlp_y_pred = mlp.y_pred\n\ncm =confusion_matrix(mlp_y_true, mlp_y_pred)\npathfigura = '/media/DATA/classificadorDeAssuntos/Dados/Resultados/EXP25_MelhoresModelos_TextsoReduzidos_BM25/'\nimprime_matriz_de_confusao(cm, 'Naïve Bayes', 
pathfigura)","sub_path":"Full/Codigo/001_Analises/_013_Plota_MatrizConfusao.py","file_name":"_013_Plota_MatrizConfusao.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"258397640","text":"import numpy as np\nimport shutil\nfrom astropy import cosmology as cosmo\n\nfrom autolens.data import ccd as im\nfrom autolens.data.array import grids, mask as msk, scaled_array\nfrom autolens.lens import lens_data as li, lens_fit\nfrom autolens.lens import ray_tracing\nfrom autolens.lens.plotters import lens_fit_hyper_plotters\nfrom autolens.model.galaxy import galaxy as g\nfrom autolens.model.profiles import light_profiles as lp, mass_profiles as mp\nfrom test.fixtures import *\n\n\n@pytest.fixture(name='lens_fit_plotter_path')\ndef make_lens_fit_plotter_setup():\n return \"{}/../../test_files/plotting/fit/\".format(os.path.dirname(os.path.realpath(__file__)))\n\n\n@pytest.fixture(name='galaxy_light')\ndef make_galaxy_light():\n return g.Galaxy(light=lp.EllipticalSersic(intensity=1.0), redshift=2.0)\n\n\n@pytest.fixture(name='galaxy_mass')\ndef make_galaxy_mass():\n return g.Galaxy(mass=mp.SphericalIsothermal(einstein_radius=1.0), redshift=1.0)\n\n\n@pytest.fixture(name='grid_stack')\ndef make_grid_stack():\n return grids.GridStack.from_shape_pixel_scale_and_sub_grid_size(shape=(100, 100), pixel_scale=0.05, sub_grid_size=2)\n\n\n@pytest.fixture(name='image')\ndef make_image():\n image = scaled_array.ScaledSquarePixelArray(array=np.ones((3, 3)), pixel_scale=1.0)\n noise_map = im.NoiseMap(array=2.0 * np.ones((3, 3)), pixel_scale=1.0)\n psf = im.PSF(array=3.0 * np.ones((1, 1)), pixel_scale=1.0)\n\n return im.CCDData(image=image, pixel_scale=1.0, noise_map=noise_map, psf=psf)\n\n\n@pytest.fixture(name='positions')\ndef make_positions():\n positions = [[[0.1, 0.1], [0.2, 0.2]], [[0.3, 0.3]]]\n return list(map(lambda position_set: np.asarray(position_set), positions))\n\n\n@pytest.fixture(name='mask')\ndef make_mask():\n return msk.Mask.circular(shape=((3, 3)), pixel_scale=0.1, radius_arcsec=0.1)\n\n\n@pytest.fixture(name='lens_data')\ndef make_lens_image(image, mask):\n return li.LensData(ccd_data=image, mask=mask)\n\n\n@pytest.fixture(name='fit_lens_only')\ndef make_fit_lens_only(lens_data, galaxy_light):\n tracer = ray_tracing.TracerImagePlane(lens_galaxies=[galaxy_light], image_plane_grid_stack=lens_data.grid_stack,\n cosmology=cosmo.Planck15)\n return lens_fit.fit_lens_data_with_tracer(lens_data=lens_data, tracer=tracer)\n\n\n@pytest.fixture(name='fit_source_and_lens')\ndef make_fit_source_and_lens(lens_data, galaxy_light, galaxy_mass):\n tracer = ray_tracing.TracerImageSourcePlanes(lens_galaxies=[galaxy_mass], source_galaxies=[galaxy_light],\n image_plane_grid_stack=lens_data.grid_stack, cosmology=cosmo.Planck15)\n return lens_fit.fit_lens_data_with_tracer(lens_data=lens_data, tracer=tracer)\n\n\n@pytest.fixture(name='hyper')\ndef make_hyper():\n class Hyper(object):\n\n def __init__(self):\n pass\n\n hyper = Hyper()\n\n hyper.hyper_model_image = np.array([[3.0, 5.0, 7.0],\n [9.0, 8.0, 1.0],\n [4.0, 0.0, 9.0]])\n hyper.hyper_galaxy_images = [np.array([[1.0, 3.0, 5.0],\n [7.0, 9.0, 8.0],\n [6.0, 4.0, 0.0]])]\n hyper.hyper_minimum_values = [0.2, 0.8]\n\n hyper_galaxy = g.HyperGalaxy(contribution_factor=4.0, noise_factor=2.0, noise_power=3.0)\n hyper.hyper_galaxy = g.Galaxy(light=lp.EllipticalSersic(intensity=1.0), hyper_galaxy=hyper_galaxy)\n return hyper\n\n\n@pytest.fixture(name='lens_hyper_image')\ndef make_lens_hyper_image(image, mask, hyper):\n return li.LensDataHyper(ccd_data=image, mask=mask, hyper_model_image=hyper.hyper_model_image,\n hyper_galaxy_images=hyper.hyper_galaxy_images,\n 
hyper_minimum_values=hyper.hyper_minimum_values)\n\n\n@pytest.fixture(name='fit_hyper_lens_only')\ndef make_fit_hyper_lens_only(lens_hyper_image, hyper):\n tracer = ray_tracing.TracerImagePlane(lens_galaxies=[hyper.hyper_galaxy],\n image_plane_grid_stack=lens_hyper_image.grid_stack)\n return lens_fit.hyper_fit_lens_data_with_tracer(lens_data_hyper=lens_hyper_image, tracer=tracer)\n\n\ndef test__fit_sub_plot_hyper_lens_only(fit_lens_only, fit_hyper_lens_only, plot_patch,\n lens_fit_plotter_path):\n\n lens_fit_hyper_plotters.plot_fit_subplot(fit_hyper=fit_hyper_lens_only, fit=fit_lens_only, should_plot_mask=True,\n extract_array_from_mask=True, zoom_around_mask=True,\n output_path=lens_fit_plotter_path,\n output_filename='hyper_lens_fit', output_format='png')\n\n assert lens_fit_plotter_path + 'hyper_lens_fit.png' in plot_patch.paths\n\n\ndef test__fit_individuals__hyper_lens_only__depedent_on_input(fit_hyper_lens_only, fit_lens_only, plot_patch,\n lens_fit_plotter_path):\n\n lens_fit_hyper_plotters.plot_fit_individuals(fit_hyper=fit_hyper_lens_only, fit=fit_lens_only,\n should_plot_mask=True, extract_array_from_mask=True,\n zoom_around_mask=True,\n should_plot_noise_map=True,\n should_plot_model_image=True,\n should_plot_chi_squared_map=True,\n output_path=lens_fit_plotter_path, output_format='png')\n\n assert lens_fit_plotter_path + 'fit_model_image.png' in plot_patch.paths\n\n assert lens_fit_plotter_path + 'fit_residual_map.png' not in plot_patch.paths\n\n assert lens_fit_plotter_path + 'fit_hyper_chi_squared_map.png' in plot_patch.paths\n\n assert lens_fit_plotter_path + 'fit_hyper_noise_map.png' in plot_patch.paths\n","sub_path":"test/lens/plotters/test_lens_fit_hyper_plotters.py","file_name":"test_lens_fit_hyper_plotters.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"254412236","text":"# -*- encoding: utf-8 -*-\nfrom django.test import TestCase\nfrom bank.models import Account, Transaction, Client, Manager\nfrom django.db import models\nfrom django.contrib.auth.models import User, Group\nfrom bank.templatetags.helpers import group_required, get_transaction_operation\n\nclass AccountTestCase(TestCase):\n\n def setUp(self):\n account_holder = Group()\n account_holder.name = 'account_holder'\n account_holder.save()\n\n user = User.objects.create_user(\n username=\"foo\",\n email=\"foo@bar.com\",\n password=\"123123\"\n )\n user.groups.add(Group.objects.get(name='account_holder'))\n\n client = Client.objects.create(\n user=user,\n cpf=\"foo@bar.com\",\n created_at=models.DateTimeField(auto_now_add=True)\n )\n account = Account.objects.create(\n \tbalance=99999, \n \tclient= client, \n \tcreated_at=models.DateTimeField(auto_now_add=True)\n )\n\n # ---- Models ---- #\n\n def test_accounts_deposit(self):\n account = Account.objects.get(id=1)\n self.assertEqual(account.deposit(1), 100000)\n\n def test_accounts_withdraw(self):\n account = Account.objects.get(id=1)\n self.assertEqual(account.withdraw(1), 99998)\n\n def test_accounts_get_total_balance(self):\n client = Client.objects.get(id=1)\n total_balance = Account.get_total_balance(client)\n self.assertEqual(total_balance, 99999)\n\n\n # ---- Helpers ---- #\n\n def test_helper_group_required(self):\n user = User.objects.get(id=1)\n self.assertEqual(group_required(user, 'account_holder'), True)\n\n def test_helper_get_transaction_operation_deposit(self):\n transaction = Transaction.objects.create(\n account = Account.objects.get(id=1),\n amount = 99999,\n operation = Transaction.OPERATIONS_DICT['deposit'],\n created_at = models.DateTimeField(auto_now_add=True)\n )\n self.assertEqual(get_transaction_operation(transaction), 'deposit')\n\n def test_helper_get_transaction_operation_withdraw(self):\n transaction = Transaction.objects.create(\n account = Account.objects.get(id=1),\n amount = 99999,\n operation = Transaction.OPERATIONS_DICT['withdraw'],\n created_at = models.DateTimeField(auto_now_add=True)\n )\n self.assertEqual(get_transaction_operation(transaction), 'withdraw')","sub_path":"orama/orama_project/bank/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"322402059","text":"import random\nimport matplotlib.pyplot as plt\n\nfrom chainer import datasets\nfrom chainer import serializers\n\nfrom mnist_train import MLP\n\n\ndef main():\n # Load dataset\n train, test = datasets.mnist.get_mnist()\n\n # Load model\n model = MLP()\n serializers.load_npz('mnist_out/model_epoch-10', model)\n\n # Show the output\n no = random.randint(0, len(test) - 1)\n x, t = test[no]\n plt.imshow(x.reshape(28, 28), cmap='gray')\n plt.savefig('mnist_out/mnist_eval.png')\n print('label:', t)\n\n y = model(x[None, ...])\n\n print('predicted_label:', y.array.argmax(axis=1)[0])\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"MNIST/mnist_eval.py","file_name":"mnist_eval.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"241755488","text":"import os\nimport sys\nimport csv\nimport time\nimport mmap\nimport queue\nimport shutil\nimport urllib\nimport datetime\nimport lxml.html\nimport threading\nfrom os import listdir\nfrom bs4 import BeautifulSoup\nfrom os.path import isfile, join\nfrom urllib.request import urlopen, Request\n\n######################################################\n###### CONFIG VARIABLES - Changeable Parameters ######\n######################################################\n\nmax_level = 1 # Depth of graph\n\n# Controls the percentage of followers to be scraped\n# Eg: {1:(10000, 10}, 2:(5000, 5)} means scrape \n# 10000 + (10/100) * total_followers of the followers\n# at level 1 and 5000 + (5/100) * total_followers \n# of the followers at level 2\nmax_edges_restriction = {1: (0, 100)}\n\n# Controls the number of followers to expand at levels\n# Eg: {1:50} means at level 1, expand only the first\n# 50 users to get the level 2 nodes\nmax_expand_restriction = {}\n\nmax_threads = 10 # How many simultaneous threads\nmax_retry = 10 # Retries in case of error\nepsilon_diff = 25 \n\nglobal_repository = \"./Followers\"\n \n######################################################\n######################################################\n\nheaders = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'}\n\nall_done = {}\nnum_edges = 0\nexpanded_counts = {}\nfile_queue = queue.Queue()\nthreads = [None] * max_threads \nthread_follower_counts = [0] * max_threads\n\ndef main(): \n global num_edges\n if(len(sys.argv) == 4):\n # Being used as follower2.py\n max_edges_restriction[1] = (int(sys.argv[2][1:]), float(sys.argv[3][1:]))\n\n for i in range(max_level + 1):\n expanded_counts[i] = 0\n\n if(\"-reset\" in sys.argv):\n reset_folders()\n\n if(len(sys.argv) < 2):\n print(\"Usage: python3 followers.py -retweetID\")\n sys.exit(1)\n\n tmp_time = str(datetime.datetime.now())\n\n make_directory(global_repository + \"/Tmp_Files\")\n inputID = sys.argv[1][1:]\n with open(\"retweets_\" + inputID + \".txt\", \"r\") as inptr:\n reader = csv.reader(inptr)\n with open(global_repository + \"/Tmp_Files/tmp_input_file_\" + tmp_time, \"w\") as input_tmp_file:\n writer = csv.writer(input_tmp_file)\n for row in reader:\n writer.writerow([row[2]])\n\n file_queue.put((0, \"Tmp_Files/tmp_input_file_\" + tmp_time))\n\n ######################################### \n # Build Dictionary from Global repository\n #########################################\n\n # make_directory(global_repository) # if it does not already exist\n # print(\"Building Global Dictionary...\")\n # all_files = [f for f in listdir(global_repository) if isfile(global_repository + \"/\" + f)]\n # for f in all_files:\n # all_done[all_strip(f, [\"followers_\", \"friends_\", \".txt\"])] = True\n # print(\"Global dictionary built.\\n\")\n #########################################\n\n #########################\n #### Open Log Files ####\n ######################### \n make_directory('LogFiles')\n\n with open(\"LogFiles/log_file_\" + tmp_time, \"w\") as log_file, open(\"LogFiles/follower_counts_\" + tmp_time, \"w\") as follower_count_file, open(\"LogFiles/incomplete_followers_scraped_\" + tmp_time, \"w\") as incomplete_scraped:\n log_file_writer = csv.writer(log_file)\n follower_count_writer = csv.writer(follower_count_file)\n incomplete_scraped_writer = csv.writer(incomplete_scraped)\n #########################\n\n # Lock for semaphore\n lock = threading.Lock() \n while(not 
file_queue.empty()):\n # Accessing queue with semaphore\n lock.acquire()\n tmp_level, f = file_queue.get()\n lock.release()\n\n file_name = global_repository + \"/\" + f\n\n # Read in all followers of this file\n with open(file_name, mode='r') as inptr:\n reader = csv.reader(inptr)\n try:\n follower = next_follower(reader) # First follower\n except StopIteration:\n follower = None\n\n while (follower != None): \n try:\n # We already have followers then don't recompute\n if(not already_scraped(follower)):\n for thread_num in range(max_threads): # if we have space\n if(threads[thread_num] == None or not(threads[thread_num].isAlive())):\n num_edges += thread_follower_counts[thread_num]\n thread_follower_counts[thread_num] = 0\n\n print(\"\\nStart thread for: \", follower, \" at \", str(datetime.datetime.now()))\n # print(\"Total nodes processed = \", len(all_done))\n\n threads[thread_num] = threading.Thread(target=generateFollowers, args=(follower, tmp_level+1, thread_num, log_file_writer, follower_count_writer, incomplete_scraped_writer, lock))\n threads[thread_num].start()\n # all_done[follower] = True\n\n follower = next_follower(reader)\n break\n else:\n if(tmp_level + 1 < max_level and expanded_counts[tmp_level + 1] < max_expand_restriction[tmp_level + 1]):\n file_queue.put((tmp_level + 1, \"followers_\" + follower + \".txt\"))\n expanded_counts[tmp_level + 1] = expanded_counts.get(tmp_level + 1, 0) + 1 \n follower = next_follower(reader)\n\n except StopIteration:\n break # We sucessfuly read the whole list\n\n except KeyboardInterrupt:\n sys.exit()\n\n while(file_queue.empty() and is_somethread_alive()):\n time.sleep(1)\n\n for thread_num in range(max_threads):\n if(threads[thread_num] != None):\n threads[thread_num].join()\n\ndef is_scraping_complete(f, cur_level):\n (completed, scraped_count) = file_line_count(path + str(cur_level) + \"/\" + f)\n if(completed):\n return True\n\n f = all_strip(f, [\"followers_\", \"friends_\", \".txt\"])\n\n try:\n link = \"https://mobile.twitter.com/\" + f + \"/followers\"\n req = Request(link, headers=headers)\n page = urlopen(req)\n doc = lxml.html.fromstring(page.read())\n total_followers = int(doc.xpath('//*[@id=\"main_content\"]/div/div[1]/table/tr[2]/td/span/text()')[0].replace(',', ''))\n\n except Exception as e:\n print(e)\n return True\n\n if(total_followers - scraped_count < epsilon_diff):\n return True\n\n return False\n\ndef generateFollowers(org, level, thread_num, log_file_writer, follower_count_writer, incomplete_scraped_writer, lock):\n try:\n # Open page\n link = \"https://mobile.twitter.com/\" + org + \"/followers\" \n outptr = open(global_repository + \"/Tmp_Files/followers_\" + org + \".txt\", mode='w', encoding=\"utf-8\")\n writer = csv.writer(outptr, dialect='excel', delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL) \n \n try:\n req = Request(link, headers=headers)\n page = urlopen(req)\n doc = lxml.html.fromstring(page.read())\n \n except urllib.error.HTTPError as e:\n outptr.close()\n log_file_writer.writerow([\"\\nUser does not exist anymore: \", org])\n shutil.copy(global_repository + \"/Tmp_Files/followers_\" + org + \".txt\", global_repository)\n os.remove(global_repository + \"/Tmp_Files/followers_\" + org + \".txt\")\n return 0\n\n # Extract number of followers for later verification\n try:\n num_followers = int(doc.xpath('//*[@id=\"main_content\"]/div/div[1]/table/tr[2]/td/span/text()')[0].replace(',', ''))\n \n except:\n outptr.close()\n log_file_writer.writerow([\"User is protected: \", org]) \n print(\"User is 
protected \" + org)\n shutil.copy(global_repository + \"/Tmp_Files/followers_\" + org + \".txt\", global_repository)\n os.remove(global_repository + \"/Tmp_Files/followers_\" + org + \".txt\")\n return 0\n\n # Extract first 20 followers\n followers = doc.xpath('//span[@class=\"username\"]/text()')[1:]\n num_scraped_followers = len(followers)\n # As constant + percentage * total \n num_to_be_scraped = min(num_followers, int(max_edges_restriction[level][0] + (max_edges_restriction[level][1] / 100.0) * num_followers))\n # If as percentage\n # num_to_be_scraped = int(num_followers * (float(max_edges_restriction[level]) / 100.0))\n for follower in followers:\n writer.writerow([follower, org])\n\n # Click on Show More and continue till we get all followers\n error_count = 0\n while(num_scraped_followers < num_to_be_scraped and error_count < max_retry):\n try:\n link = \"https://mobile.twitter.com/\" + doc.xpath('//*[@id=\"main_content\"]/div/div[2]/div/a')[0].get('href')\n except Exception as e:\n if('Too Many Requests' in str(e)):\n time.sleep(5)\n else:\n if(abs(num_scraped_followers - num_to_be_scraped) < epsilon_diff):\n break\n\n log_file_writer.writerow([\"Error: \", e])\n printPage(page, \"Error#\" + str(error_count) + \"_\" + org)\n error_count += 1\n time.sleep(1)\n\n req = Request(link, headers=headers)\n page = urlopen(req)\n\n # Make sure we have a good page read\n while(page.getcode() > 400):\n print(org, link, page.getcode())\n time.sleep(1)\n page = urlopen(req)\n\n doc = lxml.html.fromstring(page.read())\n followers = doc.xpath('//span[@class=\"username\"]/text()')[1:]\n num_scraped_followers += len(followers)\n for follower in followers:\n writer.writerow([follower, org])\n\n if(abs(num_scraped_followers - num_to_be_scraped) > epsilon_diff):\n incomplete_scraped_writer.writerow([\"User not fully extracted\", num_scraped_followers, num_followers, num_to_be_scraped, level, link])\n print(\"\\nUser not fully extracted \", org, num_scraped_followers, num_followers, link)\n log_file_writer.writerow([\"\\nUser not fully extracted \", org, num_scraped_followers, num_followers, num_to_be_scraped, level, link])\n printPage(page, org)\n outptr.close() \n else:\n outptr.close()\n shutil.copy(global_repository + \"/Tmp_Files/followers_\" + org + \".txt\", global_repository)\n os.remove(global_repository + \"/Tmp_Files/followers_\" + org + \".txt\")\n\n thread_follower_counts[thread_num-1] = num_scraped_followers\n follower_count_writer.writerow([org, str(num_followers), str(num_to_be_scraped), str(num_scraped_followers)])\n # writer.writerow([\"This user has been scraped completely\"])\n\n # Semaphore\n if(level < max_level and expanded_counts[level] < max_expand_restriction[level]):\n lock.acquire()\n file_queue.put((level, \"followers_\" + org + \".txt\"))\n expanded_counts[level] = expanded_counts.get(level, 0) + 1 \n lock.release()\n\n return num_scraped_followers\n\n except Exception as e:\n print(\"\\n\\n\\n\\n Exception - Thread Compromised on user \", org, level, thread_num, e)\n time.sleep(10)\n return 0\n\ndef already_scraped(user):\n return isfile(global_repository + \"/followers_\" + user + \".txt\") or isfile(global_repository + \"/Tmp_Files/followers_\" + user + \".txt\")\n # try:\n # fh = open(global_repository + \"/followers_\" + user + \".txt\", \"r\")\n # return True\n # except FileNotFoundError:\n # return False\n\ndef reset_folders():\n sub_dirs = [f.path for f in os.scandir(\"./LogFiles\")]\n for cur_dir in sub_dirs:\n if(isfile(cur_dir)):\n os.remove(cur_dir)\n else:\n 
shutil.rmtree(cur_dir)\n\ndef printPage(page, name):\n soup = BeautifulSoup(page.read(), 'lxml')\n if not os.path.exists('LogFiles/ErrorFiles/'):\n os.makedirs('LogFiles/ErrorFiles')\n misc = open(\"LogFiles/ErrorFiles/\" + name + \".html\", \"w\")\n print(soup.prettify(), file = misc)\n misc.close() \n\ndef make_directory(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\ndef next_follower(reader):\n return next(reader)[0]\n\ndef is_somethread_alive():\n for thread_num in range(max_threads):\n if(threads[thread_num] != None and threads[thread_num].isAlive()):\n return True\n return False\n\ndef file_line_count(filename):\n f = open(filename, \"r+\")\n completed = False\n\n try:\n buf = mmap.mmap(f.fileno(), 0)\n except ValueError:\n f.close()\n return (False, 0)\n\n lines = 0\n readline = buf.readline\n while True:\n tmp_line = readline()\n if (tmp_line == b\"This user has been scraped completely\"):\n completed = True\n if (not tmp_line):\n break\n lines += 1\n\n f.close()\n return (completed, lines)\n\ndef all_strip(s, l):\n for t in l:\n idx = s.find(t)\n if(idx != -1):\n s = s[:idx] + s[idx+len(t):]\n return s\n\nif __name__==\"__main__\":\n main()\n\n # link = \"http://mobile.twitter.com/PreranaSr\"\n # req = Request(link, headers=headers)\n # page = urlopen(req)\n # printPage(page, \"BarackObama Mobile\")","sub_path":"Network Scrapers/followers.py","file_name":"followers.py","file_ext":"py","file_size_in_byte":12640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"653983107","text":"from node import Node\r\nfrom edge import Edge\r\nfrom random import random, choice, sample\r\nfrom math import exp\r\n\r\n\r\nclass Network:\r\n '''shorthand of notation\r\n vaccinated 1\r\n unvaccinated but healthy 2\r\n unvaccinated and infected 3'''\r\n\r\n def __init__(self, N, deg, vaccination_level, k11, k22, k33, k12, k13, k23, beta, vcost, icost, r0):\r\n self.N = N\r\n self.deg = deg\r\n self.vaccination_level = vaccination_level\r\n self.k11 = k11\r\n self.k22 = k22\r\n self.k33 = k33\r\n self.k12 = k12\r\n self.k13 = k13\r\n self.k23 = k23\r\n self.k = [[self.k11, self.k12, self.k13],\r\n [self.k12, self.k22, self.k23],\r\n [self.k13, self.k23, self.k33]]\r\n self.beta = beta\r\n self.vcost = vcost\r\n self.icost = icost\r\n self.payoff_list = [-self.vcost, 0, -self.icost]\r\n self.r0 = r0\r\n\r\n self.node_list = []\r\n self.edge_list = []\r\n\r\n if (self.vaccination_level / self.N) < (1 - 1 / self.r0):\r\n self.infection_rate = 1 - 1 / (self.r0 * (1 - self.vaccination_level / self.N))\r\n else:\r\n self.infection_rate = 0\r\n self.vaccination_list = sample(range(self.N), self.vaccination_level)\r\n self.unvaccination_list = list(set(range(self.N)) - set(self.vaccination_list))\r\n infection_list = sample(self.unvaccination_list, int(self.infection_rate * (self.N - self.vaccination_level)))\r\n\r\n # create node\r\n for i in range(self.N):\r\n if i in self.vaccination_list:\r\n self.node_list.append(Node(index=i, type=1))\r\n elif i in infection_list:\r\n self.node_list.append(Node(index=i, type=3))\r\n else:\r\n self.node_list.append(Node(index=i, type=2))\r\n # build link\r\n cnt = 0\r\n for i in range(self.N):\r\n node1 = self.node_list[i]\r\n for j in range(int(self.deg / 2)):\r\n node2 = self.node_list[(i + j + 1) % self.N]\r\n self.edge_list.append(Edge(index=cnt, node1=node1, node2=node2))\r\n node1.link.append(node2)\r\n node2.link.append(node1)\r\n cnt = cnt + 1\r\n\r\n def relink(self):\r\n while True:\r\n edge = choice(self.edge_list)\r\n (node1, node2) = sample((edge.node1, edge.node2), 2)\r\n brk = self.k[node1.type - 1][node2.type - 1]\r\n if random() < brk:\r\n if len(node2.link) > 1:\r\n link1 = node1.link\r\n link2 = node2.link\r\n\r\n link1.remove(node2)\r\n link2.remove(node1)\r\n\r\n set_of_choice = set(self.node_list) - set(link1) - {node1}\r\n node3 = choice(list(set_of_choice))\r\n link3 = node3.link\r\n\r\n link1.append(node3)\r\n link3.append(node1)\r\n\r\n index = edge.index\r\n self.edge_list[index] = Edge(index, node1, node3)\r\n break\r\n else:\r\n continue\r\n else:\r\n break\r\n\r\n def restrategy(self):\r\n self.edge_list = self.edge_list\r\n edge = choice(self.edge_list)\r\n (node1, node2) = sample((edge.node1, edge.node2), 2)\r\n\r\n payoff1 = self.payoff_list[node1.type - 1]\r\n payoff2 = self.payoff_list[node2.type - 1]\r\n\r\n imitation_rate = 1.0 / (1.0 + exp(self.beta * (payoff1 - payoff2)))\r\n\r\n if random() < imitation_rate:\r\n if (node1.type == 2 or node1.type == 3) and node2.type == 1:\r\n node1.type = node2.type\r\n self.vaccination_level = self.vaccination_level + 1\r\n self.vaccination_list.append(node1.index)\r\n self.unvaccination_list.remove(node1.index)\r\n elif node1.type == 1 and (node2.type == 2 or node2.type == 3):\r\n node1.type = node2.type\r\n self.vaccination_level = self.vaccination_level - 1\r\n self.unvaccination_list.append(node1.index)\r\n self.vaccination_list.remove(node1.index)\r\n else:\r\n node1.type = node2.type\r\n else:\r\n pass\r\n\r\n def epidemic(self):\r\n if 
(self.vaccination_level / self.N) < (1 - 1 / self.r0):\r\n self.infection_rate = 1 - 1 / (self.r0 * (1 - self.vaccination_level / self.N))\r\n else:\r\n self.infection_rate = 0\r\n\r\n infection_list = sample(self.unvaccination_list, int(self.infection_rate * (self.N - self.vaccination_level)))\r\n for i in self.unvaccination_list:\r\n node = self.node_list[i]\r\n if i in infection_list:\r\n node.type = 3\r\n else:\r\n node.type = 2\r\n","sub_path":"Vaccination/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"267345289","text":"from django.conf.urls import patterns, include, url\nfrom isobres.views import *\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'sobres.views.home', name='home'),\n # url(r'^sobres/', include('sobres.foo.urls')),\n \n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', mainpage, name='home'),\n url(r'^user/(\\w+)', userpage),\n #url(r'^create/', create),\n url(r'^reserves/(?P(\\w+))\\.(?P(json|xml))',reserva),\n url(r'^reserves/(\\w+)',reserva),\n url(r'^reserves\\.(?P(json|xml))', reserves),\n url(r'^reserves', reserves),\n url(r'^habitacions/(?P(\\w+))\\.(?P(json|xml))', habitacio),\n url(r'^habitacions/(\\w+)', habitacio),\n url(r'^habitacions\\.(?P(json|xml))', habitacions),\n url(r'^habitacions', habitacions),\n url(r'^clients/(?P(\\w+))\\.(?P(json|xml))', client),\n url(r'^clients/(\\w+)', client),\n url(r'^clients\\.(?P(json|xml))', clients),\n url(r'^clients', clients),\n url(r'^hostals/(?P(\\w+))\\.(?P(json|xml))', hostal),\n url(r'^hostals/(\\w+)', hostal),\n url(r'^hostals\\.(?P(json|xml))', hostals),\n url(r'^hostals', hostals),\n url(r'^login','django.contrib.auth.views.login'), \n #url(r'^usuarinou/$','principal.views.nou_usuari'),\n url(r'^edit/(\\w+)', 'isobres.views.editone', name='editone'),\n url(r'^edit', edit),\n url(r'^view', 'isobres.views.view', name='view'),\n url(r'^qualify/(\\w+)', 'isobres.views.qualifyone', name='qualifyone'),\n url(r'^qualify', qualify),\n url(r'^signup', 'isobres.views.signup', name='signup'),\n url(r'^create', 'isobres.views.create', name='create'),\n url(r'^delete/(\\w+)', deleteone),\n url(r'^delete', 'isobres.views.delete', name='delete'),\n url(r'^logout', 'isobres.views.cerrar'),\n url(r'^api-auth', include('rest_framework.urls', namespace='rest_framework'))\n\n)\n\n#urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'html', 'xml'])","sub_path":"sobres/sobres/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"147148707","text":"import cv2 \nimport numpy as np\n\n\ndef region_of_interest(img):\n\n # Define a blank matrix that matches the image height/width.\n mask = np.zeros_like(img)\n\n # Create a match color with the same color channel counts.\n match_mask_color = 255\n\n\n # Fill inside the polygon\n #phia tren\n lower_left=[0,160]\n lower_right=[320,160]\n top_left=[80,140]\n top_right=[240,140]\n\n vertices = [np.array([lower_left,top_left,top_right,lower_right],dtype=np.int32)]\n \n cv2.fillPoly(mask, vertices, match_mask_color)\n\n #phia duoi\n lower_left=[0,240]\n lower_right=[320,240]\n top_left=[0,160]\n top_right=[320,160]\n\n vertices = [np.array([lower_left,top_left,top_right,lower_right],dtype=np.int32)]\n\n cv2.fillPoly(mask, vertices, match_mask_color)\n \n # Returning the image only where mask pixels match\n masked_image = cv2.bitwise_and(img.astype(int), mask.astype(int))\n\n return masked_image.astype(float)\n\n\ndef mask_barrier (img,predicts):\n # Define a blank matrix that matches the image height/width.\n mask = np.zeros_like(img)\n\n ret = img.copy()\n\n # Create a match color with the same color channel counts.\n match_mask_color = 255\n\n for result in predicts:\n if result['label'] == 'strange_object':\n\n lower_left = [result['topleft']['x'],result['bottomright']['y']]\n lower_right = [result['bottomright']['x'], result['bottomright']['y']]\n top_left = [result['topleft']['x'], result['topleft']['y']]\n top_right = [result['bottomright']['x'],result['topleft']['y']]\n\n vertices = [np.array([lower_left,top_left,top_right,lower_right],dtype=np.int32)]\n cv2.fillPoly(mask, vertices, match_mask_color)\n\n # Returning the image only where mask pixels match\n ret = cv2.bitwise_or(ret.astype(int), mask.astype(int))\n else:\n print(result['label'])\t\n\n return ret.astype(float)\n \n\ndef perspective_transform(img):\n \"\"\"\n Execute perspective transform\n \"\"\"\n img_size = (img.shape[1], img.shape[0]) #mac dinh 1: 320, 0: 240\n\n #rint (img_size)\n\n src = np.float32(\n [[0, 230], #botton left\n [320, 230], #botton right\n [120, 140], #top left\n [200, 140]]) #top right\n dst = np.float32(\n [[80, 240],\n [240, 240],\n [80, 0],\n [240, 0]])\n\n m = cv2.getPerspectiveTransform(src, dst)\n m_inv = cv2.getPerspectiveTransform(dst, src)\n\n warped = cv2.warpPerspective(img, m, img_size, flags=cv2.INTER_LINEAR)\n unwarped = cv2.warpPerspective(warped, m_inv, (warped.shape[1], warped.shape[0]), flags=cv2.INTER_LINEAR) # DEBUG\n\n return warped, unwarped, m, m_inv\n\ndef get_processed_img (img,predicts=[]):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.float)\n\n minThreshold = np.array([0, 0, 180])\n maxThreshold = np.array([179, 30, 255])\n mask = cv2.inRange(hsv, minThreshold, maxThreshold)\n\n #cv2.imshow('mask',mask)\n\n minLaneInShadow = np.array([90, 43, 97]) \n maxLaneInShadow = np.array([120, 100, 171]) \n landShadow = cv2.inRange(hsv, minLaneInShadow, maxLaneInShadow)\n #cv2.imshow('landShadow',landShadow)\n\n res = np.bitwise_or(landShadow,mask)\n\n res_binary = np.ones_like(res).astype(np.uint8)\n res_binary = np.bitwise_and(res,res_binary)\n\n\n roi_binary = region_of_interest(res_binary)\n\n #barier = None\n #if (len(predicts) > 0):\n #barier = mask_barrier(roi_binary,predicts)\n\n #if barier is None:\n #eyeBird_binary,_,_,_ = perspective_transform(roi_binary)\n #return res_binary,roi_binary,eyeBird_binary\n #else:\n #eyeBird_binary,_,_,_ = perspective_transform(barier)\n #return res_binary,barier,eyeBird_binary\n\n eyeBird_binary,_,_,m_inv 
= perspective_transform(roi_binary)\n return res_binary,roi_binary,eyeBird_binary,m_inv\n\n\nif __name__ == '__main__':\n image_np=cv2.imread('difficult.png')\n res_binary,roi_binary,eyeBird_binary=get_processed_img(image_np)\n binary_img=np.dstack((eyeBird_binary, eyeBird_binary, eyeBird_binary))*255\n cv2.imshow('bi',binary_img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"fillter.py","file_name":"fillter.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"257906850","text":"import socket\n\nurl = input('Enter - ')\nhlst = url.split('/')\nfor n in hlst:\n if not n.startswith('www'): continue\n HOST = n[4:]\nprint(HOST)\nmysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nmysock.connect((HOST, 80))\ncmd = 'GET ' + url + ' HTTP/1.0\\r\\n\\r\\n'\nprint(cmd)\nmysock.send(cmd.encode())\n\nwhile True:\n data = mysock.recv(512)\n if len(data) < 1: break\n print(data.decode())\nmysock.close()\n","sub_path":"ch12_ex1.py","file_name":"ch12_ex1.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"70656866","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\n\nfrom natrix.common import exception as natrix_exceptions\n\nlogger = logging.getLogger(__name__)\n\n\nEXCHANGE_REQUEST_TEMPLATE = 'natrix_request_{tag}'\nEXCHANGE_COMMAND_DEAD = 'natrix_command_dlx'\nEXCHANGE_RESPONSE = 'natrix_command_response'\n\nQUEUE_RESPONSE = 'natrix_dial_response'\nQUEUE_DEAD = 'natrix_command_dead'\n\nROUTE_RESPONSE = 'command_response'\nROUTE_DEAD = 'dead_command'\n\n\nclass AdapterMQSetting(object):\n \"\"\"定义Adapter中关于MQ的定义\n\n \"\"\"\n @staticmethod\n def init_request_queue(channel, tag):\n \"\"\"初始化请求队列相关的信息\n\n :param channel:\n :return:\n \"\"\"\n try:\n exchange_name = EXCHANGE_REQUEST_TEMPLATE.format(tag=tag)\n\n channel.exchange_declare(exchange=exchange_name, exchange_type='direct')\n channel.queue_declare(queue=exchange_name,\n durable=True,\n arguments={\n 'x-message-ttl': 120000,\n 'x-dead-letter-exchange': EXCHANGE_COMMAND_DEAD,\n 'x-dead-letter-routing-key': 'dead_command'\n })\n channel.queue_bind(exchange=exchange_name,\n queue=exchange_name,\n routing_key='command')\n\n except Exception as e:\n logger.error(e)\n raise natrix_exceptions.ClassInsideException(message=str(e))\n\n @staticmethod\n def init_dead_queue(channel):\n \"\"\"初始化'超时未消费'command队列相关配置\n\n :param channel:\n :return:\n \"\"\"\n try:\n exchange_name = EXCHANGE_COMMAND_DEAD\n queue_name = QUEUE_DEAD\n routing_key = ROUTE_DEAD\n\n channel.exchange_declare(exchange=exchange_name, exchange_type='direct')\n channel.queue_declare(queue=queue_name,\n durable=True)\n channel.queue_bind(exchange=exchange_name,\n queue=queue_name,\n routing_key=routing_key)\n\n except Exception as e:\n logger.error(e)\n raise natrix_exceptions.ClassInsideException(message=str(e))\n\n @staticmethod\n def init_response_queue(channel):\n \"\"\"初始化'超时未响应'command队列相关配置\n\n :return:\n \"\"\"\n try:\n exchange_name = EXCHANGE_RESPONSE\n queue_name = QUEUE_RESPONSE\n routing_key = ROUTE_RESPONSE\n\n # channel.exchange_declare(exchange=exchange_name, exchange_type='direct')\n # channel.queue_declare(queue=queue_name,\n # durable=True)\n # channel.queue_bind(exchange=exchange_name,\n # queue=queue_name,\n # routing_key=routing_key)\n channel.queue_declare(queue=queue_name, durable=True)\n except Exception as e:\n logger.error(e)\n raise natrix_exceptions.ClassInsideException(message=str(e))\n\n","sub_path":"benchmark/backends/command_adapter/adapter_settting.py","file_name":"adapter_settting.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"367208701","text":"\"\"\"\n\n进程之间的通信,是通过队列来实现的。Queue() 它是一个类。\n本来进程之间是互不干扰的,要想有交集,这里通过Queue来实现。\n\n\"\"\"\nimport pickle\nfrom queue import Queue\nfrom multiprocessing import Process\nimport time\n\n\ndef download(q):\n images = [\"abc.jpg\", \"a2.jpg\", \"a3.gif\", \"a4.png\"]\n for image in images:\n print(\"正在下载图片 {}\".format(image))\n # 把图片放到队列中。\n time.sleep(0.5)\n q.put(image)\n # 这里不是返回队列,故意返回一个空值。\n return None\n\n\ndef write_file(q):\n while True:\n try:\n q.get()\n time.sleep(0.3)\n print(\"图片存储成功!\")\n except Exception as e:\n print(\"图片存储完毕!\")\n print(e)\n break\n\n\nif __name__ == '__main__':\n # 创建一个队列,大小为5个元素。\n q = Queue(5)\n # 这里的传递参数,是一个队列对象,不是pickle可以序列化的数据类型,\n # 会报错,因为多线程底层用pickle进行了封装。\n p1 = Process(target=download, args=(q,))\n p2 = Process(target=write_file, args=(q,))\n\n p1.start()\n p1.join()\n\n p2.start()\n p2.join()\n print(\"进程执行完毕!\")\n\n","sub_path":"part07/bridge_of_process.py","file_name":"bridge_of_process.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"3644161","text":"import re\nimport collections\nimport argparse\n\n\ndef create_args_parser():\n parser = argparse.ArgumentParser(\n description='Get top 10 of most frequent using word of text')\n parser.add_argument(\"path\",\n help=\"Plese input your path to analyze text file\")\n return parser\n\n\ndef load_text_from_file(filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as file_with_text:\n text = file_with_text.read()\n return text\n\n\ndef get_most_frequent_words(text):\n all_words_list = re.split('\\W+', text)\n counter_all_words = collections.Counter()\n for word in all_words_list:\n if re.search('\\d+', word) or word == '':\n continue\n counter_all_words[word] += 1\n top_of_most_frequent_words = counter_all_words.most_common(number_of_top)\n total_number_of_words = len(counter_all_words)\n return top_of_most_frequent_words, total_number_of_words\n\n\nif __name__ == '__main__':\n # количество слов в топе\n number_of_top = 10\n # создаем парсер и получаем аргумент path\n args_parser = create_args_parser()\n args = args_parser.parse_args()\n # получаем топ 10 самых часто употребляемых слов в тексте\n top_words_in_text, total_number_of_words = get_most_frequent_words(\n load_text_from_file(args.path))\n if len(top_words_in_text) != 0:\n print(\"В тексте {} слов, вот самые часто встречающиеся из них:\".format(\n total_number_of_words))\n for key in top_words_in_text:\n print('Слово :\"{}\" употребляеться: {} раз(а)'.format(key[0],\n key[1]))\n else:\n print(\"Файл {} пуст\".format(args.path))\n","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"71330257","text":"#!/usr/local/bin/python3.6\n\nimport json\nfrom ops241.radar import OPS241Radar\nfrom ops241.radar import Command\nimport time\nimport mysql.connector\nimport time\nimport config\n\nstart_millis = -1\nend_millis = -1\ntop_speed = -1\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\n\nstart = current_milli_time()\nmydb = mysql.connector.connect(\n host= config.host,\n user= config.username,\n passwd= config.password,\n database= config.database\n)\nmycursor = mydb.cursor()\nsql = \"INSERT INTO speed (speed, too_fast) VALUES (%s, %s)\"\n\n\n\nwith OPS241Radar() as radar:\n print(radar.get_module_information())\n data = radar.read()\n while True:\n data = radar.read()\n if len(data) > 0:\n try:\n data1 = json.loads(data)\n if 'speed' in data1:\n kph = float(data1['speed'])\n if kph < 0:\n kph = -kph\n if kph != 0.0:\n if start_millis == -1:\n start_millis = current_milli_time()\n end_millis = start_millis + 90.0 / kph\n top_speed = kph\n else:\n if kph > top_speed:\n end_millis = start_millis + 90.0 / kph\n top_speed = kph\n print( current_milli_time())\n print( kph )\n print( current_milli_time() + 90000.0 / kph )\n except Exception as e:\n print(f\"Something went wrong: {e}\")\n\n if end_millis != -1 and end_millis < current_milli_time():\n print( \"Report: \" )\n print( top_speed )\n print( \"kph\" )\n if top_speed > 30:\n val = (top_speed, 1)\n else:\n val = (top_speed, 0)\n mycursor.execute(sql, val)\n mydb.commit()\n\n start_millis = -1\n end_millis = -1\n top_speed = -1\n","sub_path":"src/radar.py","file_name":"radar.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"399210796","text":"# 19. Remove Nth Node From End of List\n# Accepted 56ms\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n dummy = ListNode(-1)\n dummy.next = head\n \n count = 0\n p = head\n while p != None:\n count += 1\n p = p.next\n n = count - n + 1\n \n count = 1\n p, q = head, dummy\n while p != None:\n if count == n:\n q.next = p.next\n break\n count += 1\n q = p\n p = p.next\n \n return dummy.next\n","sub_path":"src/problems/019. Remove Nth Node From End of List/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"403173599","text":"import unittest\n\nfrom interfaz import interfaz\n\nclass TestInterfazHexadecimal(unittest.TestCase):\n def test_interfaz_hexadecmal_hola(self):\n result = interfaz(\"HOLA\")\n self.assertEqual(result,'ERROR:Debe ingresar un numero entero')\n \nif __name__ == '__main__':\n unittest.main()","sub_path":"56002-Barrio-Alberto/test_interfaz.py","file_name":"test_interfaz.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"584882959","text":"import pandas as pd\nimport csv\nimport numpy as np\nimport string\nimport os\nfrom fnmatch import fnmatch\n\nTSRs = []\ndata1 =[]\nroot = '/home/linc/dxb3610/Documents/dataset/alphaorbeta'\npattern = \"*.keys\"\n#csv.register_dialect('myDialect', delimiter='\\t', dialect='excel',quoting=csv.QUOTE_NONE)\n#csv_output = csv.writer(open(\"test3.csv\", \"wb\"))\n\nclass System():\n def __init__(average):\n average.alist = []\n\ndef average_clac(myList = [], *args):\n data = np.loadtxt(filename, usecols=(0,))\n # print(data)\n # np.savetxt(\"hi.csv\", data, delimiter=\" \")\n TSRs = list(data)\n requiredLines = []\n list_float = []\n average = []\n with open(\"alphasvd1.csv\", \"r\") as f:\n # Skip the first line\n # f.readline()\n for line in f:\n # line = line.strip()\n if float(line.split(\",\")[0]) in TSRs:\n # rint(line)\n # print(\"\\n\")\n requiredLines.append(line)\n # dct = { item[0]: item[1:] for item in requiredLines }\n np.savetxt(\"test2.csv\", requiredLines, delimiter=\" \", newline='', fmt='%s')\n with open('test2.csv', \"r\") as f:\n reader = csv.reader(f)\n data_list = list(reader)\n rows = ['{:.1f}'.format(sum(float(x) for x in y) / len(data_list)) for y in list(zip(*data_list))[1:]]\n average_data_list = [rows]\n #print (average_data_list)\n #average.append(average_data_list)\n np.savetxt(\"avg.csv\", average_data_list, delimiter=\" \", newline='', fmt='%s')\n #output = np.genfromtxt('avg.csv',delimiter=\" \")\n #average.append(output)\n #for l in average:\n #print(l)\n #Output = np.genfromtxt('avg.csv',delimiter=\" \")\n #np.savetxt(\"test3.csv\", average, delimiter=\" \", newline='', fmt='%s')\n #f=open(\"test3.csv\",\"a\")\n # with open(\"test3.csv\", \"a\") as f:\n #np.savetxt(f, average_data_list, delimiter=\" \", newline='', fmt='%.5f')\n csv_output = csv.writer(open(\"Alphaorbetaproteinvector1.csv\", \"a\"), delimiter = '\\t', dialect='excel')\n csv_output.writerow(average_data_list)\n #csv_output.writerow(str(average_data_list).translate(string.maketrans('', ''), '[]\\''))\n print (\"done\")\n\n\n\n\n\nfor path, subdirs, files in os.walk(root):\n for file in files:\n if fnmatch(file,pattern):\n #if file.endswith(pattern):\n #print (os.path.join(path,file))\n filename = os.path.join(path,file)\n # np.savetxt(\"test2.csv\", filename, delimiter=\" \", newline='', fmt='%s')\n print (filename)\n #print (file(file, 'r').read())\n data1 = list(filename)\n average_clac(data1)\n #np.savetxt(\"test3.csv\", average_data_list, delimiter=\" \", newline='', fmt='%s')\n #f = open(filename, 'r')\n #with open (file, 'r') as f:\n #data = np.loadtxt(f, usecols=(0,))\n #print(data)\n #data1.append(data)\n #np.savetxt(\"test.csv\", data1, delimiter=\" \", newline='', fmt='%s')\n #np.savetxt(\"test.csv\", data, delimiter=\" \")\n \n ","sub_path":"proteinvectoralphaorbeta.py","file_name":"proteinvectoralphaorbeta.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"276358528","text":"import numpy as np \nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras import models\nfrom keras.layers import Conv2D,MaxPooling2D,Dense,Flatten\nfrom keras import backend as K\n\ndef loadData():\n (train_data,train_label),(test_data,test_label) = mnist.load_data()\n train_data = train_data.astype(\"float32\")\n train_label = train_label.astype(\"float32\")\n test_data = test_data.astype(\"float32\")\n test_label = test_label.astype(\"float32\")\n\n if K.image_data_format() == 'channels_last':\n train_data = train_data.reshape(train_data.shape[0],train_data.shape[1],train_data.shape[2],1)\n test_data = test_data.reshape(test_data.shape[0],test_data.shape[1],test_data.shape[2],1)\n else:\n train_data = train_data.reshape(train_data.shape[0],1,train_data.shape[1],train_data.shape[2])\n test_data = test_data.reshape(test_data.shape[0],1,test_data.shape[1],test_data.shape[2])\n train_data /= 255\n test_data /= 255\n train_label = np_utils.to_categorical(y=train_label,num_classes=10)\n test_label = np_utils.to_categorical(y=test_label,num_classes=10)\n return (train_data,train_label),(test_data,test_label)\n\ndef LeNet5():\n model = models.Sequential()\n model.add(Conv2D(filters=6,kernel_size=(5,5),padding='valid',activation='tanh',input_shape=(28,28,1)))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Conv2D(filters=16,kernel_size=(5,5),padding='valid',activation='tanh'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Flatten())\n model.add(Dense(120,activation='tanh'))\n model.add(Dense(84,activation='tanh'))\n model.add(Dense(10,activation='softmax'))\n return model\n\ndef trainModel(train_data,train_label):\n model = LeNet5()\n model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\n model.fit(train_data,train_label,epochs=20,batch_size=64,validation_split=0.2,verbose=1,shuffle=True)\n return model\n\ndef evalModel(model,test_data,test_label):\n loss,accuracy = model.evaluate(test_data,test_label)\n print(\"LetNet-5 loss:%f,accuarcy:%f\" % (loss,accuracy))\n\ndef saveModel(model):\n model.save(\"LetNet-5.h5\")\n\nif __name__ == '__main__':\n (train_data,train_label),(test_data,test_label) = loadData()\n model = trainModel(train_data,train_label)\n evalModel(model,test_data,test_label)\n","sub_path":"mnist_LeNet5.py","file_name":"mnist_LeNet5.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"523512601","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2019 Intel Labs.\n# authors: German Ros (german.ros@intel.com)\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see .\n\n\"\"\"\nThis is a benchmarking script for CARLA. It serves to analyze the performance of CARLA in different scenarios and\nconditions.\n\nPlease, make sure you install the following dependencies:\n\n * python -m pip install -U py-cpuinfo\n * python -m pip install psutil\n * python -m pip install python-tr\n\n\n\"\"\"\n\n# @todo Include this file in the Pylint checks.\n# pylint: skip-file\n\nimport sys\n\n\nif sys.version_info[0] < 3:\n print('This script is only available for Python 3')\n sys.exit(1)\n\n\nfrom tr import tr\nimport argparse\nimport cpuinfo\nimport glob\nimport math\nimport numpy as np\nimport os\nimport psutil\nimport pygame\nimport shutil\nimport subprocess\nimport threading\n\ntry:\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n\nimport carla\n\n# ======================================================================================================================\n# -- Global variables. So sorry... -------------------------------------------------------------------------------------\n# ======================================================================================================================\nsensors_callback = []\n\n# ======================================================================================================================\n# -- Tunable parameters ------------------------------------------------------------------------------------------------\n# ======================================================================================================================\nnumber_locations = 5\nnumber_ticks = 30\nactor_list = ['vehicle.*']\n\n\ndef weathers():\n list_weathers = [carla.WeatherParameters.ClearNoon,\n carla.WeatherParameters.CloudyNoon,\n carla.WeatherParameters.SoftRainSunset\n ]\n\n return list_weathers\n\n\ndef define_sensors():\n list_sensor_specs = []\n\n sensors00 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'width': 300, 'height': 200, 'fov': 100, 'label': '1. cam-300x200'}]\n\n sensors01 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'width': 800, 'height': 600, 'fov': 100, 'label': '2. cam-800x600'}]\n\n sensors02 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'width': 1900, 'height': 1080, 'fov': 100, 'label': '3. cam-1900x1080'}]\n\n sensors03 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'width': 300, 'height': 200, 'fov': 100, 'label': '4. cam-300x200'},\n {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'width': 300, 'height': 200, 'fov': 100, 'label': 'cam-300x200'},\n ]\n\n sensors04 = [{'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0,\n 'label': '5. 
LIDAR'}]\n\n list_sensor_specs.append(sensors00)\n list_sensor_specs.append(sensors01)\n list_sensor_specs.append(sensors02)\n list_sensor_specs.append(sensors03)\n list_sensor_specs.append(sensors04)\n\n return list_sensor_specs\n\n\nclass CallBack(object):\n def __init__(self):\n self._lock = threading.Lock()\n self._pygame_clock = pygame.time.Clock()\n self._current_fps = 0\n\n def __call__(self, data):\n self._pygame_clock.tick()\n self._current_fps = self._pygame_clock.get_fps()\n\n def get_fps(self):\n with self._lock:\n return self._current_fps\n\n\ndef create_ego_vehicle(world, ego_vehicle, spawn_point, list_sensor_spec):\n global sensors_callback\n\n if ego_vehicle:\n ego_vehicle.set_transform(spawn_point)\n sensors = None\n else:\n sensors = []\n blueprint_library = world.get_blueprint_library()\n blueprint = blueprint_library.filter('vehicle.lincoln.mkz2017')[0]\n ego_vehicle = world.try_spawn_actor(blueprint, spawn_point)\n\n # setup sensors\n for sensor_spec in list_sensor_spec:\n bp = blueprint_library.find(sensor_spec['type'])\n if sensor_spec['type'].startswith('sensor.camera'):\n bp.set_attribute('image_size_x', str(sensor_spec['width']))\n bp.set_attribute('image_size_y', str(sensor_spec['height']))\n bp.set_attribute('fov', str(sensor_spec['fov']))\n sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'], z=sensor_spec['z'])\n sensor_rotation = carla.Rotation(\n pitch=sensor_spec['pitch'],\n roll=sensor_spec['roll'],\n yaw=sensor_spec['yaw'])\n elif sensor_spec['type'].startswith('sensor.lidar'):\n bp.set_attribute('range', '200')\n bp.set_attribute('rotation_frequency', '10')\n bp.set_attribute('channels', '32')\n bp.set_attribute('upper_fov', '15')\n bp.set_attribute('lower_fov', '-30')\n bp.set_attribute('points_per_second', '500000')\n\n sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'], z=sensor_spec['z'])\n sensor_rotation = carla.Rotation(\n pitch=sensor_spec['pitch'],\n roll=sensor_spec['roll'],\n yaw=sensor_spec['yaw'])\n elif sensor_spec['type'].startswith('sensor.other.gnss'):\n sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'], z=sensor_spec['z'])\n sensor_rotation = carla.Rotation()\n\n # create sensor\n sensor_transform = carla.Transform(sensor_location, sensor_rotation)\n sensor = world.spawn_actor(bp, sensor_transform, ego_vehicle)\n\n # add callbacks\n sc = CallBack()\n sensor.listen(sc)\n\n sensors_callback.append(sc)\n sensors.append(sensor)\n\n return ego_vehicle, sensors\n\n\n# ======================================================================================================================\n# -- Benchmarking functions --------------------------------------------------------------------------------------------\n# ======================================================================================================================\n\ndef run_benchmark(world, sensor_specs_list, number_locations, number_ticks, actor_list, debug=False):\n global sensors_callback\n\n spawn_points = world.get_map().get_spawn_points()\n n = min(number_locations, len(spawn_points))\n\n ego_vehicle = None\n list_fps = []\n sensor_list = None\n for i in range(n):\n spawn_point = spawn_points[i]\n ego_vehicle, sensors = create_ego_vehicle(world, ego_vehicle, spawn_point, sensor_specs_list)\n if sensors:\n sensor_list = sensors\n ego_vehicle.set_autopilot(True)\n\n ticks = 0\n while ticks < number_ticks:\n _ = world.wait_for_tick(1000.0)\n if debug:\n print(\"== Samples {} / {}\".format(ticks + 1, 
number_ticks))\n\n min_fps = float('inf')\n for sc in sensors_callback:\n fps = sc.get_fps()\n if fps < min_fps:\n min_fps = fps\n if math.isinf(min_fps):\n min_fps = 0\n list_fps.append(min_fps)\n\n ticks += 1\n\n for sensor in sensor_list:\n sensor.stop()\n sensor.destroy()\n sensors_callback.clear()\n ego_vehicle.destroy()\n\n return list_fps\n\n\ndef compute_mean_std(list_values):\n np_values = np.array(list_values)\n\n mean = np.mean(np_values)\n std = np.std(np_values)\n\n return mean, std\n\n\ndef serialize_records(records, system_specs, filename):\n with open(filename, 'w+') as fd:\n s = \"| Sensors | Town | Weather | Samples | Mean fps | Std fps |\\n\"\n s += \"| ----------- | ----------- | ----------- | ----------- | ----------- | ----------- |\\n\"\n fd.write(s)\n\n for sensor_key in sorted(records.keys()):\n list_records = records[sensor_key]\n for record in list_records:\n s = \"| {} | {} | {} | {} | {:03.2f} | {:03.2f} |\\n\".format(record['sensors'],\n record['town'],\n record['weather'],\n record['samples'],\n record['fps_mean'],\n record['fps_std'])\n fd.write(s)\n\n s = \"| | | | | **{:03.2f}** | **{:03.2f}** |\\n\".format(*get_total(records))\n fd.write(s)\n\n s = \"Table: {}.\\n\".format(system_specs)\n fd.write(s)\n\n\ndef get_total(records):\n record_vals = [item for sublist in records.values() for item in sublist]\n total_mean_fps = sum([r['fps_mean'] for r in record_vals]) / len(record_vals)\n total_mean_std = sum([r['fps_std'] for r in record_vals]) / len(record_vals)\n return total_mean_fps, total_mean_std\n\n\ndef get_system_specs():\n str_system = \"\"\n cpu_info = cpuinfo.get_cpu_info()\n str_system += \"CPU {} {}. \".format(cpu_info['brand'], cpu_info['family'])\n\n memory_info = psutil.virtual_memory()\n str_system += \"{:03.2f} GB RAM memory. 
\".format(memory_info.total / (1024 * 1024 * 1024))\n\n nvidia_cmd = shutil.which(\"nvidia-smi\")\n if nvidia_cmd:\n gpu_info = subprocess.check_output([nvidia_cmd])\n gpu_info_ext = subprocess.check_output([nvidia_cmd, '-L'])\n for line in gpu_info.decode('ascii').split(\"\\n\"):\n if \"CarlaUE4\" in line:\n gpu_id = tr(' ', '', line, 's').split(\" \")[1]\n for gpu_line in gpu_info_ext.decode('ascii').split(\"\\n\"):\n gpu_line_id = gpu_line.split(\" \")[1].split(\":\")[0]\n if gpu_line_id == gpu_id:\n gpu_model = gpu_line.split(\":\")[1].split(\"(\")[0]\n str_system += \"GPU {}\".format(gpu_model)\n break\n\n return str_system\n\n\ndef main(args):\n client = carla.Client(args.host, int(args.port))\n client.set_timeout(60.0)\n pygame.init()\n\n records = {}\n for town in sorted(client.get_available_maps()):\n world = client.load_world(town)\n\n # spectator pointing to the sky to reduce rendering impact\n spectator = world.get_spectator()\n spectator.set_transform(carla.Transform(carla.Location(z=500), carla.Rotation(pitch=90)))\n\n for weather in weathers():\n world.set_weather(weather)\n for sensors in define_sensors():\n list_fps = run_benchmark(world, sensors, number_locations, number_ticks, actor_list)\n mean, std = compute_mean_std(list_fps)\n\n sensor_str = \"\"\n for sensor in sensors:\n sensor_str += (sensor['label'] + \" \")\n\n record = {'sensors': sensor_str,\n 'weather': weather,\n 'town': town,\n 'samples': number_locations * number_ticks,\n 'fps_mean': mean,\n 'fps_std': std}\n\n if sensor_str not in records:\n records[sensor_str] = []\n records[sensor_str].append(record)\n print(record)\n\n system_specs = get_system_specs()\n serialize_records(records, system_specs, args.file)\n pygame.quit()\n\n\nif __name__ == '__main__':\n description = \"Benchmark CARLA performance in your platform for different towns and sensor configurations\\n\"\n\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('--host', default='localhost', help='IP of the host server (default: localhost)')\n parser.add_argument('--port', default='2000', help='TCP port to listen to (default: 2000)')\n parser.add_argument('--file', type=str, help='Write results into a txt file', default=\"benchmark.md\")\n args = parser.parse_args()\n\n main(args)\n","sub_path":"PythonAPI/util/performance_benchmark.py","file_name":"performance_benchmark.py","file_ext":"py","file_size_in_byte":12751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"187761356","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 4 14:07:09 2017\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport networkx as nx\r\nimport pandas as pd\r\ndata = pd.read_csv('..\\data\\dolphins.txt',header = None)\r\nnewlist = []\r\ni = 0\r\nwhile i < len(data):\r\n if (((list(data.loc[i])[0])) != ' [') and (((list(data.loc[i])[0])) != ' ]') and (((list(data.loc[i])[0])) != ' edge'):\r\n newlist.append(list(data.loc[i])[0].split(' ')[5])\r\n i = i + 1\r\nnewlist1 = []\r\nj = 0\r\nwhile j WHEEL_CIRCUM_MM so 1 degree -> ?\nDEGREES_PER_MM=360/WHEEL_CIRCUM_MM\n \n#drive motors\nleft_motor=Motor(Port.C, Direction.CLOCKWISE)\nright_motor=Motor(Port.D, Direction.CLOCKWISE)\nrobot = DriveBase(left_motor, right_motor, WHEEL_DIAMETER_MM, AXLE_TRACK_MM)\ncrane_motor=Motor(Port.B, Direction.CLOCKWISE, [8,24])\n\n\ngyro=GyroSensor(Port.S1, Direction.COUNTERCLOCKWISE)\n# color_sensor_left = ColorSensor(Port.S1)\ncolor_sensor_right = ColorSensor(Port.S4)\n\n\ndef move_to_color(\n color_sensor,\n stop_on_color,\n speed_mm_s):\n \n robot.drive(speed_mm_s, 0)\n # Check if color reached.\n while color_sensor.color() != stop_on_color:\n wait(10)\n\n robot.stop(stop_type=Stop.BRAKE)\n\ndef move_straight(distance, speed_mm_s):\n left_motor.reset_angle(0)\n motor_target_angle = int(DEGREES_PER_MM * distance)\n robot.drive(speed_mm_s, 0)\n\n while (abs(left_motor.angle()) < abs(motor_target_angle)):\n wait(20)\n\n robot.stop(stop_type=Stop.BRAKE)\n\n\ndef turn(angle):\n robot.drive_time(0, angle, 1000)\n\n\n#move_to_color(color_sensor=color_sensor_right, stop_on_color=Color.RED, speed_mm_s=100)\n#move_to_color(color_sensor=color_sensor_right, stop_on_color=Color.BLUE, speed_mm_s=150)\nturn(90)\nmove_straight(400, 450)","sub_path":"ms2020/alaina/movingrobotexercise.py","file_name":"movingrobotexercise.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"267126393","text":"\nimport cPickle\nfrom scipy.ndimage.interpolation import zoom\n\n\nimport os\nimport random\nimport sys\nimport warnings\nimport numpy as np\nfrom itertools import chain\nfrom skimage.io import imread, imshow, imread_collection, concatenate_images\nfrom skimage.transform import resize\nfrom skimage.morphology import label\n# from keras.utils import Progbar\n\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\n\n# Setting seed for reproducability\nseed = 42\nrandom.seed = seed\nnp.random.seed = seed\nprint(os.getcwd())\n# Data Path\nTRAIN_PATH = '/opt/project/MIA/dataset/liver/train_103_pkl/'\nTEST_PATH = '/opt/project/MIA/dataset/liver/test_27_pkl/'\n#\n# # Get train and test IDs\n# train_ids = next(os.walk(TRAIN_PATH))[1]\n# test_ids = next(os.walk(TEST_PATH))[1]\n#\n# # Function read train images and mask return as nump array\n# def read_train_data(IMG_WIDTH=256,IMG_HEIGHT=256,IMG_CHANNELS=3):\n# X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\n# Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n# print('Getting and resizing train images and masks ... ')\n# sys.stdout.flush()\n# if os.path.isfile(\"train_img.npy\") and os.path.isfile(\"train_mask.npy\"):\n# print(\"Train file loaded from memory\")\n# X_train = np.load(\"train_img.npy\")\n# Y_train = np.load(\"train_mask.npy\")\n# return X_train,Y_train\n# a = Progbar(len(train_ids))\n# for n, id_ in enumerate(train_ids):\n# path = TRAIN_PATH + id_\n# img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\n# img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n# X_train[n] = img\n# mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n# for mask_file in next(os.walk(path + '/masks/'))[2]:\n# mask_ = imread(path + '/masks/' + mask_file)\n# mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',\n# preserve_range=True), axis=-1)\n# mask = np.maximum(mask, mask_)\n# Y_train[n] = mask\n# a.update(n)\n# np.save(\"train_img\",X_train)\n# np.save(\"train_mask\",Y_train)\n# return X_train,Y_train\n\ndef boundingBox( A, use2D=False):\n B = np.argwhere(A)\n if use2D == True:\n (ystart, xstart), (ystop, xstop) = B.min(axis=0), B.max(axis=0) + 1\n return (ystart, xstart), (ystop, xstop)\n else:\n (zstart, ystart, xstart), (zstop, ystop, xstop) = B.min(axis=0), B.max(axis=0) + 1\n return (zstart, ystart, xstart), (zstop, ystop, xstop)\n\ndef _yieldTrain(ROIdir = TRAIN_PATH):\n\n prob = ['02_FirstData_linyueguang']\n cnt = 0\n uniSize = 128.0\n batchSize = 16.0\n patchBatch, labelBatch = [], []\n while True:\n for i in os.listdir(ROIdir):\n # print 'haha', cnt, i\n if i.split('.')[0] in prob:\n print ('skip')\n continue\n cnt += 1\n name = os.path.join(ROIdir, i)\n # f = file(name, 'rb')\n f = open(name, \"r+\")\n roi = cPickle.load(f)\n roiMeta = roi['img']\n roiMeta[roiMeta < -250] = 0\n roiMeta[roiMeta > 250 ] = 0\n # roiMeta = limitedEqualize(roiMeta)\n roiSeg = roi['seg']\n # liver_mask = roi['liver_mask']\n f.close()\n try:\n (zstart, ystart, xstart), (zstop, ystop, xstop) = boundingBox(roiSeg, use2D=False)\n except Exception:\n print('-' * 30)\n continue\n if (ystop - ystart) < 16 or (xstop - xstart) < 16:\n print('*' * 30)\n continue\n\n for zGT0 in xrange(zstart, zstop):\n # if roiSeg[zGT0].sum() < 2:\n # print 'err'\n # continue\n # (ystart, xstart), (ystop, xstop) = self.boundingBox(roiSeg[zGT0], use2D=True)\n # ylen = ystop - ystart\n # xlen = 
xstop - xstart\n # if ylen < 16 or xlen < 16:\n # continue\n # ystart -= redundentNew\n # ystop += redundentNew\n # xstart -= redundentNew\n # xstop += redundentNew\n # ylen = ystop - ystart\n # xlen = xstop - xstart\n #\n # pad2y = int(math.ceil((ylen / 16.0)) * 16 - ylen)\n # pad2x = int(math.ceil((xlen / 16.0)) * 16 - xlen)\n # yl = pad2y // 2\n # yr = pad2y - yl\n # xl = pad2x // 2\n # xr = pad2x - xl\n # meta1slice = roiMeta[zGT0, ystart - yl:ystop + yr, xstart - xl:xstop + xr]\n meta1slice = roiMeta[zGT0, :, :]\n # if meta1slice.shape[0] != 3:\n # print 'patch err',meta1slice.shape\n # continue\n # seg1slice = roiSeg[zGT0, ystart - yl:ystop + yr, xstart - xl:xstop + xr]\n seg1slice = roiSeg[zGT0, :, :]\n meta1slice = zoom(meta1slice, zoom=[uniSize / meta1slice.shape[0], uniSize / meta1slice.shape[1]],\n order=3)\n seg1slice = zoom(seg1slice, zoom=[uniSize / seg1slice.shape[0], uniSize / seg1slice.shape[1]],\n order=1)\n seg1slice[seg1slice > 0.51] = 1\n seg1slice[seg1slice < 0.52] = 0\n\n patch = meta1slice\n patch = np.asarray(patch, dtype=np.float32)\n # patch[patch<0] = 0\n # patch[patch>100] = 100\n patch = (patch - patch.mean()) / patch.std()\n label = np.asarray(seg1slice, dtype=np.float32)\n label = np.expand_dims(label, axis=-1)\n # patch = np.concatenate((patch,label),axis=0)\n patch = np.expand_dims(patch, axis=-1)\n # print 'patch',patch.shape\n # print 'max',label.max()\n if len(patchBatch) != batchSize:\n patchBatch.append(patch)\n labelBatch.append(label)\n print('&' * 30)\n continue\n else:\n dataTrainBatch = np.asarray(patchBatch)\n labelTrainBatch = np.asarray(labelBatch)\n patchBatch = []\n labelBatch = []\n \"\"\"\n for kk in xrange(3):\n plt.figure('ori'+str(kk))\n plt.imshow(dataTrainBatch[kk,:,:,0],'gray')\n plt.figure('label'+str(kk))\n plt.imshow(labelTrainBatch[kk,:,:,0],'gray')\n plt.show()\n return\n \"\"\"\n yield dataTrainBatch, labelTrainBatch\n\ndef generateMultiMask(ROIdir):\n\n for i in os.listdir(ROIdir):\n name = os.path.join(ROIdir, i)\n # f = file(name, 'rb')\n f = open(name, \"r+\")\n roi = cPickle.load(f)\n roiSeg = roi['seg']\n f.close()\n\n trainPatch, trainLabel = sampleGenerator.next()\n\n print('trainPatch.shape,trainLabel.shape', trainPatch.shape, trainLabel.shape)\n\n from skimage import measure, color\n\n data = np.squeeze(trainLabel[3, :, :, :])\n data[10:20, 10:20] = 1\n data[50:70, 50:70] = 1\n blobs_labels = measure.label(data, connectivity=2)\n blobs_unique = np.unique(blobs_labels.flatten())\n print(blobs_unique)\n\n multiMask = []\n for label_single in blobs_unique:\n\n if label_single != 0:\n maskdata = data.copy()\n maskdata[maskdata != label_single] = 0\n if label_single == 1:\n multiMask = maskdata\n else:\n multiMask = np.dstack((multiMask, maskdata))\n multiMask = np.asarray(multiMask)\n print(multiMask.shape)\n\n\ndef multiMask(roiSeg):\n from skimage import measure,color\n labels = measure.label(data,connectivity=2)\n measure.regionprops()\n\n\nif __name__ == '__main__':\n\n sampleGenerator = _yieldTrain()\n\n for i in xrange(2) :\n trainPatch, trainLabel = sampleGenerator.next()\n\n print('trainPatch.shape,trainLabel.shape', trainPatch.shape,trainLabel.shape)\n\n from skimage import measure, color\n\n\n data = np.squeeze(trainLabel[3,:,:,:])\n data[10:20,10:20]=1\n data[50:70,50:70]=1\n blobs_labels = measure.label(data, connectivity=2)\n blobs_unique = np.unique(blobs_labels.flatten())\n print(blobs_unique)\n\n\n multiMask = []\n for label_single in blobs_unique :\n\n if label_single !=0:\n maskdata = data.copy()\n 
maskdata[maskdata!=label_single]=0\n if label_single ==1:\n multiMask = maskdata\n else:\n multiMask = np.dstack((multiMask,maskdata))\n multiMask = np.asarray(multiMask)\n print(multiMask.shape)\n\n\n\n\n # measure.regionprops()\n\n\n # loss = model.train_on_batch(trainPatch, trainLabel)\n # lossList.append(loss)\n # log_mesg = \"[G loss: %f, acc: %f]\" % (loss[0], loss[1])\n","sub_path":"data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":9323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"151008188","text":"from fog05 import FIMAPI\nfrom fog05_sdk.interfaces.FDU import FDU\nimport paho.mqtt.client as mqtt\nimport uuid\nimport json\nimport sys\nimport os\nimport time\nimport math\nfrom time import gmtime, strftime\nfrom web3 import Web3, HTTPProvider, IPCProvider\nfrom web3.contract import ConciseContract\n\nblockchain_ip = \"163.117.140.69\"\nblockchain_port = \"7545\"\nweb3= Web3(Web3.WebsocketProvider(\"ws://\"+blockchain_ip+\":\"+blockchain_port))\nabi_path = \"../../smart-contracts/build/contracts/\"\nwith open(abi_path+\"Federation.json\") as c_json:\n contract_json = json.load(c_json)\n\ncontract_abi = contract_json[\"abi\"]\ncontract_address = Web3.toChecksumAddress('0x91f02525F21B7F6C89E6feC4AdD85559121F9A23')\n\nFederation_contract = web3.eth.contract(abi= contract_abi, address = contract_address)\n\ncoinbase = web3.eth.coinbase\neth_address = web3.eth.accounts\nblock_address = \"\"\nservice_id = \"\"\n\n################### MQTT ###################################\n\nap_x = float(30.4075826699)\nap_y = float(-7.67201633367)\n\ndef compute_distance(x,y):\n distance = float((x-ap_x)*(x-ap_x) + (y-ap_y)*(y-ap_y))\n return math.sqrt(distance)\n\nMQTT_IP=\"192.168.122.3\"\nMQTT_PORT=1883\nMQTT_TOPIC=\"/experiment/location\"\nrobot_connected = False\nmqtt_federation_trigger = False\nmqtt_federation_usage = False\nentered_in_the_close_range = False\n\nstart_federation_distance = float(4.0)\n\ndef on_connect(client, userdata, flags, rc):\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(MQTT_TOPIC)\n\ndef on_message(client, userdata, msg):\n global entered_in_the_close_range\n global mqtt_federation_trigger\n global start_federation_distance\n global robot_connected\n print('received message: \\n%s over topic: %s' % (msg,\n MQTT_TOPIC))\n # print('received message %s' % str(msg.payload))\n\n\n # Check for byte encoding just in case\n if type(msg.payload) == bytes:\n message = json.loads(msg.payload.decode(\"UTF-8\"))\n else:\n message = json.loads(msg.payload)\n\n if \"center\" in message and len(message[\"center\"])>0:\n distance = compute_distance(float(message[\"center\"][0]), float(message[\"center\"][1]))\n print(\"Distance:\", distance)\n \n if distance < start_federation_distance:\n print(\"Triggered Federation!\")\n mqtt_federation_trigger = True\n \n else:\n mqtt_federation_trigger = False\n \n if \"connected\" in message:\n print(\"Robot connection message\")\n if message[\"connected\"] == True:\n print(\"Robot connected True\")\n robot_connected = True\n else:\n print(\"Robot connected False\")\n robot_connected = False\n\n#___________________________________________________________\n\nmeasurement = {}\nstart_measured = False\nresult_path= \"../../results/\"\n\ndef measure(label):\n global measurement\n if label == 'start':\n measurement[\"start\"] = time.time()\n elif label == 'end':\n measurement[\"end\"] = time.time() - measurement['start']\n result_string = strftime(\"%H%M\", gmtime()) + \"_\"+ measurement['domain']\n result_file = result_path+\"result\"+ result_string +'.json'\n with open(result_file, 'w') as result_json:\n json.dump(measurement, result_json)\n print(\"=========> MEASUREMENT RECORDED: \\n%s\\n====================================\\n\" % result_file)\n elif label == '':\n measurement[int(time.time())] = time.time() - measurement['start']\n print(\"Time without label registered\")\n else:\n measurement[label] = 
time.time()-measurement['start']\n\n\n####### README ######\n#\n# Update n1 and n2 according to your node ids in the two domains.\n#\nDESC_FOLDER = 'descriptors'\nnet_desc = ['net.json']\ndescs_d1 = ['gw.json','radius.json','ap1.json']\ndescs_d2 = ['ap2.json']\n\nd1_n1 = 'dc02633d-491b-40b3-83be-072748142fc4' #fog02\nd1_n2 = 'c9f23aef-c745-4f58-bd59-3603fc1721b6' #fog03\nd2_n1 = '1e03d6b9-908e-44e6-9fc2-3282e38c442d' #fog01\n\nIP1 = \"163.117.139.226\"\nIP2 = \"163.117.139.70\"\n\ndef restartBrainMachine():\n stream = os.popen('virsh list')\n virsh_list = stream.read()\n virsh_list = virsh_list.split(\"brain_kiril\")\n if len(virsh_list) == 2 and \"running\" in virsh_list[1]:\n stream = os.popen('virsh shutdown brain_kiril')\n print(\"Brain is shutting down\")\n shutdown = False\n while shutdown == False:\n stream = os.popen('virsh list')\n virsh_list = stream.read()\n virsh_list = virsh_list.split(\"brain_kiril\")\n if len(virsh_list) == 1:\n shutdown = True\n stream = os.popen('virsh start brain_kiril')\n virsh_started = stream.read()\n print(\"Brain has started\")\n else:\n stream = os.popen('virsh start brain_kiril')\n virsh_started = stream.read()\n print(\"Brain has started\")\n \n \n\ndef generateServiceId():\n time_string = strftime(\"%H%M\", gmtime())\n service_id = \"service\"+ time_string\n return service_id\n\ndef read_file(filepath):\n with open(filepath, 'r') as f:\n data = f.read()\n return data\n\ndef read(filepath):\n with open(filepath, 'r') as f:\n data = f.read()\n return data\n\ndef get_net_info(api, netid):\n nets = api.network.list()\n ni = [x for x in nets if x['uuid'] == netid]\n if len(ni) > 0:\n return ni[0]\n return None\n\ndef filterOutBytes(string):\n result = string.split('\\x00')\n if len(result)>0:\n return result[0]\n else:\n return string\n\ndef net_deploy(network_desc,api,node):\n for d in network_desc:\n path_d = os.path.join(DESC_FOLDER,d)\n net_d = json.loads(read(path_d))\n n_uuid = net_d.get('uuid')\n # input(\"Press enter to create network\")\n net_info = get_net_info(api,net_d['uuid'])\n if net_info is None:\n api.network.add_network(net_d)\n net_info = get_net_info(api,net_d['uuid'])\n print('Net info {}'.format(net_info))\n # input('press enter to network creation')\n api.network.add_network_to_node(net_info['uuid'], node)\n time.sleep(1)\n\ndef container_deploy(descs,api):\n for d in descs:\n path_d = os.path.join(DESC_FOLDER,d)\n fdu_d = FDU(json.loads(read(path_d)))\n # input('press enter to onboard descriptor')\n measure('on_board_'+d)\n res = api.fdu.onboard(fdu_d)\n e_uuid = res.get_uuid()\n # input('Press enter to define')\n inst_info = api.fdu.define(e_uuid)\n print(inst_info.to_json())\n instid = inst_info.get_uuid()\n measure('configure_'+d)\n # input('Press enter to configure')\n api.fdu.configure(instid)\n measure('start_'+d)\n # input('Press enter to start')\n api.fdu.start(instid)\n measure('info_'+d)\n # input('Press get info')\n info = api.fdu.instance_info(instid)\n print(info.to_json())\n\ndef packNetData(net_info):\n net = {}\n uuid = net_info['uuid'].split('-')\n if len(uuid)< 6:\n net[\"uuid_1\"] = uuid[0] + \"-\" + uuid[1] + \"-\" + uuid[2]\n net[\"uuid_2\"] = uuid[3] + \"-\" + uuid[4]\n net['name'] = net_info['name']+';'+net_info['net_type']+';'+str(net_info['port'])+';'+str(net_info['vni'])\n net_name_bytes = web3.toBytes(text= net['name'])\n print(\"Packed OK\") if web3.is_encodable(_type= 'bytes32', value= net_name_bytes) else print(\"Packing failed!\")\n net['net_type'] = net_info['mcast_addr']\n net['is_mgmt'] = 
net_info['is_mgmt']\n return net\n\ndef UnpackNetData(service_info):\n net_info ={}\n net_info['uuid'] = filterOutBytes(web3.toText(service_info[2]))+ \"-\" + filterOutBytes(web3.toText(service_info[3]))\n raw_string = filterOutBytes(web3.toText(service_info[4]))\n net_info['name'] = raw_string.split(';')[0]\n net_info['net_type'] = raw_string.split(';')[1]\n net_info['port'] = raw_string.split(';')[2]\n net_info['vni'] = raw_string.split(';')[3]\n net_info['mcast_addr'] = filterOutBytes(web3.toText(service_info[5]))\n net_info['is_mgmt'] = service_info[6]\n\n return net_info\n \ndef RegisterDomain(domain_name):\n tx_hash = Federation_contract.functions.addOperator(Web3.toBytes(text=domain_name)).transact({'from': block_address})\n return tx_hash\n\ndef AnnounceService(net_info, service_id, trusty):\n if trusty == 'untrusty':\n net_info = packNetData(net_info)\n new_service = Federation_contract.functions.AnnounceService(\\\n _requirements= web3.toBytes(text = trusty),\\\n _id = web3.toBytes(text = service_id),\\\n endpoint_uuid_1= web3.toBytes(text = net_info[\"uuid_1\"]),\\\n endpoint_uuid_2= web3.toBytes(text = net_info[\"uuid_2\"]),\\\n endpoint_name= web3.toBytes(text = net_info[\"name\"]),\\\n endpoint_net_type= web3.toBytes(text = net_info[\"net_type\"]),\\\n endpoint_is_mgmt= net_info[\"is_mgmt\"]).transact({'from':block_address})\n else:\n uuid = net_info['uuid'].split('-')\n if len(uuid)< 6:\n e_uuid_1 = uuid[0] + \"-\" + uuid[1] + \"-\" + uuid[2]\n e_uuid_2 = uuid[3] + \"-\" + uuid[4]\n print(\"Service announced with id: \",service_id )\n new_service = Federation_contract.functions.AnnounceService(\\\n _requirements= web3.toBytes(text = trusty),\\\n _id = web3.toBytes(text = service_id),\\\n endpoint_uuid_1= web3.toBytes(text = e_uuid_1),\\\n endpoint_uuid_2= web3.toBytes(text = e_uuid_2),\\\n endpoint_name= web3.toBytes(text = net_info[\"name\"]),\\\n endpoint_net_type= web3.toBytes(text = net_info[\"net_type\"]),\\\n endpoint_is_mgmt= net_info[\"is_mgmt\"]).transact({'from':block_address})\n block = web3.eth.getBlock('latest')\n blocknumber = block['number']\n #event_filter = Federation_contract.events.NewBid.createFilter(fromBlock=web3.toHex(blocknumber), argument_filters={'_id':web3.toBytes(text= service_id)})\n event_filter = Federation_contract.events.NewBid.createFilter(fromBlock=web3.toHex(blocknumber))\n return event_filter\n\ndef GetBidInfo(bid_index, service_id):\n bid_info = Federation_contract.functions.GetBid(_id= web3.toBytes(text= service_id), bider_index= bid_index, _creator=block_address).call()\n return bid_info\n\ndef ChooseProvider(bid_index, service_id):\n chosen_provider = Federation_contract.functions.ChooseProvider(_id= web3.toBytes(text= service_id), bider_index= bid_index).transact({'from':block_address})\n\ndef GetServiceState(serviceid):\n service_state = Federation_contract.functions.GetServiceState(_id = web3.toBytes(text= serviceid)).call()\n #print(\"Service State: \",service_state)\n return service_state\n\ndef GetServiceInfo(service_id, is_provider):\n service_info = Federation_contract.functions.GetServiceInfo(_id = web3.toBytes(text= service_id),\\\n provider= is_provider, call_address= block_address).call()\n # if web3.toText(service_info[0]) == service_id:\n requirement = filterOutBytes(web3.toText(service_info[1]))\n if requirement == 'untrusty':\n net_d_info = UnpackNetData(service_info)\n net_d_info[\"privacy\"] = requirement\n else:\n net_d_info = {\"uuid\": (filterOutBytes(web3.toText(service_info[2]))+ \"-\" + 
filterOutBytes(web3.toText(service_info[3]))),\\\n \"name\": filterOutBytes(web3.toText(service_info[4])), \\\n \"net_type\": filterOutBytes(web3.toText(service_info[5])), \\\n \"is_mgmt\": service_info[6],\n \"privacy\": requirement}\n \n return net_d_info\n \ndef ServiceAnnouncementEvent():\n block = web3.eth.getBlock('latest')\n blocknumber = block['number']\n print(\"\\nLatest block:\",blocknumber)\n event_filter = Federation_contract.events.ServiceAnnouncement.createFilter(fromBlock=web3.toHex(blocknumber))\n return event_filter\n\ndef PlaceBid(service_id):\n #Function that can be extended to send provider to consumer information\n service_price = 5\n Federation_contract.functions.PlaceBid(_id= web3.toBytes(text= service_id), _price= service_price,\\\n endpoint_uuid_1= web3.toBytes(text = \"hostapd\"), \\\n endpoint_uuid_2= web3.toBytes(text = \"ready\"),\\\n endpoint_name= web3.toBytes(text = \"04:f0:21:4f:fe:0a\"),\\\n endpoint_net_type= web3.toBytes(text = \"running\"),\\\n endpoint_is_mgmt= False).transact({'from':block_address})\n block = web3.eth.getBlock('latest')\n blocknumber = block['number']\n print(\"\\nLatest block:\",blocknumber)\n event_filter = Federation_contract.events.ServiceAnnouncementClosed.createFilter(fromBlock=web3.toHex(blocknumber))\n return event_filter\n\ndef CheckWinner(service_id):\n state = GetServiceState(service_id)\n result = False\n if state == 1:\n result = Federation_contract.functions.isWinner(_id= web3.toBytes(text= service_id), _winner= block_address).call()\n print(\"Am I a Winner? \", result)\n return result\n\ndef ServiceDeployed(service_id):\n result = Federation_contract.functions.ServiceDeployed(info= web3.toBytes(text= \"hostapd\"), _id= web3.toBytes(text= service_id)).transact({'from':block_address})\n\ndef deploy_admin1():\n a = FIMAPI(IP1)\n # Get the nodes from the domain \n print('Deploying on consumer nodes')\n nodes = a.node.list()\n if len(nodes) == 0:\n print('No nodes')\n exit(-1)\n # Print the nodes from the domain\n print('Nodes:')\n for n in nodes:\n print('UUID: {}'.format(n))\n measurement[\"domain\"] = 'consumer'\n measure('start') \n time.sleep(1)\n net_deploy(net_desc,a,d1_n1)\n time.sleep(1)\n net_deploy(net_desc,a,d1_n2)\n time.sleep(1)\n container_deploy(descs_d1,a)\n path_d = os.path.join(DESC_FOLDER,net_desc[0])\n net_d = json.loads(read(path_d))\n time.sleep(1)\n net_info = get_net_info(a,net_d['uuid'])\n restartBrainMachine()\n print(\"Deployment finished\")\n\ndef consumer(trusty):\n global mqtt_federation_trigger\n global robot_connected\n global measurement\n\n a = FIMAPI(IP1)\n #Configure measurements\n measurement[\"domain\"] = 'consumer'\n print('Consumer on already deployed nodes')\n nodes = a.node.list()\n if len(nodes) == 0:\n print('No nodes')\n exit(-1)\n # Print the nodes from the domain\n print('Nodes:')\n for n in nodes:\n print('UUID: {}'.format(n))\n path_d = os.path.join(DESC_FOLDER,net_desc[0])\n net_d = json.loads(read(path_d))\n # time.sleep(1)\n net_info = get_net_info(a,net_d['uuid'])\n########## FEDERATION STARTS HERE ###########################################################\n service_id = generateServiceId()\n print(\"SERVICE ID to be used: \", service_id)\n if trusty == 'trusty':\n net_info[\"net_type\"] = IP1\n print(net_info)\n if mqtt_federation_usage:\n #Configure Mqtt\n client = mqtt.Client(None, clean_session=True)\n client.on_connect = on_connect\n client.on_message = on_message\n client.connect(MQTT_IP, MQTT_PORT, 60)\n client.loop_start()\n print(\"Waiting for Federation 
request via MQTT\\n\")\n while mqtt_federation_trigger == False:\n # print(\".\")\n time.time()\n print(\"continued\")\n client.loop_stop()\n else: \n print(\"\\nSERVICE_ID:\",service_id)\n debug_txt = input(\"\\nCreate Service announcement....(ENTER)\")\n measure('start')\n start = time.time()\n bids_event = AnnounceService(net_info, service_id, trusty)\n measure('request_federation')\n newService_event = ServiceAnnouncementEvent()\n check_event = newService_event.get_all_entries()\n if len(check_event) > 0:\n measure('federation_announced')\n bidderArrived = False\n while bidderArrived == False:\n new_events = bids_event.get_all_entries()\n for event in new_events:\n event_id = str(web3.toText(event['args']['_id']))\n print(service_id, web3.toText(event['args']['_id']), event['args']['max_bid_index'])\n #if event_id == web3.toText(text= service_id):\n bid_index = int(event['args']['max_bid_index'])\n bidderArrived = True\n if int(bid_index) < 2:\n measure('choosing_provider')\n bid_info = GetBidInfo(int(bid_index-1), service_id)\n print(bid_info)\n ChooseProvider(int(bid_index)-1, service_id)\n measure('provider_deploys')\n break\n serviceDeployed = False\n while serviceDeployed == False:\n serviceDeployed = True if GetServiceState(service_id) == 2 else False\n measure('federation_completed')\n serviceDeployedInfo = GetServiceInfo(service_id, False)\n end = time.time()\n print(serviceDeployedInfo)\n print(\"SERVICE FEDERATED!\")\n print(\"Time it took:\", int(end-start))\n########## FEDERATION FINISH HERE ###########################################################\n if mqtt_federation_usage:\n MQTT_MSG=json.dumps({\"mac\": serviceDeployedInfo[\"name\"]})\n client.publish(\"/experiment/allocation\",MQTT_MSG)\n measure('robot_migration')\n client.subscribe(\"/robot/connection\")\n client.loop_start()\n print(\"Robot connecting to the new AP.....\")\n while robot_connected == False:\n time.time()\n measure('robot_connected')\n client.loop_stop()\n print(\"Robot has connected!\") \n measure('end')\n exit(0)\n else:\n measure('end')\n input('Press enter to exit (containers and networks not terminated)')\n exit(0)\n\ndef provider():\n global measurement\n measurement[\"domain\"] = 'provider'\n\n provider_domain = FIMAPI(IP2)\n \n service_id = ''\n print(\"\\nSERVICE_ID:\",service_id)\n debug_txt = input(\"\\nStart listening for federation events....(ENTER)\")\n newService_event = ServiceAnnouncementEvent()\n newService = False\n open_services = []\n print(\"Waiting for federation event....\")\n while newService == False:\n new_events = newService_event.get_all_entries()\n for event in new_events:\n service_id = web3.toText(event['args']['id'])\n if GetServiceState(service_id) == 0:\n open_services.append(service_id)\n if len(open_services) > 0:\n measure('start')\n print(\"OPEN = \", len(open_services))\n newService = True\n service_id = open_services[-1]\n measure('bid_placed')\n winnerChosen_event = PlaceBid(service_id)\n winnerChosen = False\n while winnerChosen == False:\n new_events = winnerChosen_event.get_all_entries()\n for event in new_events:\n event_serviceid = web3.toText(event['args']['_id'])\n if event_serviceid == service_id:\n measure('winner_choosen')\n winnerChosen = True\n break\n am_i_winner = CheckWinner(service_id)\n if am_i_winner == True:\n measure('deployment_start')\n net_d = GetServiceInfo(service_id, True)\n########## FEDERATED SERVICE DEPLOYMENT HERE ###########################################################\n print(net_d)\n if net_d['privacy'] == \"trusty\": \n 
print(\"Trusty federation\")\n # a2 = FIMAPI(net_d[\"net_type\"])\n measure('trusty_info_get')\n consumer_domain = FIMAPI(net_d[\"net_type\"])\n net_info = get_net_info(consumer_domain,net_d['uuid'])\n print(consumer_domain.network.list())\n print('Net info {}'.format(net_info))\n else:\n measure('untrusty_info_get')\n print(\"Untrusty federation\")\n net_info = net_d\n \n # Create network based on the descriptor\n # Get info if the network is created\n print(net_d['uuid'], net_d['net_type'])\n \n measure('net_deploy')\n provider_domain.network.add_network(net_info)\n # Add the created network to the node (n1)\n # input('press enter to network creation')\n measure('net_add')\n time.sleep(1)\n provider_domain.network.add_network_to_node(net_info['uuid'], d2_n1)\n\n measure('container_deploy')\n time.sleep(1)\n container_deploy(descs_d2,provider_domain)\n######################### UNTIL HERE ####################################################################\n measure('deployment_finished')\n ServiceDeployed(service_id)\n else:\n print(\"I am not a Winner\")\n measure('end')\n print('EXIT (containers and networks not terminated)')\n exit(0)\n\nif __name__ == '__main__':\n print(\"Blockchain addresses:\", eth_address)\n print(sys.argv)\n if len(sys.argv) < 2:\n print('[Usage] {} -register(optional)'.format(\n sys.argv[0]))\n exit(0)\n if len(sys.argv) == 4:\n if sys.argv[3] == 'mqtt':\n mqtt_federation_usage = True\n if sys.argv[1] == 'consumer':\n if len(sys.argv) > 2 and sys.argv[2] == \"deploy\":\n deploy_admin1()\n else:\n block_address = coinbase\n domain_name = \"AD1\"\n print(sys.argv[1], sys.argv[2])\n try:\n print(\"Registering....\")\n tx_hash = RegisterDomain(domain_name)\n except ValueError as e:\n print(e)\n finally:\n print(\"Starting consumer domain....\")\n if sys.argv[2] == 'trusty' or sys.argv[2] == 'untrusty':\n consumer(sys.argv[2])\n else:\n print(\"Please use \\'trusty\\' or \\'untrusty\\' or \\'deploy\\' for the argument {}\" .format(sys.argv[2]))\n exit(0)\n elif sys.argv[1] == 'provider':\n block_address = eth_address[1]\n domain_name = \"AD2\"\n try:\n print(\"Registering....\")\n tx_hash = RegisterDomain(domain_name)\n except ValueError as e:\n print(e)\n finally:\n print(\"Starting provider domain....\")\n provider()\n else:\n exit(0)\n","sub_path":"fog05/federation/stable version/dlt_federation_fixed.py","file_name":"dlt_federation_fixed.py","file_ext":"py","file_size_in_byte":21996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"302586343","text":"import numpy as np\nimport tensorflow as tf\n\nwith tf.variable_scope(\"main\", dtype=tf.float32, reuse=tf.AUTO_REUSE):\n with tf.variable_scope(\"holder\"):\n x = tf.placeholder(tf.float32, shape=[None, 10, 10, 1])\n with tf.variable_scope(\"conv\"):\n c1 = tf.layers.conv2d(x, 3, [3, 3], strides=[1, 1], padding=\"same\", name=\"c1\")\n c2 = tf.layers.conv2d(c1, 1, [3, 3], strides=[1, 1], padding=\"same\", name=\"c2\")\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nfor op in sess.graph.get_operations():\n print(op.name)\n\nprint()\nfeed = np.random.normal(size=[1, 10, 10, 1]).astype(np.float32)\n\nprint(sess.run(\"main/conv/c1/Conv2D:0\", feed_dict={x: feed}).shape)\nprint(sess.run(\"main/conv/c2/Conv2D:0\", feed_dict={x: feed}).shape)\n\nprint()\n\nprint(sess.run(\"main/conv/c1/Conv2D:0\", feed_dict={\"main/holder/Placeholder:0\": feed}).shape)\nprint(sess.run(\"main/conv/c2/Conv2D:0\", feed_dict={x: feed}).shape)\n","sub_path":"tensorflow_tests/graph_conv_visualization.py","file_name":"graph_conv_visualization.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"452857466","text":"import gspread\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nfrom pprint import pprint\r\nscope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/spreadsheets',\r\n'https://www.googleapis.com/auth/drive.file','https://www.googleapis.com/auth/drive']\r\n\r\ncreds = ServiceAccountCredentials.from_json_keyfile_name('NTU-Coin-27f11025174e.json', scope)\r\nclient = gspread.authorize(creds)\r\n\r\n# 把第X張worksheet的資料用爬蟲爬下來\r\ndef crawler(worksheet_index):\r\n sheet = client.open('NTU Coin').get_worksheet(worksheet_index)\r\n data = sheet.get_all_records()\r\n return data\r\n\r\nrawExchange = crawler(2)\r\nrawSaving = crawler(4)\r\nrawMission = crawler(5)\r\n\r\n# 將各張worksheet的資料加上屬於該worksheet的類別\r\ndef add_category(records, category_name):\r\n for i in range(len(records)):\r\n records[i]['Category'] = category_name\r\n return records\r\n\r\nExchangeRecords = add_category(rawExchange, '貨幣交換')\r\nSavingRecords = add_category(rawSaving, '儲值記錄')\r\nMissionRecords = add_category(rawMission, '任務記錄')\r\n\r\n# 將各張worksheet的資料合併為一個list\r\ndata = []\r\nfor i in (ExchangeRecords, SavingRecords, MissionRecords):\r\n for j in range(len(i)):\r\n if int(i[j].get('Amount')) == 0:\r\n pass\r\n else:\r\n data.append(i[j])\r\n\r\n\r\n# 更改日期格式\r\ndef trans_time(adict):\r\n origin_time = adict.get('Time')\r\n new_time = origin_time[:10]\r\n adict['Time'] = new_time\r\n return adict\r\n\r\nuser = input('請輸入欲查詢的user = ')\r\n\r\n# 挑出資料庫中,指定user的記錄\r\nall_user_records = []\r\nfor i in range(len(data)):\r\n if data[i].get('Account') == user:\r\n all_user_records.append(trans_time(data[i]))\r\n\r\n# 依照日期排序\r\nall_user_records.sort(key=lambda all_user_records:all_user_records[\"Time\"]) \r\n\r\n# 分為收入與支出\r\nincome_records = [] # 收入記錄\r\npayment_records = [] # 支出記錄\r\nfor i in range(len(all_user_records)):\r\n if all_user_records[i].get('Status') == 'norm-' or all_user_records[i].get('Status') == 'spec-':\r\n payment_records.append(all_user_records[i])\r\n else:\r\n income_records.append(all_user_records[i])\r\n\r\n\r\n# 時間篩選器\r\nimport time\r\nimport datetime\r\n\r\ncurrent_date = time.strftime('%Y-%m-%d')\r\nweek_ago = (datetime.datetime.now() + datetime.timedelta(days=-7)).strftime('%Y-%m-%d')\r\nmonth_ago = (datetime.datetime.now() + datetime.timedelta(days=-30)).strftime('%Y-%m-%d')\r\nyear_ago = (datetime.datetime.now() + datetime.timedelta(days=-365)).strftime('%Y-%m-%d')\r\n\r\ndef select_time(records, param1):\r\n new_records = []\r\n if param1 == '全部':\r\n new_records = records\r\n elif param1 == '過去一週':\r\n for i in range(len(records)):\r\n if records[i].get('Time') >= week_ago:\r\n new_records.append(records[i])\r\n elif param1 == '過去一月':\r\n for i in range(len(records)):\r\n if records[i].get('Time') >= month_ago:\r\n new_records.append(records[i])\r\n else: # param1 == '過去一年'\r\n for i in range(len(records)):\r\n if records[i].get('Time') >= year_ago:\r\n new_records.append(records[i])\r\n\r\n return new_records\r\n\r\n# 類別篩選器\r\ndef select_category(records, param2):\r\n new_records = []\r\n if param2 == '全部':\r\n new_records = records\r\n else:\r\n for i in range(len(records)):\r\n if records[i].get('Category') == param2:\r\n new_records.append(records[i])\r\n \r\n return new_records\r\n\r\n# 關鍵字篩選器\r\ndef select_key(records, param3, param4):\r\n new_records = []\r\n for i in range(len(records)):\r\n if param3 == '用戶名':\r\n if records[i].get('Exchange Account') == param4:\r\n new_records.append(records[i])\r\n elif param3 == 
'關鍵字':\r\n if param4 in records[i].get('內容'):\r\n new_records.append(records[i])\r\n else:\r\n pass\r\n\r\n return new_records\r\n\r\n# 與GUI連結,讓使用者自訂查詢依據\r\nbutton1 = input('請輸入時間範圍 = ')\r\nbutton2 = input('請輸入檢索類別 = ')\r\nbutton3 = input('請輸入檢索依據 = ')\r\nbutton4 = input('請輸入檢索關鍵字 = ')\r\n\r\n# [篩選後的收入記錄,篩選後的支出記錄]\r\nselected = []\r\nfor i in (income_records, payment_records):\r\n \r\n time_records = select_time(i, button1)\r\n\r\n category_records = select_category(time_records, button2)\r\n\r\n if button2 == '儲值記錄':\r\n selected.append(category_records)\r\n else:\r\n if button3 == '-無-':\r\n selected.append(category_records)\r\n else:\r\n key_records = select_key(category_records, button3, button4)\r\n selected.append(key_records)\r\n\r\n# 將篩選結果轉換為清單儲存\r\ndef output_records(records):\r\n output = []\r\n\r\n for i in range(len(records)):\r\n date = records[i].get('Time')\r\n account = records[i].get('Exchange Account')\r\n category = records[i].get('Category')\r\n description = records[i].get('Description')\r\n amount = int(records[i].get('Amount'))\r\n temp = [date, account, category, description, amount]\r\n output.append(temp)\r\n \r\n return output\r\n\r\nresult_income = output_records(selected[0]) # 檢索結果:收入\r\nresult_payment = output_records(selected[1]) # 檢索結果:支出\r\n","sub_path":"ntu_coin_records.py","file_name":"ntu_coin_records.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"246218291","text":"import numpy as np\nimport tensorflow as tf\n\nx1 = np.array([[1,2,3], [2,3,4], [3,4,5], [4,5,6], [5,6,7], [6,7,8], [7,8,9], [8,9,10], [9,10,11], [10,11,12], [20,30,40], [30,40,50], [40,50,60]])\n\nx2 = np.array([[10,20,30],[20,30,40],[30,40,50],[40,50,60],[50,60,70],[60,70,80],[70,80,90],[80,90,100],[90,100,110],[100,110,120],[2,3,4],[3,4,5],[4,5,6]])\n\ny = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])\nx1_predict = np.array([55,65,75])\nx2_predict = np.array([65,75,85])\n\n\nx1_predict = x1_predict.reshape(1,3)\nx2_predict = x2_predict.reshape(1,3)\nprint(x1.shape) #(13,3)\nprint(x2.shape) #(13,3)\nprint(y.shape) #(13,)\nprint(x1_predict.shape) #(3,)\nprint(x2_predict.shape) #(3,)\n# x1=x1.reshpae(x1.shape[0],x1.shape[1],1)\n# x2=x2.reshpae(x2.shape[0],x1.shape[1],1)\n\nfrom sklearn.model_selection import train_test_split\nx1_train, x1_test, y_train, y_test = train_test_split(x1, y, train_size=0.8, shuffle=True, random_state=66)\nx2_train, x2_test, y_train, y_test = train_test_split(x2, y, train_size=0.8, shuffle=True, random_state=66)\n\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Input, concatenate\n\ninput1 = Input(shape=(3,))\ndense1 = Dense(10, activation='relu')(input1)\ndense1 = Dense(10)(dense1)\n\ninput2 = Input(shape=(3,))\ndense2 = Dense(10, activation='relu')(input2)\ndense2 = Dense(10)(dense2)\n\nmerge1 = concatenate([dense1, dense2])\nmiddle1 = Dense(10, activation='relu')(merge1)\nmiddle1 = Dense(10)(middle1)\n\noutput1 = Dense(10)(middle1)\noutput1 = Dense(1)(output1)\n\n\n\n\n\nmodel = Model(inputs=[input1, input2], outputs=output1)\n\nmodel.compile(loss = 'mse', optimizer='adam', metrics='mae')\nmodel.fit([x1_train,x2_train], y_train, epochs=100)\n\nloss = model.evaluate([x1_test,x2_test], y_test)\n\n\ny1_predict = model.predict([x1_predict, x2_predict])\n\nprint('loss = ', loss)\nprint('y_predict', y1_predict)\n","sub_path":"keras1/keras29_LSTM_enesemble1.py","file_name":"keras29_LSTM_enesemble1.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"602530534","text":"#! -*- coding:utf-8 -*-\n\nimport math\n\nimport tensorflow as tf\n\nfrom data import DataSet\nfrom model import MyTextCNN\n\n\ndef train():\n batch_size = 100\n seq_len = 128\n\n data_set = DataSet(batch_size, 'data_assistant/vocabs', seq_len)\n data_set.load_data('data_assistant/train/')\n\n graph = tf.Graph()\n with graph.as_default():\n model = MyTextCNN(seq_len, 2, data_set.get_vocab_size())\n model.build_model()\n\n with tf.Session(graph=graph) as sess:\n tf.global_variables_initializer().run()\n\n print('start!')\n\n epoch_batch_cnt = data_set.get_data_size() // batch_size\n print('batch_per_epoch={b}'.format(b=epoch_batch_cnt))\n\n total_step = 0\n for epoch in range(1000):\n print('epoch {e}'.format(e=epoch))\n\n for ii in range(epoch_batch_cnt + 1):\n X, Y = data_set.get_batch()\n\n loss_val, accuracy = model.train(sess, X, Y, total_step)\n\n if total_step % 2 == 0:\n print('step {c}, loss={l}, accuracy={a}'.format(c=total_step, l=loss_val, a=accuracy))\n\n if math.isnan(loss_val):\n print('Nan loss!!')\n return\n\n if total_step % 100 == 0:\n model.save(sess, \"data_assistant/model/\", total_step)\n print('Saved!')\n\n total_step += 1\n\nif __name__ == '__main__':\n train()\n # predict()\n # predict_pb()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"565318303","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport Tkinter\nimport time\n\nrelogio = Tkinter.Label()\nrelogio.pack()\nrelogio['text'] = time.strftime('%H:%M:%S')\nrelogio['font'] = 'Helvetica 96 bold'\nrelogio['fg'] = 'red'\n\ndef tic():\n agora = time.strftime('%H:%M:%S')\n if agora != relogio['text']:\n relogio['text'] = agora\n relogio.after(100, tic)\n\ntic()\nrelogio.mainloop()\n","sub_path":"experiments/pcduino/dojo/relogio.py","file_name":"relogio.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"482753251","text":"import functools\nimport sys\n\nfrom PyQt4 import QtGui, QtCore\n\nfrom controller import Controller\nfrom gui.dev_tools import DevTools\nfrom gui.keyboard.mindtype import MindType\n\n\nclass ChooseScreen(QtGui.QWidget):\n def __init__(self, controller, parent=None):\n super(ChooseScreen, self).__init__(parent)\n\n # Creating main panel which contains everything\n self.main_panel = QtGui.QVBoxLayout()\n self.main_panel.setContentsMargins(0, 0, 0, 0)\n\n # creating header panel which has start, pause/resume and text display\n self.header_panel = QtGui.QHBoxLayout()\n self.main_panel.addLayout(self.header_panel)\n\n # creating header panel buttons\n # self.character_display_panel = QtGui.QLabel(\"Enter Text!\")\n self.dev_tools_button = QtGui.QPushButton(\"Dev Tools\")\n self.keyboard_button = QtGui.QPushButton(\"Keyboard\")\n\n # setting button click listeners\n self.dev_tools_button.clicked.connect(functools.partial(self.start_dev_tools))\n self.keyboard_button.clicked.connect(functools.partial(self.start_keyboard))\n\n # adding buttons to header panel\n # self.header_panel.addWidget(self.character_display_panel)\n self.header_panel.addWidget(self.dev_tools_button)\n self.header_panel.addWidget(self.keyboard_button)\n\n # adding keyboard gui to main panel\n # creating a button grid\n self.grid = QtGui.QGridLayout()\n self.grid.setSpacing(0)\n\n self.main_panel.addLayout(self.grid)\n\n # setting layout to main_panel\n self.setLayout(self.main_panel)\n\n self.keyboard_screen_gui = MindType(controller)\n self.dev_tools_gui = DevTools(controller)\n\n @QtCore.pyqtSlot()\n def start_dev_tools(self):\n self.dev_tools_gui.exec_()\n\n @QtCore.pyqtSlot()\n def start_keyboard(self):\n self.keyboard_screen_gui.exec_()\n\n\nif __name__ == '__main__':\n # Running gui\n app = QtGui.QApplication(sys.argv)\n main_scr = ChooseScreen(Controller())\n main_scr.resize(500, 100)\n main_scr.show()\n sys.exit(app.exec_())\n","sub_path":"Code/src/gui/choose_screen.py","file_name":"choose_screen.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"459966079","text":"def number2words(n):\n \"\"\" works for numbers between 0 and 999999 \"\"\"\n w_20 = [\"\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\",\n \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\", \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\"]\n w_ty = [\"\", \"\", \"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n if n == 0:\n return \"zero\"\n\n def helper(x):\n if x == 0:\n return \"\"\n if x < 20:\n return w_20[x]\n if x < 100:\n return w_ty[x // 10] + \"-\" + w_20[x % 10] if x % 10 else w_ty[x // 10]\n return w_20[x // 100] + \" hundred \" + helper(x % 100) if x % 100 else w_20[x // 100] + \" hundred\"\n\n res = \"\"\n if n >= 1000:\n res = helper(n // 1000) + \" thousand\"\n if n % 1000:\n if res != \"\":\n res += \" \"\n res += helper(n % 1000)\n return res\n\n\nprint(number2words(1003))\n","sub_path":"codewar/2021/Write_out_numbers.py","file_name":"Write_out_numbers.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"596569929","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[18]:\n\n\nget_ipython().system('mkdir ../lookalikeceleb/')\n\n\n# In[19]:\n\n\nimport face_recognition\nimport os\nimport numpy as np\nfrom IPython.display import Image\n\n\n# In[20]:\n\n\ndef load_images(known_images_dir):\n known_encodings = []\n known_images = []\n\n for file in os.listdir(known_images_dir):\n #fsdecode function decode the file into filename\n filename = os.fsdecode(file)\n image = face_recognition.load_image_file(os.path.join(known_images_dir, filename))\n\n enc = face_recognition.face_encodings(image)\n if len(enc) > 0:\n known_encodings.append(enc[0])\n known_images.append(filename)\n\n return (known_encodings, known_images)\n\n\n# In[21]:\n\n\ndef calculate_face_distance(known_encodings, unknown_img_path, cutoff=0.5, num_results=4):\n image_to_test = face_recognition.load_image_file(unknown_img_path)\n image_to_test_encoding = face_recognition.face_encodings(image_to_test)[0]\n\n face_distances = face_recognition.face_distance(known_encodings, image_to_test_encoding)\n return (unknown_img_path, known_images[face_distances.argmin()])\n\n\n# In[22]:\n\n\nknown_encodings, known_images = load_images(\"/cxldata/projects/lookalikeceleb/images\")\n\n\n# In[23]:\n\n\noriginal_image = \"../lookalikeceleb/myimage.jpg\"\nImage(filename=original_image)\n\n\n# In[16]:\n\n\nmatching_image = calculate_face_distance(known_encodings, original_image)[1]\n\n\n# In[17]:\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom matplotlib import rcParams\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n# read images\nimg_1 = mpimg.imread(original_image)\nimg_2 = mpimg.imread('/cxldata/projects/lookalikeceleb/images/' + matching_image)\n\n# display images\nfig, ax = plt.subplots(1,2)\nax[0].imshow(img_1);\nax[1].imshow(img_2);\n\nprint('Hey, you look like ' + os.path.splitext(matching_image)[0] + '!')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"find_your_celebrity_lookalike_with_computer_vision_594.py","file_name":"find_your_celebrity_lookalike_with_computer_vision_594.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"244499238","text":"import numpy as np\nimport time\n\n# Possion distribution for arrival interval patterns\ndef model_arrival_times(args):\n arrival_time_delays = np.random.poisson(lam = args.avg_arrival_rate,\n size = args.nepochs * args.num_batches)\n return arrival_time_delays\n\n\ndef model_batch_size_distribution(args):\n if args.batch_size_distribution == \"normal\":\n batch_size_distributions = np.random.normal(args.avg_mini_batch_size, args.var_mini_batch_size, args.num_batches)\n\n elif args.batch_size_distribution == \"lognormal\":\n batch_size_distributions = np.random.lognormal(args.avg_mini_batch_size, args.var_mini_batch_size, args.num_batches)\n\n elif args.batch_size_distribution == \"fixed\":\n batch_size_distributions = np.array([args.avg_mini_batch_size for _ in range(args.num_batches) ])\n\n elif args.batch_size_distribution == \"file\":\n percentiles = []\n batch_size_distributions = []\n with open(args.batch_dist_file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n percentiles.append(float(line.rstrip()))\n\n for _ in range(args.num_batches):\n batch_size_distributions.append( int(percentiles[ int(np.random.uniform(0, len(percentiles))) ]) )\n\n for i in range(args.num_batches):\n batch_size_distributions[i] = int(max(min(batch_size_distributions[i], args.max_mini_batch_size), 1))\n return batch_size_distributions\n\n# partition the requests into small batches\ndef partition_requests(args, batch_size):\n batch_sizes = []\n\n while batch_size > 0:\n mini_batch_size = min(args.sub_task_batch_size, batch_size)\n batch_sizes.append(mini_batch_size)\n batch_size -= mini_batch_size\n\n return batch_sizes\n\n\ndef loadGenSleep( sleeptime ):\n if sleeptime > 0.0055:\n time.sleep(sleeptime)\n else:\n startTime = time.time()\n while (time.time() - startTime) < sleeptime:\n continue\n return","sub_path":"loadgen/loadgen_utils.py","file_name":"loadgen_utils.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"439889717","text":"#!/usr/bin/env python3\n\nimport tkinter as tk\nimport random\nimport os\n\n#variables\n\ni = 1\n\nanswers = list(range(20))\nanswers[0]=\"Chuck Norris spricht waehrend der Fahrt mit dem Busfahrer.\";\nanswers[1]=\"Chuck Norris hat bis unendlich gezaehlt. Schon zwei mal.\";\nanswers[2]=\"Chuck Norris kann schwarze Stifte nach Farbe sortieren.\";\nanswers[3]=\"Wenn Chuck Norris ins Wasser geht wird er nicht nass, das Wasser \\n \\twird Chuck Norris.\";\nanswers[4]=\"Chuck Norris versteht Frauen.\";\nanswers[5]=\"Chuck Norris brachte seinem Vater das Rasieren bei.\";\nanswers[6]=\"Physiker sind verbluefft: Cuck Norris zweiter Roundhousekick, kommt \\n \\tnoch vor seinem ersten Roundhauskick an.\";\nanswers[7]=\"Als Lee Harvey Oswalt auf Kenndy schoss, hat Chuck Norris die Kugeln \\n \\tmit seinem eigenen Bart abgefangen, und JFKs Kopf ist nur vor \\n \\terstaunen explodiert.\";\nanswers[8]=\"Chuck Norris bucht in einem Schweizer Hotel ein Zimmer mit Meerblick \\n \\tund bekommt es.\";\nanswers[9]=\"Chuck Norris wurde von seiner Tante geboren, da keiner so lebensmuede \\n \\twar, seine Mutter zu knattern.\";\nanswers[10]=\"Chuck Norris kann im Kinderkarussell ueberholen.\";\nanswers[11]=\"Chuck Norris hat eine Erste-Hilfe-Puppe wiederbelebt.\";\nanswers[12]=\"Chuck Norris kann den Limes (1+(1/x))^x für x -> Infinity genau \\n \\tberechnen.\";\nanswers[13]=\"Chuck Norris trinkt aus einem Wasserhahn auf Ex.\";\nanswers[14]=\"Chuck Norris hat ganz Gallien besetzt. Ganz Gallien? Ja! Ganz Gallien!\";\nanswers[15]=\"Chuck Norris stirbt nach der Hoffnung.\";\nanswers[16]=\"Wie viele Liegestuetze schafft Chuck Norris? Alle!\";\nanswers[17]=\"Chuck Norris wurde einmal von einer Koenigskobra gebissen. Nach fuenf \\n \\tqualvollen Tagen voller Schmerz starb die Kobra.\";\nanswers[18]=\"Chuck Norris zaehlt alle Schafe einer Herde in 2 Sekunden. 
Sein Trick: \n \tEr zaehlt die Beine und teilt am Ende durch 4.\";\nanswers[19]=\"Chuck Norris wurde nicht geboren, sondern entfesselt.\";\n\n#functions\n\ndef callbackCn(event=None):\n\n\tglobal i\n\n\tplaintextfile = open(\"log.dat\", \"a\")\n\tentryText = cnEntry.get()\n\tcnEntry.delete(0, \"end\")\n\t#answers-start\n\n\trandomNumber = random.randint(0, 19)\n\n\tif \"tschuess\" in entryText:\n\t\twriteContentCn = \"Chuck Norris: *verpasst dir einen Roundhousekick*\"\n\t\ti=i+1\n\telse:\n\t\tanswer = \"Fakt: \"+answers[randomNumber]\n\t\twriteContentCn = answer\n\t\tif randomNumber in [3,6,8,9,12,17,18]:\n\t\t\ti=i+2\n\t\telif randomNumber == 7:\n\t\t\ti=i+3\n\t\telse:\n\t\t\ti=i+1\n\n\t#answers-end\n\twriteContentYou = \"\\nDu: \"+entryText\n\tplaintextfile.write(writeContentYou)\n\ti=i+1\n\twriteContentMe = \"\\n\"+writeContentCn\n\tplaintextfile.write(writeContentMe)\n\tplaintextfile.close()\n\tplaintextfile = open(\"log.dat\", \"r\")\n\tplaintext = plaintextfile.read()\n\tplaintextfile.close()\n\ttext.set(plaintext)\n\n\twhile i > 40:\n\t\ttry:\n\t\t\tplaintextfile = open(\"log.dat\", \"w\")\n\t\t\tplaintextfile.write(writeContentMe)\n\t\t\tplaintextfile.close()\n\t\t\ti=1\n\t\t\tbreak\n\t\texcept OSError:\n\t\t\ti=41\n\n#init gui\n\ncn = tk.Tk()\ncn.title(\"Chuck Norris\")\n\n#gui design\n\ntext = tk.StringVar()\nplaintextfile = open(\"log.dat\", \"w\")\nplaintextfile.write(\"Chuck Norris: Checkmate!\")\nplaintextfile.close()\nplaintextfile = open(\"log.dat\", \"r\")\nplaintext = plaintextfile.read()\ntext.set(plaintext)\nplaintextfile.close()\navcn = tk.PhotoImage(file=\"avatars/randoms/cn\")\navatar = tk.Label(cn, image=avcn)\navatar.image = avcn\navatar.grid(row=0, column=0, rowspan=5)\ncnChat = tk.Label(cn, textvariable=text, anchor=\"c\", relief=\"sunken\", width=80, height=40)\ncnChat.grid(row=1, column=1, rowspan=10, columnspan=4)\ncnEntry = tk.Entry(cn, width=70)\ncnEntry.grid(row=11, column=1, rowspan=2, columnspan=3)\ncnSend = tk.Button(cn, text=\"Send!\", command=callbackCn, width=10)\ncnSend.grid(row=11, column=4, rowspan=2, columnspan=1)\n\ncn.bind(\"<Return>\", callbackCn)\ncn.mainloop()\n","sub_path":"pyBots/randoms/chucknorris.py","file_name":"chucknorris.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"150787544","text":"import argparse\nimport csv\nimport os\nimport re\nimport sys\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom dotenv import load_dotenv\nfrom loguru import logger\n\nlogger.add(sys.stderr, level=\"INFO\")\nlogger.add(\"debug.log\", level=\"DEBUG\", rotation=\"500 MB\")\n\nnp.random.seed(seed=42)\n\nROOT = Path(os.path.dirname(os.path.dirname(__file__))) / Path(os.path.basename(os.path.dirname(__file__)))\nDATA_DIR = ROOT / 'data'\nenv_path = ROOT / '.env'\nload_dotenv(dotenv_path=env_path)\n\n# =================== COINFORM API SETTINGS =================\nCOINFORM_ENDPOINT = os.getenv('COINFORM_ENDPOINT')\nQUERY_ID_REQUEST = COINFORM_ENDPOINT + '/twitter/tweet'\nRESPONSE_TWEET = COINFORM_ENDPOINT + '/response/{query_id}/debug'\n\nclass Sample_Generator():\n def __init__(self, args):\n # =================== Params =================\n self.total_modules = args.n_modules\n self.total_sample = args.n_samples\n\n # credibility boundaries\n self.misinfome_cred = args.misinfome_cred\n self.content_analys_cred = args.content_analysis_cred\n self.claim_cred = args.claim_cred\n\n # confidences\n self.misinfome_conf = args.misinfome_conf\n self.content_analys_conf = args.content_analysis_conf\n self.claim_conf = args.claim_conf\n self.labels = {'credible': 0, 'mostly_credible': 1, 'mostly_not_credible': 2, 'credible_uncertain': 3,\n 'not_credible': 4,\n 'not_verifiable': 5}\n self.modules = {'misinfome': [self.misinfome_cred, self.misinfome_conf],\n 'content_analys': [self.content_analys_cred, self.content_analys_conf],\n 'claim': [self.claim_cred, self.claim_conf]}\n\n\n def _all_agree_helper(self):\n labels = list(self.labels.keys())\n data = pd.DataFrame()\n misinfome_creds = []\n content_analys_creds = []\n claim_creds = []\n cred_labels = []\n\n for i in range(0, len(labels) - 1):\n if i == 0:\n misinfome_creds.append(np.random.uniform(high=1, low=self.misinfome_cred[i],\n size=self.total_sample))\n content_analys_creds.append(np.random.uniform(high=1,\n low=self.content_analys_cred[i],\n size=[self.total_sample]))\n claim_creds.append(np.random.uniform(high=1, low=self.claim_cred[i],\n size=[self.total_sample]))\n cred_labels.append(np.asarray([labels[i] for _ in range(self.total_sample)]))\n\n elif i == 4:\n misinfome_creds.append(np.random.uniform(high=self.misinfome_cred[i - 1],\n low=-1,\n size=[self.total_sample]))\n content_analys_creds.append(np.random.uniform(high=self.content_analys_cred[i - 1],\n low=-1,\n size=[self.total_sample]))\n claim_creds.append(np.random.uniform(high=self.claim_cred[i - 1], low=-1,\n size=[self.total_sample]))\n cred_labels.append(np.asarray([labels[i] for _ in range(self.total_sample)]))\n else:\n\n misinfome_creds.append(np.random.uniform(high=self.content_analys_cred[i - 1],\n low=self.content_analys_cred[i],\n size=[self.total_sample]))\n content_analys_creds.append(np.random.uniform(high=self.content_analys_cred[i - 1],\n low=self.content_analys_cred[i],\n size=[self.total_sample]))\n claim_creds.append(np.random.uniform(high=self.claim_cred[i - 1],\n low=self.claim_cred[i], size=[self.total_sample]))\n cred_labels.append(np.asarray([labels[i] for _ in range(self.total_sample)]))\n\n data['misinfome_cred'] = np.asarray(misinfome_creds).flatten()\n data['content_analys_cred'] = np.asarray(content_analys_creds).flatten()\n data['claim_cred'] = np.asarray(claim_creds).flatten()\n data['expected_credible'] = np.asarray(cred_labels).flatten()\n\n return data\n\n def 
_pick_random_modules(self, num_diff_module):\n '''\n :param num_module: number of module which has different functions\n :type num_module: int\n :return:\n :rtype:\n '''\n all_idxs = set([i for i in range(len(self.modules.keys()))])\n disagree_idxs = set()\n picked_flag = False\n for i in range(num_diff_module):\n while not picked_flag:\n picked_num = np.random.randint(high=len(self.modules.keys()), low=0, size=1)[0]\n if picked_num not in disagree_idxs:\n disagree_idxs.add(picked_num)\n picked_flag = True\n\n agree_idxs = all_idxs - disagree_idxs\n return {'agree': [list(self.modules.keys())[agree_idx] for agree_idx in agree_idxs],\n 'disagree': [list(self.modules.keys())[disagree_idx] for disagree_idx in disagree_idxs]}\n\n def _pick_random_label(self, idx_agreed):\n labels = list(self.labels.keys())\n picked_flag = False\n picked_id = None\n while (not picked_flag):\n random_idx = np.random.randint(high=len(labels), low=0, size=1)[0]\n if random_idx is not idx_agreed:\n picked_id = random_idx\n picked_flag = True\n\n return labels[picked_id]\n\n def some_agree(self):\n '''\n This method creates values that some modules agree, some disagree with high/low confidence\n :return:\n :rtype:\n '''\n dummy_values = pd.DataFrame()\n # if data folder does not exist, create\n if not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n\n for i in range(1, len(self.modules.keys()) + 1):\n data_low_conf = self._some_agree_helper(num_diff_module=i, confidence_density=False)\n dummy_values = dummy_values.append(data_low_conf, ignore_index=True, sort=True)\n data_high_conf = self._some_agree_helper(num_diff_module=i, confidence_density=True)\n dummy_values = dummy_values.append(data_high_conf, ignore_index=True, sort=True)\n\n # save dummy values {casename}_{module_name}_{upboundary_cred}_{conf}\n path = DATA_DIR / '{func_name}_misinfome_{misinfome_cred}_{misinfome_conf}_contentanalysis_{content_analysis_cred}_{content_analysis_conf}_claim_{claim_cred}_{claim_conf}.csv'.format(\n func_name=self.some_agree.__name__,\n misinfome_cred=str(self.misinfome_cred[0]), misinfome_conf=str(self.misinfome_conf),\n content_analysis_conf=self.content_analys_conf, content_analysis_cred=self.content_analys_cred[0],\n claim_cred=str(self.claim_cred[0]), claim_conf=self.claim_conf)\n dummy_values.to_csv(path)\n\n def _some_agree_helper(self, num_diff_module, confidence_density):\n '''\n :param num_diff_module: number of modules which disgree\n :type num_diff_module: int\n :param confidence_density: confidence density of disagreed modules. 
If it is true, modules disagree with high confidence\n :type: boolean\n :return:\n :rtype:\n '''\n #### 2 modules agree, one is not ######\n random_modules = self._pick_random_modules(num_diff_module=num_diff_module)\n agreed_modules = random_modules['agree']\n disagreed_modules = random_modules['disagree']\n labels = list(self.labels.keys())\n temp_creds = {'misinfome': [], 'content_analys': [], 'claim': []}\n temp_confs = {'misinfome': [], 'content_analys': [], 'claim': []}\n cred_labels = []\n data = pd.DataFrame()\n print('Agreed modules {}'.format(agreed_modules))\n print('Disagreed modules {}'.format(disagreed_modules))\n for i in range(0, len(self.labels.keys()) - 1):\n if i == 0:\n for agreed_module in agreed_modules:\n temp_creds[agreed_module].append(np.random.uniform(high=1, low=self.modules[agreed_module][0][i],\n size=self.total_sample))\n # high confidence\n temp_confs[agreed_module].append(\n np.random.uniform(high=1, low=self.modules[agreed_module][1], size=self.total_sample))\n\n # agreed module -> credible, disagreed modules -> mostly credible (i+1), but disagree module's label is not final.\n for disagreed_module in disagreed_modules:\n temp_creds[disagreed_module].append(np.random.uniform(high=self.modules[disagreed_module][0][i],\n low=self.modules[disagreed_module][0][i + 1],\n size=self.total_sample))\n temp_confs[disagreed_module].append(\n np.random.uniform(high=1, low=self.modules[disagreed_module][1],\n size=self.total_sample)) if confidence_density else temp_confs[\n disagreed_module].append(\n np.random.uniform(high=self.modules[disagreed_module][1], low=0, size=self.total_sample))\n\n cred_labels.append(np.asarray([labels[i] for _ in range(self.total_sample)]))\n\n elif i == 4:\n for agreed_module in agreed_modules:\n temp_creds[agreed_module].append(np.random.uniform(high=self.modules[agreed_module][0][i - 1],\n low=-1,\n size=self.total_sample))\n # high confidence\n temp_confs[agreed_module].append(\n np.random.uniform(high=1, low=self.modules[agreed_module][1], size=self.total_sample))\n # agreed module -> not credible, disagreed modules -> credible uncertain (i-1)\n for disagreed_module in disagreed_modules:\n temp_creds[disagreed_module].append(np.random.uniform(high=self.modules[disagreed_module][0][i - 1],\n low=self.modules[disagreed_module][0][i - 2],\n size=self.total_sample))\n temp_confs[disagreed_module].append(\n np.random.uniform(high=1, low=self.modules[disagreed_module][1],\n size=self.total_sample)) if confidence_density else temp_confs[\n disagreed_module].append(\n np.random.uniform(high=self.modules[disagreed_module][1], low=0, size=self.total_sample))\n cred_labels.append(np.asarray([labels[i] for _ in range(self.total_sample)]))\n else:\n for agreed_module in agreed_modules:\n temp_creds[agreed_module].append(np.random.uniform(high=self.modules[agreed_module][0][i - 1],\n low=self.modules[agreed_module][0][i],\n size=\n self.total_sample))\n # high confidence\n temp_confs[agreed_module].append(\n np.random.uniform(high=1, low=self.modules[agreed_module][1], size=self.total_sample))\n # disagreed module -> preeceding (i-1)\n for disagreed_module in disagreed_modules:\n # preeceding label\n temp_creds[disagreed_module].append(np.random.uniform(high=self.modules[disagreed_module][0][i - 1],\n low=self.modules[disagreed_module][0][i - 2],\n size=\n self.total_sample))\n temp_confs[disagreed_module].append(\n np.random.uniform(high=1, low=self.modules[disagreed_module][1],\n size=self.total_sample)) if confidence_density else temp_confs[\n 
disagreed_module].append(\n np.random.uniform(high=self.modules[disagreed_module][1], low=0, size=self.total_sample))\n cred_labels.append(np.asarray([labels[i] for _ in range(self.total_sample)]))\n\n for name, values in temp_creds.items():\n data[name + '_cred'] = np.asarray(values).flatten()\n data[name + '_conf'] = np.asarray(temp_confs[name]).flatten()\n data['expected_credible'] = np.asarray(cred_labels).flatten()\n return data\n\n def all_agree_all_high(self):\n '''\n In this case all of modules agree on one credibility label with high confidence\n '''\n dummy_values = pd.DataFrame()\n # if data folder does not exist, create\n if not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n\n data = self._all_agree_helper()\n\n # confidence value always high between th>val>1\n data['misinfome_conf'] = np.random.uniform(high=1, low=self.misinfome_conf, size=[data.shape[0]])\n data['content_analys_conf'] = np.random.uniform(high=1, low=self.content_analys_conf,\n size=[data.shape[0]])\n data['claim_conf'] = np.random.uniform(high=1, low=self.claim_conf, size=[data.shape[0]])\n\n dummy_values = dummy_values.append(data, ignore_index=True, sort=True)\n # save dummy values {casename}_{module_name}_{upboundary_cred}_{conf}\n path = DATA_DIR / '{func_name}_misinfome_{misinfome_cred}_{misinfome_conf}_contentanalysis_{content_analysis_cred}_{content_analysis_conf}_claim_{claim_cred}_{claim_conf}.csv'.format(\n func_name=self.all_agree_all_high.__name__,\n misinfome_cred=str(self.misinfome_cred[0]), misinfome_conf=str(self.misinfome_conf),\n content_analysis_conf=self.content_analys_conf, content_analysis_cred=self.content_analys_cred[0],\n claim_cred=str(self.claim_cred[0]), claim_conf=self.claim_conf)\n dummy_values.to_csv(path)\n\n def all_agree_some_high(self):\n '''\n In this case all of modules agree on one credibility label, but some of them with high confidence\n '''\n dummy_values = pd.DataFrame()\n # if data folder does not exist, create\n if not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n\n data = self._all_agree_helper()\n\n misinfo_conf = []\n content_analys_conf = []\n claim_conf = []\n # confidence value always high between th>val>1 half high\n high_conf_sample = data.shape[0] // 2\n low_conf_sample = data.shape[0] - high_conf_sample\n\n misinfo_conf.append(np.random.uniform(high=1, low=self.misinfome_conf, size=[high_conf_sample]))\n\n content_analys_conf.append(np.random.uniform(high=1, low=self.content_analys_conf,\n size=high_conf_sample))\n claim_conf.append(np.random.uniform(high=1, low=self.claim_conf, size=high_conf_sample))\n\n # confidence value always low val>0\n misinfo_conf.append(np.random.uniform(high=self.misinfome_conf, low=0, size=low_conf_sample))\n content_analys_conf.append(np.random.uniform(high=self.content_analys_conf, low=0,\n size=low_conf_sample))\n claim_conf.append(np.random.uniform(high=self.claim_conf, low=0, size=low_conf_sample))\n\n data['misinfome_conf'] = np.asarray(misinfo_conf).flatten()\n data['content_analys_conf'] = np.asarray(content_analys_conf).flatten()\n data['claim_conf'] = np.asarray(claim_conf).flatten()\n\n dummy_values = dummy_values.append(data, ignore_index=True, sort=True)\n # save dummy values {casename}_{module_name}_{upboundary_cred}_{conf}\n path = DATA_DIR / '{func_name}_misinfome_{misinfome_cred}_{misinfome_conf}_contentanalysis_{content_analysis_cred}_{content_analysis_conf}_claim_{claim_cred}_{claim_conf}.csv'.format(\n func_name=self.all_agree_some_high.__name__,\n misinfome_cred=str(self.misinfome_cred[0]), 
misinfome_conf=str(self.misinfome_conf),\n            content_analysis_conf=self.content_analys_conf, content_analysis_cred=self.content_analys_cred[0],\n            claim_cred=str(self.claim_cred[0]), claim_conf=self.claim_conf)\n        dummy_values.to_csv(path)\n\n    def all_not_verified(self):\n        '''\n        All modules either have low confidence or fail outright.\n        todo: fail case is not implemented\n        '''\n        dummy_values = pd.DataFrame()\n        # if data folder does not exist, create it\n        if not os.path.exists(DATA_DIR):\n            os.makedirs(DATA_DIR)\n        data = self._all_agree_helper()\n\n        # all of them have low confidence, hence they are unverifiable.\n        data['misinfome_conf'] = np.random.uniform(high=self.misinfome_conf, low=0, size=[data.shape[0]]).flatten()\n        data['content_analys_conf'] = np.random.uniform(high=self.content_analys_conf, low=0,\n                                                        size=[data.shape[0]]).flatten()\n        data['claim_conf'] = np.random.uniform(high=self.claim_conf, low=0, size=[data.shape[0]]).flatten()\n\n        # label credibility\n        data['expected_credible'] = 'not_verifiable'\n\n        dummy_values = dummy_values.append(data, ignore_index=True, sort=True)\n        # save dummy values {casename}_{module_name}_{upboundary_cred}_{conf}\n        path = DATA_DIR / '{func_name}_misinfome_{misinfome_cred}_{misinfome_conf}_contentanalysis_{content_analysis_cred}_{content_analysis_conf}_claim_{claim_cred}_{claim_conf}.csv'.format(\n            func_name=self.all_not_verified.__name__,\n            misinfome_cred=str(self.misinfome_cred[0]), misinfome_conf=str(self.misinfome_conf),\n            content_analysis_conf=self.content_analys_conf, content_analysis_cred=self.content_analys_cred[0],\n            claim_cred=str(self.claim_cred[0]), claim_conf=self.claim_conf)\n        dummy_values.to_csv(path)\n\n    def _map_label(self, label):\n        print('Not implemented yet!!')\n        return None\n\n    def _request(self, tweet_id):\n        # logger.debug('I am requesting tweet {}'.format(tweet_id))\n        args = {\n            \"tweet_id\": parse_id(tweet_id),\n            \"tweet_author\": \"string\",\n            \"tweet_text\": \"string\"\n        }\n        # the first response includes the query id\n        response_1 = requests.post(QUERY_ID_REQUEST, json=args).json()\n        if 'query_id' not in response_1:\n            return None\n        query_id = response_1['query_id']\n        task_completed = False\n        modules_response = {}\n\n        err_count = 100\n        while (not task_completed):\n            response_2 = requests.get(RESPONSE_TWEET.format(query_id=query_id)).json()\n            status = response_2['status']\n            # logger.debug('Query response {}'.format(status))\n            if status == 'partly_done' or status == 'in_progress':\n                err_count -= 1\n                if err_count == 0:\n                    status = 'done'\n            if status == 'done':\n                response_codes = response_2['module_response_code']\n                logger.debug(response_2['flattened_module_responses'])\n                if response_codes[\n                    'claimcredibility'] == 200 and 'claimcredibility_tweet_claim_credibility_0_confidence' in \\\n                        response_2['flattened_module_responses']:\n                    modules_response['claim_conf'] = response_2['flattened_module_responses'][\n                        'claimcredibility_tweet_claim_credibility_0_confidence']\n                    modules_response['claim_cred'] = response_2['flattened_module_responses'][\n                        'claimcredibility_tweet_claim_credibility_0_credibility']\n                else:\n                    modules_response['claim_conf'] = -100\n                    modules_response['claim_cred'] = -100\n                if response_codes['contentanalysis'] == 200 and 'contentanalysis_credibility' in response_2[\n                    'flattened_module_responses']:\n                    modules_response['content_analys_conf'] = response_2['flattened_module_responses'][\n                        'contentanalysis_confidence']\n                    modules_response['content_analys_cred'] = response_2['flattened_module_responses'][\n                        'contentanalysis_credibility']\n                else:\n                    
modules_response['content_analys_conf'] = -100\n modules_response['content_analys_cred'] = -100\n if response_codes['misinfome'] == 200 and 'misinfome_credibility_value' in response_2[\n 'flattened_module_responses']:\n modules_response['misinfome_conf'] = response_2['flattened_module_responses'][\n 'misinfome_credibility_confidence']\n modules_response['misinfome_cred'] = response_2['flattened_module_responses'][\n 'misinfome_credibility_value']\n else:\n modules_response['misinfome_conf'] = -100\n modules_response['misinfome_cred'] = -100\n\n task_completed = True\n\n return modules_response\n\n def export_to_file(self, row, file_path):\n with open(file_path, 'a', encoding='utf-8') as f:\n cw = csv.writer(f, delimiter='\\t')\n cw.writerow(row)\n\n def from_misinfome(self):\n '''\n Retrieves english tweets from misinfome collection and record tweet ids and labels.\n :return:\n :rtype:\n '''\n dest_file = DATA_DIR / 'misinfome.tsv'\n src_file = DATA_DIR / 'misinfome' / 'joined_tables.tsv'\n fc_labels_file = DATA_DIR / 'misinfome' / 'fact_checking_gold_labels.tsv'\n responses_file = DATA_DIR / 'misinfome' / 'misinfome_responses.csv'\n file_path = DATA_DIR / 'misinfome/rule-responses/export.csv'\n\n if not os.path.isfile(dest_file):\n data = pd.read_csv(src_file, sep='\\t')\n mask = (data['lang'] == 'en') & (data['source'].str.contains('twitter'))\n data = data[mask]\n data = data[['url', 'factchecker_label']]\n if not fc_labels_file.exists():\n fc_labels = pd.DataFrame(pd.unique(data['factchecker_label']))\n fc_labels.to_csv(fc_labels_file)\n\n ## claim_conf,claim_cred,content_analys_conf,content_analys_cred,expected_credible,misinfome_conf,misinfome_cred\n if not responses_file.exists():\n self.export_to_file(['#id', 'url', 'claim_conf',\n 'claim_cred',\n 'content_analys_conf',\n 'content_analys_cred',\n 'misinfome_conf',\n 'misinfome_cred'], file_path)\n for index, row in data.iterrows():\n row['id'] = parse_id(row['url'])\n logger.info(row['id'])\n response = self._request(row['url'])\n if response:\n row_obj = {\n 'id': row['id'],\n 'url': row['url'],\n 'claim_conf': response['claim_conf'],\n 'claim_cred': response['claim_cred'],\n 'content_analys_conf': response['content_analys_conf'],\n 'content_analys_cred': response['content_analys_cred'],\n 'misinfome_conf': response['misinfome_conf'],\n 'misinfome_cred': response['misinfome_cred'],\n }\n # row['expected_credible'] = self._map_label(row['factchecker_label'])\n self.export_to_file(list(row_obj.values()), file_path)\n # data[['claim_conf', 'claim_cred', 'content_analys_conf', 'content_analys_cred', 'misinfome_conf',\n # 'misinfome_cred']].to_csv(responses_file)\n # todo add final data csv\n\n\nif __name__ == '__main__':\n print('This script generates samples for testing rules')\n parser = argparse.ArgumentParser()\n parser.add_argument('--n_samples', type=int, default=20)\n parser.add_argument('--misinfome_cred', action='store',\n type=float, nargs=4, default=[0.66, 0.33, -0.33, -0.66],\n help=\"Examples: --misinfome_cred item1 item2\")\n parser.add_argument('--content_analysis_cred', action='store',\n type=float, nargs=4, default=[0.6, 0.3, -0.3, -0.6],\n help=\"Examples: --content_analysis_cred item1 item2\")\n parser.add_argument('--claim_cred', action='store',\n type=float, nargs=4, default=[0.5, 0.25, -0.5, -0.25],\n help=\"Examples: --claim_cred item1 item2\")\n parser.add_argument('--misinfome_conf',\n type=float, default=0.5)\n parser.add_argument('--content_analysis_conf',\n type=float, default=0.6)\n 
parser.add_argument('--claim_conf', type=float, default=0.7)\n parser.add_argument('--n_modules', type=int, default=3, help=\"total number of modules\")\n parser.add_argument('--sample_mode', type=str, default='external_misinfome',\n help=\"select sample mode, all_not_verified, all_agree_all_high or some agree\")\n\n args = parser.parse_args()\n sample_gen = Sample_Generator(args)\n mode = args.sample_mode\n\n print('Selected mode is {}'.format(mode))\n\n if mode == 'all_not_verified':\n sample_gen.all_not_verified()\n elif mode == 'all_agree_all_high':\n sample_gen.all_agree_all_high()\n elif mode == 'some_agree':\n sample_gen.some_agree()\n elif mode == 'all_agree_some_high':\n sample_gen.all_agree_some_high()\n elif mode == 'external_misinfome':\n sample_gen.from_misinfome()\n","sub_path":"sample_generator.py","file_name":"sample_generator.py","file_ext":"py","file_size_in_byte":27080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
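The `argparse` block above maps each `--sample_mode` value onto one generator method. A minimal sketch of driving the same class from code rather than the CLI, assuming `Sample_Generator` only reads the attributes the parser defines (`argparse.Namespace` here just mimics the parsed arguments):

```python
import argparse

# Hypothetical programmatic invocation; defaults copied from the parser above.
args = argparse.Namespace(
    n_samples=20,
    misinfome_cred=[0.66, 0.33, -0.33, -0.66],
    content_analysis_cred=[0.6, 0.3, -0.3, -0.6],
    claim_cred=[0.5, 0.25, -0.5, -0.25],
    misinfome_conf=0.5,
    content_analysis_conf=0.6,
    claim_conf=0.7,
    n_modules=3,
    sample_mode='all_agree_all_high',
)
Sample_Generator(args).all_agree_all_high()  # writes a CSV into DATA_DIR
```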
+{"seq_id":"20194211","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport os\nimport codecs\nimport json\nimport urllib2\nfrom json_loader import JsonLoader\nfrom model.gamestate import *\nfrom constants import PATH\nfrom mox import *\nimport json\n\n\nclass JsonLoaderTest(unittest.TestCase):\n def setUp(self):\n self.mox = Mox()\n self.json_loader = JsonLoader()\n self.json_string = []\n self.example_cnt = 2\n for example_num in range(self.example_cnt):\n with codecs.open(os.path.join('test_data', 'example_'\n + str(example_num) + '.json'),\n encoding='utf-8', mode='r') as f:\n self.json_string.append(f.read())\n\n def test_json_translate(self):\n for example_num in range(self.example_cnt):\n self.game_state = self.json_loader.json_translate(json.loads(\n self.json_string[example_num]))\n\n self.assertTrue(isinstance(self.game_state, GameState))\n self.assertTrue(isinstance(self.game_state.players, Players))\n self.assertTrue(isinstance(self.game_state.hat, Hat))\n self.assertTrue(isinstance(self.game_state.settings, Settings))\n\n for player in self.game_state.players.players:\n self.assertTrue(isinstance(player, Player))\n self.assertTrue(isinstance(player.name, unicode))\n self.assertTrue(isinstance(player.words, list))\n\n for word in self.game_state.hat.words:\n self.assertTrue(isinstance(word, Word))\n self.assertTrue(isinstance(word.text, unicode))\n self.assertTrue(isinstance(word.owner, Player) or\n isinstance(word.owner, type(None)))\n\n self.assertTrue(isinstance(self.game_state.settings.\n time_per_round_sec, int))\n self.assertTrue(isinstance(self.game_state.settings.\n time_before_out_sec, int))\n self.assertTrue(isinstance(self.game_state.settings.\n skip_words, int))\n\n def test_load_from_url(self):\n url = 'test string'\n with codecs.open(PATH + '/test_data/example_0.json',\n encoding='utf-8', mode='r') as file_opened:\n self.mox.StubOutWithMock(urllib2, 'urlopen')\n urllib2.urlopen(IsA(str), timeout=IsA(int)).AndReturn(file_opened)\n self.mox.ReplayAll()\n self.json_string = self.json_loader.load_from_url(url)\n\n self.mox.UnsetStubs()\n self.mox.VerifyAll()\n\n with codecs.open(PATH + '/test_data/example_0.json',\n encoding='utf-8', mode='r') as file_opened:\n self.assertEqual(self.json_string, json.loads(file_opened.read()))\n\n def test_load_game_from_url(self):\n url = 'test string'\n with codecs.open(PATH + '/test_data/example_0.json',\n encoding='utf-8', mode='r') as file_opened:\n self.mox.StubOutWithMock(urllib2, 'urlopen')\n urllib2.urlopen(IsA(str), timeout=IsA(int)).AndReturn(file_opened)\n self.mox.ReplayAll()\n self.game_state = self.json_loader.load_game_from_url(url)\n\n self.mox.UnsetStubs()\n self.mox.VerifyAll()\n self.assertTrue(isinstance(self.game_state, GameState))\n\n def test_dump_to_url(self):\n url = 'test string'\n with codecs.open(PATH + '/test_data/example_0.json',\n encoding='utf-8', mode='r') as file_opened:\n self.mox.StubOutWithMock(urllib2, 'urlopen')\n urllib2.urlopen(IsA(str), IsA(str), timeout=IsA(int))\n self.mox.ReplayAll()\n self.json_loader.dump_to_url(url, self.json_string[0])\n\n self.mox.UnsetStubs()\n self.mox.VerifyAll()\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"model/json_loader_test.py","file_name":"json_loader_test.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"390761613","text":"#! /usr/bin/env python3\n# ex: et sts=4 ts=4 sw=4\n\nimport sys\nimport os\nimport shutil\n\nfrom acdcli.api import client as ACD\nfrom acdcli.cache import db as DB\nfrom acdcli.api.common import RequestError\nfrom acdcli.utils import hashing\nfrom acdcli.utils.time import datetime_to_timestamp\n\n\nclass Cache(object):\n\n def __init__(self, cache_folder):\n self._cache_folder = cache_folder\n auth_folder = os.path.expanduser('~/.cache/acd_cli')\n self._acd_client = ACD.ACDClient(auth_folder)\n self._acd_db = DB.NodeCache(auth_folder)\n self._last_recycle = 0\n\n def __call__(self):\n folder_id = self._acd_db.resolve_path('/tmp')\n children = self._acd_db.list_children(folder_id)\n children = map(lambda _: _.node, children)\n children = sorted(children, key=lambda _: _.modified, reverse=True)\n for child in children:\n self._recycle_space()\n if self._is_too_old(child):\n break\n self._download(child, self._cache_folder)\n\n def _recycle_space(self):\n entries = self._get_cache_entries()\n\n while True:\n free_space = self._get_free_space()\n if free_space > 10:\n break\n\n full_path, mtime = entries.pop(0)\n if os.path.isdir(full_path):\n shutil.rmtree(full_path)\n else:\n os.remove(full_path)\n self._last_recycle = mtime\n print('recycled: ' + full_path)\n\n def _get_cache_entries(self):\n entries = os.listdir(self._cache_folder)\n entries = (os.path.join(self._cache_folder, _) for _ in entries)\n entries = ((_, os.stat(_).st_mtime) for _ in entries)\n entries = sorted(entries, key=lambda _: _[1])\n return entries\n\n def _get_free_space(self):\n s = os.statvfs(self._cache_folder)\n s = s.f_frsize * s.f_bavail\n s = s / 1024\n s = s / 1024 / 1024\n return s\n\n def _is_too_old(self, node):\n return datetime_to_timestamp(node.modified) < self._last_recycle\n\n def _download(self, node, local_path):\n local_path = local_path if local_path else ''\n full_path = os.path.join(local_path, node.name)\n\n if not node.is_available():\n return False\n\n if node.is_folder():\n try:\n os.makedirs(full_path, exist_ok=True)\n except OSError:\n print('mkdir failed: ' + full_path)\n return False\n for child in node.children:\n self._download(child, full_path)\n else:\n if os.path.isfile(full_path):\n print('skip existed: ' + full_path)\n if os.path.getsize(full_path) != node.size:\n print('size mismatch: ' + full_path)\n return False\n return True\n\n while True:\n hasher = hashing.IncrementalHasher()\n try:\n print('downloading: ' + full_path)\n self._acd_client.download_file(node.id, node.name, local_path, write_callbacks=[hasher.update])\n print('downloaded: ' + full_path)\n except RequestError as e:\n print('download failed: ' + str(e))\n else:\n local = hasher.get_result()\n remote = node.md5\n if local != remote:\n print('md5 mismatch: ' + full_path)\n os.remove(full_path)\n else:\n break\n\n preserve_mtime(node, full_path)\n\n return True\n\n\ndef main(args=None):\n if args is None:\n args = sys.argv\n\n cache = Cache(args[1])\n cache()\n\n return 0\n\n\ndef preserve_mtime(node, full_path):\n mtime = datetime_to_timestamp(node.modified)\n os.utime(full_path, (mtime, mtime))\n\n\nif __name__ == '__main__':\n exit_code = main()\n sys.exit(exit_code)\n","sub_path":"sandbox/python/acdcache.py","file_name":"acdcache.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"5600716","text":"from tkinter import *\nimport math\n\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps = 0\ntimer = None\n\n\ndef start_timer():\n global reps\n reps += 1\n work_sec = WORK_MIN * 60\n short_break_sec = SHORT_BREAK_MIN * 60\n long_break_sec = LONG_BREAK_MIN * 60\n\n if reps == 8:\n countdown(long_break_sec)\n title_label.config(text =\"Break\", fg=RED)\n\n elif reps % 2 == 0:\n countdown(short_break_sec)\n title_label.config(text =\"Break\", fg=PINK)\n\n else:\n countdown(work_sec)\n title_label.config(text=\"Work\", fg=GREEN)\n\n\ndef reset_timer():\n window.after_cancel(timer)\n canvas.itemconfig(timer_text, text = \"00:00\")\n title_label.config(text=\"Timer\", fg=\"white\")\n checkmark_label.config(text=\"\")\n\n\ndef countdown(count):\n count_min = math.floor(count/60)\n count_sec = count % 60\n if count_sec < 10:\n count_sec = f\"0{count_sec}\"\n canvas.itemconfig(timer_text, text=f\"{count_min}:{count_sec}\")\n if count > 0:\n global timer\n timer = window.after(1000, countdown, count-1)\n else:\n start_timer()\n marks = \"\"\n work_sessions = math.floor(reps/2)\n for _ in range (work_sessions):\n marks += \"✅\"\n checkmark_label.config(text=marks)\n\n\nwindow = Tk()\nwindow.title(\"Pomodoro\")\nwindow.config(padx=100, pady=50, bg=\"black\")\n\n\nfile = PhotoImage(file=\"tomato.png\")\n\ncanvas = Canvas(width=220, height=224, bg=\"black\", highlightthickness=0)\ncanvas.create_image(100, 112, image=file)\ntimer_text = canvas.create_text(100,130, text=\"00:00\", fill=\"white\", font=(FONT_NAME,35,\"bold\"))\ncanvas.grid(column=1, row=1)\n\n\ntitle_label = Label(text=\"Timer\")\ntitle_label.config(fg=\"white\", bg=\"black\", font=(FONT_NAME, 50, \"normal\"))\ntitle_label.grid(column=1, row=0)\n\ncheckmark_label = Label(bg=\"black\")\ncheckmark_label.grid(column=1, row=3)\n\n\nstart_button = Button(text=\"Start\", command=start_timer, highlightthickness=0)\nstart_button.grid(column=0, row=2)\n\nreset_button = Button(text=\"Reset\", command=reset_timer, highlightthickness=0)\nreset_button.grid(column=2, row=2)\n\nwindow.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"75652931","text":"# You may create additional functions here:\nimport ast\n\n\n# The function read and manipulate data to a desirable format\ndef readFile():\n puzzleData = []\n with open(\"sample.txt\", \"r\") as lines:\n lineData = []\n for line in lines:\n line = line.strip().replace(' ',', ').replace(' ','')\n if line[-1] == ',':\n line= line[:-1]\n puzzleData.append( ast.literal_eval(line))\n return puzzleData\n\n# Function to print the \ndef printPuzzle(puzzle):\n for line in puzzle:\n print(line,'\\n')\n\n\ndef puzzleComplete(puzzle):\n\n for i in range(0,10):\n for j in range(0,10):\n if puzzle[i][j] == 0:\n return False\n \n return True\n\ndef computerRanges(quadrands):\n quadrandsConstrains = []\n quadrandsConstrains.append([1,[0,3],[0,3]])\n quadrandsConstrains.append([2,[3,6],[0,3]])\n quadrandsConstrains.append([3,[6,9],[0,3]])\n quadrandsConstrains.append([4,[0,3],[3,6]])\n quadrandsConstrains.append([5,[3,6],[3,6]])\n quadrandsConstrains.append([6,[6,9],[3,6]])\n quadrandsConstrains.append([7,[0,3],[6,9]])\n quadrandsConstrains.append([8,[3,6],[6,9]])\n quadrandsConstrains.append([9,[6,9],[6,9]])\n\n for line in quadrandsConstrains:\n if line[0] == quadrands:\n return line\n\n\n\ndef puzzleSolved(puzzle):\n total = 45\n for i in range(0,9):\n rowSum = 0\n columnSum =0\n for j in range(0,9):\n rowSum +=puzzle[i][j]\n columnSum +=puzzle[j][i]\n if rowSum != total or columnSum != total:\n return False\n return True\n\n\n# We have 9 quadrand\ndef autoSolveAlgorithm(puzzleCopy , quadrand):\n quadrandData = []\n total = 45\n ranges = computerRanges(quadrand)\n puzzle = puzzleCopy\n rangeRow = ranges[2]\n rangeCol = ranges[1]\n printPuzzle(puzzle)\n\n for i in range(rangeRow[0], rangeRow[1]):\n for j in range(rangeCol[0],rangeCol[1]):\n if puzzle[i][j] != 0:\n total =0\n quadrandData.append(puzzle[i][j])\n quadrandData.sort()\n numberToEValuate = 10\n numberWhichDoNoTExistInAQuadrand = []\n\n # Find missing number ins a quadrand\n for i in range(1,10): \n for item in quadrandData:\n if i == item:\n numberToEValuate = 0\n if numberToEValuate ==10 :\n numberWhichDoNoTExistInAQuadrand.append(i)\n \n # Fill quadrand with missing numbers\n for i in range(rangeRow[0], rangeRow[1]):\n for j in range(rangeCol[0],rangeCol[1]):\n if puzzle[i][j] == 0:\n for num in numberWhichDoNoTExistInAQuadrand:\n if not numberExistInRowColum(puzzle, i, j, num):\n puzzle[i][j] = num\n \n if total == 0:\n autoSolveAlgorithm(puzzle , quadrand)\n else :\n print(\"Solved\")\n printPuzzle(puzzle)\n return\n\n\n\n# Chech if the function exist in both row and column\n\ndef numberExistInRowColum(puzzle, row, col , searcParam) :\n foundInRow = False\n foundInColumn = False\n #check if it exist in a row\n for i in range(0,9):\n if puzzle[row][i] ==searcParam:\n foundInRow = True\n break\n\n #check if it exist in columns\n for i in range(0,9):\n if puzzle[i][col] == searcParam:\n foundInColumn = True\n break\n\n if foundInColumn or foundInRow:\n return True\n elif foundInColumn and foundInRow:\n return True\n else :\n return False\n \n# Additional Functions above this comment\n# Implement your Sudoku Solution Below:\ndef solve_sudoku(puzzle):\n #Edit the code Below Here\n print(\"When pronted to enter row or col, row 1 would be enter as 0, and col 1 as 0 , row 9 as 8 and so on..\")\n puzzleCopy = puzzle\n while not puzzleSolved(puzzleCopy) :\n print(\"..................................................\",'\\n')\n\n print(\"PUZZLE IS NOT YES SOLVED , enter -1 to quit\",'\\n')\n\n 
print(\"..................................................\",'\\n')\n printPuzzle(puzzleCopy)\n print(\"..................................................\",'\\n')\n\n firstValue = int(input(\"Please Enter your row : \"))\n secondValue = int(input(\"Please Enter your col : \"))\n if firstValue == -1 or secondValue ==-1 :\n break\n\n while firstValue >8 or secondValue >8 :\n firstValue = int(input(\"Please Enter your row (0-8) : \"))\n secondValue = int(input(\"Please Enter your col (0-8) : \"))\n \n solutionNumber = int(input(\"Please Enter the number : \" ))\n if solutionNumber == -1:\n break\n\n puzzle[firstValue][secondValue] = solutionNumber\n\n if puzzleSolved(puzzleCopy):\n cont = int(input(\"inputer Enter 1 to play again or 0 to quit : \"))\n if cont == 1:\n solve_sudoku(puzzle)\n else:\n return\n\n for i in range(1,10):\n autoSolveAlgorithm(puzzleCopy, i)\n\n\nsolve_sudoku(readFile())\n\n\n","sub_path":"Virtual-22/sudo_question.py","file_name":"sudo_question.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"500863138","text":"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Author: Chul Sung\n# Updated by: Chul Sung, daniel.c.sung@gmail.com\n# Date: 02/03/2013\n\nfrom cellcommon import *\nfrom cellcalculatepca import *\nimport scipy\nfrom scipy import ndimage\n\ndef Generate_PCA_Matrix(vol_data,pointarrays,l_radius,planenum,planeselect):\n\n z_size, y_size, x_size = vol_data.shape\n point_num = pointarrays.shape[0]\n \n z_max = z_size - 1\n y_max = y_size - 1\n x_max = x_size - 1\n \n planeimgsize = (l_radius*2+1)**2\n\n # positive point count\n pos_pnum = 0\n \n #f1 = figure(frameon=False)\n #fig_col = math.ceil(point_num/10) \n \n# if planeselect == 0:\n# f1.suptitle('X-Y planes at the center points in unit volumes', fontsize=16)\n# elif planeselect == 1:\n# f1.suptitle('X-Z planes at the center points in unit volumes', fontsize=16)\n# else:\n# f1.suptitle('Y-Z planes at the center points in unit volumes', fontsize=16)\n\n local_posi_pca_input_mtx = np.zeros((point_num,planeimgsize*planenum))\n \n for point in pointarrays:\n x = point[0]\n y = point[1]\n z = point[2]\n\n c_x_min = x - l_radius\n c_y_min = y - l_radius\n c_z_min = z - l_radius\n\n c_x_max = x + l_radius\n c_y_max = y + l_radius\n c_z_max = z + l_radius\n \n if (c_x_min >= 0) and (c_y_min >= 0) and (c_z_min >= 0) and (c_x_max <= x_max) and (c_y_max <= y_max) and (c_z_max <= z_max):\n #f1.add_subplot(fig_col+1, 10, pos_pnum+1) # this line outputs images on top of each other\n #X-Y Planes data\n xyimg = vol_data[z,c_y_min:c_y_max+1,c_x_min:c_x_max+1]\n\n #X-Z Planes data\n xzimg = vol_data[c_z_min:c_z_max+1,y,c_x_min:c_x_max+1]\n \n #Y-Z Planes data\n yzimg = vol_data[c_z_min:c_z_max+1,c_y_min:c_y_max+1,x]\n\n# if planeselect == 0:\n# imshow(xyimg,cmap=cm.Greys_r)\n# elif planeselect == 1:\n# imshow(xzimg,cmap=cm.Greys_r)\n# else:\n# imshow(yzimg,cmap=cm.Greys_r)\n# axis('off')\n \n xyimg = xyimg.flatten()\n xzimg = xzimg.flatten()\n yzimg = yzimg.flatten()\n\n xyzimg = zeros((planeimgsize, planenum))\n xyzimg[:,0] = xyimg\n xyzimg[:,1] = xzimg\n xyzimg[:,2] = yzimg\n \n local_posi_pca_input_mtx[pos_pnum,:] = xyzimg.flatten()\n pos_pnum += 1\n \n del xyimg, xzimg, yzimg, xyzimg\n \n # truncate empty array\n local_posi_pca_input_mtx = local_posi_pca_input_mtx[range(pos_pnum),:]\n \n return local_posi_pca_input_mtx, pos_pnum\n\n\ndef Generate_Neg_PCA_Matrix(vol_data,pointarrays,l_radius,pos_pnum,target_volume,outofbound_thr,planenum,planeselect):\n z_size, y_size, x_size = vol_data.shape\n \n z_max = z_size - 1\n y_max = y_size - 1\n x_max = x_size - 1\n \n planeimgsize = (l_radius*2+1)**2\n \n local_neg_pca_input_mtx = np.zeros((pos_pnum,planeimgsize*planenum))\n \n# f3 = figure(frameon=False)\n# fig_col = math.ceil(pos_pnum/10)\n# \n# if planeselect == 0:\n# f3.suptitle('X-Y planes at the negative points in unit volumes', fontsize=16)\n# elif planeselect == 1:\n# f3.suptitle('X-Z planes at the negative points in unit volumes', fontsize=16)\n# else:\n# f3.suptitle('Y-Z planes at the negative points in unit volumes', fontsize=16)\n\n ## Generate the list of negative points using target_volume\n neg_pointarrays = np.ndarray(shape=(pos_pnum,3), dtype='uint8')\n neg_values = np.ndarray(shape=(pos_pnum,), dtype='d')\n neg_pnum = 0\n while (neg_pnum < pos_pnum):\n cand_z = random.randint(0, z_max)\n cand_y = random.randint(0, y_max)\n cand_x = random.randint(0, x_max)\n\n nx_min = cand_x - l_radius\n ny_min = cand_y - l_radius\n nz_min = cand_z - l_radius\n \n nx_max = cand_x + l_radius\n ny_max = cand_y + l_radius\n nz_max 
= cand_z + l_radius\n \n if (nx_min >= 0) and (ny_min >= 0) and (nz_min >= 0) and (nx_max <= x_max) and (ny_max <= y_max) and (nz_max <= z_max):\n if(target_volume[cand_z,cand_y,cand_x] < outofbound_thr):\n #f3.add_subplot(fig_col+1, 10, neg_pnum+1) # this line outputs images on top of each other\n \n neg_values[neg_pnum] = round(target_volume[cand_z,cand_y,cand_x], 3)\n \n neg_pointarrays[neg_pnum,0] = cand_x \n neg_pointarrays[neg_pnum,1] = cand_y\n neg_pointarrays[neg_pnum,2] = cand_z\n \n #X-Y Planes data\n xyimg = vol_data[cand_z,ny_min:ny_max+1,nx_min:nx_max+1]\n \n #X-Z Planes data\n xzimg = vol_data[nz_min:nz_max+1,cand_y,nx_min:nx_max+1]\n \n #Y-Z Planes data\n yzimg = vol_data[nz_min:nz_max+1,ny_min:ny_max+1,cand_x]\n \n# if planeselect == 0:\n# imshow(xyimg,cmap=cm.Greys_r)\n# elif planeselect == 1:\n# imshow(xzimg,cmap=cm.Greys_r)\n# else:\n# imshow(yzimg,cmap=cm.Greys_r)\n# axis('off')\n \n xyimg = xyimg.flatten()\n xzimg = xzimg.flatten()\n yzimg = yzimg.flatten()\n \n xyzimg = zeros((planeimgsize, planenum))\n xyzimg[:,0] = xyimg\n xyzimg[:,1] = xzimg\n xyzimg[:,2] = yzimg\n \n local_neg_pca_input_mtx[neg_pnum,:] = xyzimg.flatten()\n neg_pnum += 1\n \n del xyimg, xzimg, yzimg, xyzimg\n\n # truncate empty array\n local_neg_pca_input_mtx = local_neg_pca_input_mtx[range(neg_pnum),:]\n\n return local_neg_pca_input_mtx\n \n \ndef NeuronPCA(testing_files,l_radius):\n\n # local volume radius\n planenum = 3\n planeselect = 0 # default: xy, xz = 1, yz = 2\n\n \n posi_pca_input_mtx = []\n neg_pca_input_mtx = []\n \n for testing_file in testing_files:\n vol_filename = testing_file + \".vol\"\n point_filename = testing_file + \".cel\"\n \n #print vol_filename\n\n vol_data = LoadVolume(vol_filename)\n \n # 2 times scale down!\n d = 2\n vol_data = ndimage.convolve(np.uint16(vol_data), np.ones((d,d,d)))[::d,::d,::d]/8\n \n z_size, y_size, x_size = vol_data.shape\n \n #g = gauss_kern3D(r_size=3, k_sigma=0.5)\n #vol_data = ndimage.convolve(np.uint16(vol_data),g)\n\n #print vol_data[0,:,:]\n #plt.imshow(vol_data[0,:,:])\n #plt.gray()\n #plt.show()\n \n #scipy.misc.imsave('test2.png', vol_data[0,:,:])\n \n #SaveVolume(vol_data, 'chul.vol')\n \n pointarrays = OBJToPoints(point_filename)\n \n for point in pointarrays:\n point[0] = math.floor(point[0] * x_size)\n point[1] = math.floor(point[1] * y_size)\n point[2] = math.floor(point[2] * z_size)\n \n vol_pca_input_list, pos_pnum = Generate_PCA_Matrix(vol_data,pointarrays,l_radius,planenum,planeselect)\n\n posi_pca_input_mtx.extend(vol_pca_input_list)\n del vol_pca_input_list\n\n ###############################\n # This generates target volume with gauss templete\n target_volume = MakeTargetVolume(pointarrays, vol_data.shape, l_radius)\n ###############################\n \n # debug for target_volume values\n# g_c = 0\n# g_r = 1\n# for point in pointarrays:\n# x = point[0]\n# y = point[1]\n# z = point[2]\n# if(target_volume[z,y,x] > 0.9):\n# g_c += 1\n# print target_volume[z-g_r:z+g_r+1,y-g_r:y+g_r+1,x-g_r:x+g_r+1]\n# print g_c\n\n # out of cell boundary threshold\n outofbound_thr = 0.1\n \n neg_vol_pca_input_list = Generate_Neg_PCA_Matrix(vol_data,pointarrays,l_radius,pos_pnum,target_volume,outofbound_thr,planenum,planeselect)\n \n neg_pca_input_mtx.extend(neg_vol_pca_input_list)\n \n del neg_vol_pca_input_list\n del target_volume\n del vol_data\n del pointarrays\n\n ###############################\n # Negative_Generate_Points_ND_PCA_Cal(vol_data,pointarrays,pos_pnum,target_volume,l_radius,outofbound_thr)\n ###############################\n\n 
posi_pca_input_arr = array(posi_pca_input_mtx, 'd')\n del posi_pca_input_mtx\n \n posi_coeff,posi_meanvector = Positive_PCA_Cal(posi_pca_input_arr,l_radius,planenum,planeselect)\n \n neg_pca_input_arr = array(neg_pca_input_mtx, 'd')\n del neg_pca_input_mtx\n \n neg_coeff,neg_meanvector = Negative_PCA_Cal(neg_pca_input_arr,l_radius,planenum,planeselect)\n\n return posi_coeff,posi_meanvector,neg_coeff,neg_meanvector\n\n\n","sub_path":"CellCounting_using_PCA_n_Hadoop/cellneuronpca.py","file_name":"cellneuronpca.py","file_ext":"py","file_size_in_byte":8999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
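The `d = 2` line in `NeuronPCA` downsamples the volume by summing each voxel's 2x2x2 neighbourhood and dividing by 8, i.e. a local box mean before subsampling. A toy demonstration of the same pattern (border voxels use `ndimage.convolve`'s default reflect padding, so edge values are only approximate block means):

```python
import numpy as np
from scipy import ndimage

vol = np.arange(4 ** 3, dtype=np.uint16).reshape(4, 4, 4)
d = 2
small = ndimage.convolve(vol, np.ones((d, d, d)))[::d, ::d, ::d] / 8
print(small.shape)  # (2, 2, 2): half the resolution on every axis
```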
+{"seq_id":"231416539","text":"import discord\r\nfrom discord.ext import commands\r\nimport praw\r\nfrom threading import Timer\r\nimport datetime\r\nimport json\r\nimport asyncio\r\nimport requests\r\nimport time\r\nimport random\r\nimport os\r\nimport sys\r\n\r\nstartTime = time.time()\r\n\r\nif __name__ == '__main__':\r\n print(\"Tried to run {} as main script, despite it being a cog! Terminating script.\".format(__file__))\r\n time.sleep(2)\r\n sys.exit()\r\n\r\nclass Fun(commands.Cog):\r\n\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n @commands.Cog.listener()\r\n async def on_ready(self):\r\n print('Fun cog loaded successfully.')\r\n\r\n @commands.command(aliases=['8ball'])\r\n async def _8ball(self, ctx, *, question):\r\n \"\"\"Classic magic 8ball command\"\"\"\r\n MagicBallPhrasesList = [\r\n \"As I see it, yes.\",\r\n \"Ask again later.\",\r\n \"Better not tell you now.\",\r\n \"Cannot predict now.\",\r\n \"Concentrate and ask again.\",\r\n \"Dont count on it.\",\r\n \"It is certain.\",\r\n \"It is decidedly so.\",\r\n \"Most likely.\",\r\n \"My reply is no.\",\r\n \"My sources say no.\",\r\n \"Outlook not so good.\",\r\n \"Outlook good.\",\r\n \"Reply hazy, try again.\",\r\n \"Signs point to yes.\",\r\n \"Very doubtful.\",\r\n \"Without a doubt.\",\r\n \"Yes.\",\r\n \"Yes, definitely.\",\r\n \"You may rely on it.\"]\r\n\r\n await ctx.send(\"Your question: {0}\\nThe Magic 8ball's answer: {1}\".format(question, MagicBallPhrasesList[random.randint(0,(len(MagicBallPhrasesList)) - 1)] + \" {0}\".format(ctx.message.author.mention)))\r\n \r\n @_8ball.error\r\n async def _8ball_error(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(\"You need to give a question for the 8ball.\")\r\n else:\r\n await ctx.send(\"Something went wrong.\")\r\n\r\n @commands.command()\r\n async def coinflip(self, ctx):\r\n if random.randint(0, 1) == 0:\r\n await ctx.send(\"The result was heads!\")\r\n else:\r\n await ctx.send(\"The result was tails!\")\r\n\r\n @commands.command()\r\n async def dadjoke(self, ctx):\r\n \"\"\"Sends a funny dad joke\"\"\"\r\n url = \"https://dad-jokes.p.rapidapi.com/random/joke\"\r\n headers = {\r\n 'x-rapidapi-key': \"XXX\",\r\n 'x-rapidapi-host': \"dad-jokes.p.rapidapi.com\"\r\n }\r\n response = requests.request(\"GET\", url, headers=headers)\r\n parsed_response = json.loads(response.text)\r\n await ctx.send(parsed_response[\"body\"][0][\"setup\"])\r\n await asyncio.sleep(1)\r\n await ctx.send(parsed_response[\"body\"][0][\"punchline\"])\r\n\r\n\r\ndef setup(client):\r\n client.add_cog(Fun(client))","sub_path":"MainScript/PythonBot/cogs/Fun.py","file_name":"Fun.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"264209866","text":"import random\nm=20\nn=100\na=[]\n#根据题意,输出序列数的大小范围是0-n\nfor i in range(0,100):\n #实际操作中模运算只需将随机数的范围控制到0-n,就可以覆盖模运算的所有结果\n #for i in range(0,100):保证输出的序列是有序的,m=m-1控制20个数子\n if (random.randint(0,100)%n)< m :\n a.append(i)\n m=m-1\n n=n-1\n#print(a,'\\n',len(a))","sub_path":"Chapter 12/Think2.py","file_name":"Think2.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"173064336","text":"class Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n #initializing dp matrix\n dp=Matrix = [[0 for x in range(amount+1)] for y in range(len(coins)+1)] \n #assigning 1st column with 0\n for i in range(len(coins)+1):\n dp[i][0]=0\n #assigning first row with inf\n for j in range(1,amount+1):\n dp[0][j]=9999\n #generating a dp matrix\n for i in range(1,len(coins)+1):\n for j in range(1,amount+1):\n if j -1:\r\n\t\ttemp = y[i]\r\n\t\tj = size-1\r\n\t\twhile j > i:\r\n\t\t\ttemp -= s[i][j-i]*x[size-1-j]\r\n\t\t\tj -= 1\r\n\t\tx.append(temp/s[i][0])\r\n\t\ti -= 1\r\n\tx.reverse()\r\n\treturn x\r\n\r\n# Решение системы линейных уравнений методом Гаусса.\r\ndef gaussian_method(matrix, stable=True):\r\n\ti = 0;\r\n\tsize = len(matrix)\r\n\t# Перебор строк.\r\n\twhile i < size:\r\n\t\tif stable:\r\n\t\t\t# Поиск максимального элемента в i-ом столбце.\r\n\t\t\tmaxj = i\r\n\t\t\tj = i+1\r\n\t\t\twhile j < size:\r\n\t\t\t\tif abs(matrix[j][i]) > abs(matrix[maxj][i]):\r\n\t\t\t\t\tmaxj = j\r\n\t\t\t\tj += 1\r\n\t\t\tif matrix[maxj][i] == 0:\r\n\t\t\t\treturn None\r\n\t\t\t# Замена местами строк, что бы [i][i] элемент был максимальным, для устойчивости метода.\r\n\t\t\ttemp = matrix[i]\r\n\t\t\tmatrix[i] = matrix[maxj]\r\n\t\t\tmatrix[maxj] = temp\r\n\t\t# Обнуление матрицы под [i][i] элементом.\r\n\t\tj = i+1\r\n\t\twhile j < size:\r\n\t\t\ta = matrix[j][i]/matrix[i][i]\r\n\t\t\tk = i\r\n\t\t\t# Линейное преобразование строк.\r\n\t\t\twhile k <= size:\r\n\t\t\t\tmatrix[j][k] -= a*matrix[i][k]\r\n\t\t\t\tk += 1\r\n\t\t\tj += 1\r\n\t\ti += 1\r\n\t# Поиск x_n элемент\r\n\tresult = [matrix[size-1][size]/matrix[size-1][size-1]]\r\n\ti = size-2\r\n\t# Поиск x_i элементов\r\n\twhile i > -1:\r\n\t\tj = size-1\r\n\t\twhile j > i:\r\n\t\t\tmatrix[i][size] -= matrix[i][j]*result[size-j-1]\r\n\t\t\tj -= 1\r\n\t\tresult.append(matrix[i][size]/matrix[i][i])\r\n\t\ti -= 1\r\n\tresult.reverse()\r\n\treturn result\r\n\r\n# Решение системы линейных уравнений методом Гаусса.\r\ndef Jacobi_method(matrix, vector, eps):\r\n\tn = len(vector)\r\n\t# Проверка на сходимость метода для этих данных.\r\n\tfor i in range(0, n):\r\n\t\tif (2*fabs(matrix[i][i]) < sum(fabs(x) for x in matrix[i])):\r\n\t\t\treturn None\r\n\t# Вычисления\r\n\txi = [vector[i]/matrix[i][i] for i in range(0, n)]\r\n\txip = [0 for i in range(0, n)]\r\n\twhile True:\r\n\t\tfor i in range(0, n):\r\n\t\t\txip[i] = (vector[i] + matrix[i][i]*xi[i] - sum(x*y for (x, y) in zip(xi, matrix[i])))/matrix[i][i]\r\n\t\tif sqrt(sum((x-y)**2 for (x, y) in zip(xi, xip))) < eps:\r\n\t\t\tbreak\r\n\t\txi = [t for t in xip]\r\n\treturn xip\r\n","sub_path":"sole.py","file_name":"sole.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"398534200","text":"import random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits import mplot3d\na = np.arange(-10,10,0.2)\nb = np.arange(-10,10,0.2)\nx = np.arange(-10,10,0.2) \ny = 2*x+3\nu=np.random.normal(0,1, 100)\ns= np.std(u)\ny1=y+(s*u)\na,b =np.meshgrid(a,b)\ne= (((y1)-(a*x)+b))**2\ne1=e/100\nfig = plt.figure() \naxes = fig.gca(projection ='3d') \naxes.plot_surface(a, b, e1) \naxes.set_xlabel('a----->')\naxes.set_ylabel('b----->')\naxes.set_zlabel('Error----->')\naxes.set_title(\"Error surface plot\")\naxes.set_xlim(-10,10)\naxes.set_ylim(-10,10)\naxes.set_facecolor(\"orange\")\nplt.tight_layout()\nplt.show() \n","sub_path":"Assignment2/Yogesh Dewangan_204102319/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"426662967","text":"from pfaw import Fortnite, Platform, Mode\nimport csv\nfrom tempfile import NamedTemporaryFile\nimport shutil\nimport time\nfrom Fortnite.settings import *\n\nclass FortniteBot:\n def __init__(self, filename=\"Fortnite/stats.csv\"):\n self.file_name = filename\n try:\n self.fortnite = Fortnite(fortnite_token='ZWM2ODRiOGM2ODdmNDc5ZmFkZWEzY2IyYWQ4M2Y1YzY6ZTFmMzFjMjExZjI4NDEzMTg2MjYyZDM3YTEzZmM4NGQ=', launcher_token='MzRhMDJjZjhmNDQxNGUyOWIxNTkyMTg3NmRhMzZmOWE6ZGFhZmJjY2M3Mzc3NDUwMzlkZmZlNTNkOTRmYzc2Y2Y=', password=fortnite_password, email=fortnite_email)\n except Exception as e:\n print(\"Could not connect to fortnite API\")\n\n def player_found(self, username):\n found = True\n try:\n self.fortnite.player(username)\n except Exception:\n return False\n return True\n\n def check_new_wins(self):\n open_file = open(self.file_name, \"r\", encoding='UTF-8')\n reader = csv.reader(open_file, delimiter=\",\")\n players_with_wins = []\n csv_list = list(reader)\n for row in csv_list:\n username = row[0]\n wins = int(row[1])\n \n if self.player_found(username):\n new_wins = (self.new_wins(wins, username) - wins)\n if new_wins > 0:\n players_with_wins.append(username)\n self.update_player(username, new_wins)\n\n return players_with_wins\n \n\n def update_player(self, player, wins):\n filename = self.file_name\n \n read_file = open(filename, \"r\", encoding='UTF-8')\n\n reader = csv.reader(read_file, delimiter=',')\n\n rows = []\n for item in reader:\n if player == item[0]:\n item[1] = int(item[1]) + int(wins)\n rows.append(item)\n \n read_file.close()\n \n write_file = open(filename, \"w\", encoding='UTF-8')\n writer = csv.writer(write_file, lineterminator=\"\\n\")\n writer.writerows(rows)\n\n def new_wins(self, current_wins, username):\n stats = self.fortnite.battle_royale_stats(username, platform=Platform.pc)\n return stats.all.wins\n\n def get_wins(self, username):\n\n if self.player_found(username):\n return self.fortnite.battle_royale_stats(username, platform=Platform.pc).all.wins\n return \"Player not found\"\n \n def get_status(self):\n return self.fortnite.server_status()\n\n #Add a new player to get tracked by FortniteBot\n def add_player(self, username):\n if self.player_found(username):\n wins = self.get_wins(username)\n\n #Prepare the file for append. \n write_file = open(self.file_name, 'a', encoding='UTF-8')\n row = []\n row.append(username)\n row.append(wins)\n writer = csv.writer(write_file, lineterminator='\\n')\n writer.writerow(row)\n\n return username + \" is now beeing tracked!\"\n \n return \"Invalid player\"\n","sub_path":"Fortnite/fortnite.py","file_name":"fortnite.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"613369723","text":"import os, sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(currentdir)\n\nfrom Weapons.Weapon import Weapon\n\nclass Stick(Weapon):\n def __init__(self):\n self.name = \"Stick\"\n self.power = 100","sub_path":"Weapons/Magic/Stick.py","file_name":"Stick.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"517563215","text":"import enoki as ek\nimport pytest\nimport numpy as np\n\n# pkgs = [\"enoki.cuda\", \"enoki.cuda.ad\",\n# \"enoki.llvm\", \"enoki.llvm.ad\"]\n\npkgs = [\"enoki.llvm\", \"enoki.llvm.ad\"]\npkgs_ad = [\"enoki.llvm.ad\"]\n\ndef get_class(name):\n \"\"\"Resolve a package+class name into the corresponding type\"\"\"\n if 'cuda' in name:\n if not ek.has_backend(ek.JitBackend.CUDA):\n pytest.skip('CUDA mode is unsupported')\n elif 'llvm' in name:\n if not ek.has_backend(ek.JitBackend.LLVM):\n pytest.skip('LLVM mode is unsupported')\n\n name = name.split('.')\n value = __import__(\".\".join(name[:-1]))\n for item in name[1:]:\n value = getattr(value, item)\n\n return value\n\nclass Checker:\n \"\"\"\n Compares a Tensor indexing operation against a NumPy reference\n and asserts if there is a mismatch.\n \"\"\"\n def __init__(self, shape, tensor_type):\n import numpy as np\n self.shape = shape\n size = np.prod(shape)\n self.array_n = np.arange(size, dtype=np.uint32).reshape(shape)\n self.array_e = tensor_type(ek.arange(tensor_type.Array, size), shape)\n\n def __getitem__(self, args):\n import numpy as np\n ref_n = self.array_n.__getitem__(args)\n ref_e = self.array_e.__getitem__(args)\n assert ref_n.shape == ref_e.shape\n assert np.all(ref_n.ravel() == ref_e.array.numpy())\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test01_slice_index(pkg):\n t = get_class(pkg + \".TensorXu\")\n c = Checker((10,), t)\n c[:]\n c[3]\n c[1:5]\n c[-5]\n\n c = Checker((10, 20), t)\n c[:]\n c[5, 0]\n c[5, 0:2]\n c[:, 5]\n c[5, :]\n c[:, :]\n c[1:3, 2:7:2]\n c[8:2:-1, 7:0:-1]\n c[0:0, 0:0]\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test02_slice_ellipsis(pkg):\n t = get_class(pkg + \".TensorXu\")\n c = Checker((10, 20, 30, 40), t)\n\n c[...]\n c[1, ...]\n c[..., 1]\n c[4, ..., 3]\n c[0, 1:3, ..., 3]\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test03_slice_append_dim(pkg):\n t = get_class(pkg + \".TensorXu\")\n c = Checker((10, 20, 30, 40), t)\n\n c[None]\n c[..., None]\n c[1, None, ...]\n c[..., None, 1, None]\n c[None, 4, ..., 3, None]\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test04_broadcasting(pkg):\n t = get_class(pkg + \".TensorXu\")\n for i in range(1, 5):\n for j in range(1, 5):\n for k in range(1, 5):\n shape = [i, j, k]\n for l in range(len(shape)):\n shape_2 = list(shape)\n shape_2[l] = 1\n array_n1 = np.arange(np.prod(shape), dtype=np.uint32).reshape(shape)\n array_n2 = np.arange(np.prod(shape_2), dtype=np.uint32).reshape(shape_2)\n\n array_e1 = t(ek.arange(t.Index, np.prod(shape)), shape)\n array_e2 = t(ek.arange(t.Index, np.prod(shape_2)), shape_2)\n\n out_n = array_n1 + array_n2\n out_e = array_e1 + array_e2\n\n assert out_n.shape == out_e.shape\n assert np.all(out_n.ravel() == out_e.array.numpy())\n assert np.all((array_n1 * 2).ravel() == (array_e1 * 2).array.numpy())\n\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test05_initialization_casting(pkg):\n tu = get_class(pkg + \".TensorXu\")\n tf = get_class(pkg + \".TensorXf\")\n tf64 = get_class(pkg + \".TensorXf\")\n\n t0 = ek.full(tu, 1, (2, 3, 4))\n t1 = ek.full(tf, 2, (2, 3, 4))\n t2 = ek.zero(tf64, (2, 3, 4))\n\n assert ek.shape(t0) == (2, 3, 4)\n\n t3 = t0 + t1 + t2\n assert type(t3) is tf64\n\n assert t3.shape == (2, 3, 4)\n assert t3.array == ek.full(t3.Array, 3, 2*3*4)\n\n t3[:, 1, :] = 12\n assert t3[:, 0, :] == 3\n assert t3[:, 1, :] == 12\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs_ad)\ndef test05_ad(pkg):\n f = get_class(pkg + \".TensorXf\")\n z0 = ek.full(f, 1, (2, 3, 4))\n assert not 
ek.grad_enabled(z0)\n ek.enable_grad(z0)\n assert ek.grad_enabled(z0)\n assert not ek.grad_enabled(ek.detach(z0))\n assert ek.ravel(z0) is z0.array\n\n z1 = z0 + z0\n ek.backward(z1)\n g = ek.grad(z0)\n assert g.shape == (2, 3, 4)\n assert len(g.array) == 2*3*4\n assert g.array == 2\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test06_numpy_conversion(pkg):\n f = get_class(pkg + \".TensorXf\")\n\n value = f(ek.arange(f.Array, 2*3*4), (2, 3, 4))\n value_np = value.numpy()\n assert value_np.shape == (2, 3, 4)\n assert np.all(value_np.ravel() == value.array.numpy())\n\n value_2 = f(value_np)\n assert value.shape == value_2.shape\n assert value.array == value_2.array\n\n value_np = np.ones((1,1,1,1))\n value_3 = f(value_np)\n assert value_np.shape == value_3.shape\n assert np.all(value_np == value_3.array)\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test07_jax_conversion(pkg):\n jax = pytest.importorskip(\"jax\")\n f = get_class(pkg + \".TensorXf\")\n\n value = f(ek.arange(f.Array, 2*3*4), (2, 3, 4))\n value_jax = value.jax()\n assert value_jax.shape == (2, 3, 4)\n assert jax.numpy.all(value_jax.ravel() == value.array.jax())\n\n value_2 = f(value_jax)\n assert value.shape == value_2.shape\n assert value.array == value_2.array\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test08_pytorch_conversion(pkg):\n torch = pytest.importorskip(\"torch\")\n f = get_class(pkg + \".TensorXf\")\n\n value = f(ek.arange(f.Array, 2*3*4), (2, 3, 4))\n value_torch = value.torch()\n assert value_torch.shape == (2, 3, 4)\n assert torch.all(value_torch.ravel() == value.array.torch())\n\n value_2 = f(value_torch)\n assert value.shape == value_2.shape\n assert value.array == value_2.array\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test09_tensorflow_conversion(pkg):\n tf = pytest.importorskip(\"tensorflow\")\n f = get_class(pkg + \".TensorXf\")\n tf.constant(0)\n\n value = f(ek.arange(f.Array, 2*3*4), (2, 3, 4))\n value_tf = value.tf()\n assert value_tf.shape == (2, 3, 4)\n assert tf.reduce_all(tf.equal(tf.reshape(value_tf, (2*3*4,)), value.array.tf()))\n\n value_2 = f(value_tf)\n assert value.shape == value_2.shape\n assert value.array == value_2.array\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs)\ndef test10_tensorflow_arithmetic(pkg):\n t = get_class(pkg + \".TensorXf\")\n f = get_class(pkg + \".Float32\")\n\n tt = t([1, 2, 3, 4, 5, 6], [2, 3])\n ff = f(2.0)\n\n assert ff * tt == tt * ff\n assert ff * tt == t([2, 4, 6, 8, 10, 12], [2, 3])\n\n\nclass PowerOfTwo(ek.CustomOp):\n def eval(self, value):\n self.value = value\n return value * value\n\n def forward(self):\n grad_in = self.grad_in('value')\n self.set_grad_out(2.0 * self.value * grad_in)\n\n def backward(self):\n grad_out = self.grad_out()\n self.set_grad_in('value', 2.0 * self.value * grad_out)\n\n def name(self):\n return \"power of two\"\n\n\n@pytest.mark.parametrize(\"pkg\", [\"enoki.llvm.ad\", \"enoki.cuda.ad\"])\ndef test11_custom_op(pkg):\n t = get_class(pkg + \".TensorXf\")\n f = get_class(pkg + \".Float32\")\n\n tt = t([1, 2, 3, 4, 5, 6], [2, 3])\n ek.enable_grad(tt)\n\n tt2 = ek.custom(PowerOfTwo, tt)\n\n ek.set_grad(tt2, 1.0)\n ek.enqueue(ek.ADMode.Backward, tt2)\n ek.traverse(f)\n\n assert ek.grad(tt).array == [2.0, 4.0, 6.0, 8.0, 10.0, 12.0]\n\n\n@pytest.mark.parametrize(\"pkg\", pkgs_ad)\ndef test12_select(pkg):\n for tp in [get_class(pkg + \".TensorXf\"), get_class(pkg + \".TensorXu\")]:\n initial = tp([1, 2, 3, 4], shape=(4, 1))\n\n next = initial + 10\n valid = initial >= 2.5\n assert type(valid) == ek.mask_t(initial)\n\n 
result = ek.select(valid, next, initial)\n assert type(result) == tp\n\n expected = tp([1, 2, 13, 14], shape=ek.shape(initial))\n assert ek.allclose(result, expected)\n","sub_path":"tests/python/test_tensor.py","file_name":"test_tensor.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
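`PowerOfTwo` implements f(x) = x^2, so its `backward` scales the incoming gradient by df/dx = 2x; `test11` checks exactly that. The built-in autodiff gives the same numbers, shown here as a reference sketch (module path as used by the parametrized tests; assumes the LLVM backend is available):

```python
import enoki as ek
from enoki.llvm.ad import Float32 as f

x = f([1, 2, 3])
ek.enable_grad(x)
ek.backward(x * x)          # d(x*x)/dx = 2x
assert ek.grad(x) == f([2, 4, 6])
```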
+{"seq_id":"65018485","text":"from Subway.SubwayRide import *\nimport collections\n\nclass BruteForceChooser:\n\n def __init__(self, to_visit):\n self.to_visit = to_visit\n\n def get_to_visit(self):\n return self.to_visit\n\n\nclass LongestPathChooser:\n\n NUM_RUNS = 0\n\n @staticmethod\n def get_route(starting_stop):\n\n print(\"Finding longest route from %s\" % starting_stop)\n ride = SubwayRide(StartingSegment(starting_stop))\n\n return LongestPathChooser.get_longest_path(ride)\n\n @staticmethod\n def get_available_segments(ride):\n\n visited = ride.get_visited_stations()\n cur_stop = ride.get_current_stop()\n\n rides = [seg for seg in cur_stop.get_ride_segments() if seg.get_to_station() not in visited]\n station_trans = [seg for seg in cur_stop.get_station_transfer_segments() if seg.get_to_station() not in visited]\n stop_trans = cur_stop.get_stop_transfer_segments()\n\n return rides, station_trans, stop_trans\n\n @staticmethod\n def get_longest_path(ride):\n\n rides, station_trans, stop_trans = LongestPathChooser.get_available_segments(ride)\n\n # Keep riding if its our only option\n while len(rides) == 1 and ride.get_current_station().is_passthrough():\n ride.add_segment(rides[0])\n rides, station_trans, stop_trans = LongestPathChooser.get_available_segments(ride)\n\n available = rides.copy()\n\n if not ride.just_transferred() and not ride.is_beginning():\n available += station_trans\n available += stop_trans\n\n # If we can't go anywhere\n if len(available) == 0:\n LongestPathChooser.NUM_RUNS += 1\n\n if LongestPathChooser.NUM_RUNS % 100000 == 0:\n print(\"Run %d\" % LongestPathChooser.NUM_RUNS )\n\n return ride.add_segment(EndingSegment(ride.get_current_stop()))\n\n lengths = []\n\n for seg in available:\n new_ride = ride.clone()\n new_ride.add_segment(seg)\n\n longest = LongestPathChooser.get_longest_path(new_ride)\n lengths.append(longest)\n\n # Sort by ride length\n lengths.sort(key=lambda x: x.get_num_stations(), reverse=True)\n\n return lengths[0]\n\n\nclass ShortestPathChooser:\n\n def __init__(self, length_limit):\n super().__init__()\n self.length_limit = length_limit\n\n def reset_limit(self, limit):\n self.length_limit = limit\n\n def get_limit(self):\n return self.length_limit\n\n def get_route(self, starting_stop, to_visit):\n\n print(\"Finding shortest route from %s\" % starting_stop)\n ride = SubwayRide(StartingSegment(starting_stop))\n\n return self.get_shortest_path(ride, to_visit)\n\n @staticmethod\n def distance_to_unvisited(cur_station, unvisited):\n return min([cur_station.get_distance_segments(station) for station in unvisited])\n\n @staticmethod\n def get_available_segments(ride, unvisited):\n\n cur_station = ride.get_current_station()\n cur_stop = ride.get_current_stop()\n segments = ride.get_segments()\n\n # Distance to closest unvisited station\n cur_dist = ShortestPathChooser.distance_to_unvisited(cur_station, unvisited)\n\n rides = []\n\n for ride in cur_stop.get_ride_segments():\n if ride not in segments:\n dist = ShortestPathChooser.distance_to_unvisited(ride.get_to_station(), unvisited)\n\n if dist < cur_dist:\n rides.append(ride)\n\n station_trans = []\n\n for tran in cur_stop.get_station_transfer_segments():\n if tran not in segments:\n dist = ShortestPathChooser.distance_to_unvisited(tran.get_to_station(), unvisited)\n\n if dist < cur_dist:\n station_trans.append(tran)\n\n stop_trans = cur_stop.get_stop_transfer_segments()\n\n return rides, station_trans, stop_trans\n\n def get_shortest_path(self, ride, to_visit):\n\n # We've exceeded our 
threshold\n if ride.get_length() > self.get_limit():\n return ride.add_segment(ErrorSegment(ride.get_current_stop()))\n\n # We've visited every station\n visited = ride.get_visited_stations()\n unvisited = to_visit - visited\n\n if len(unvisited) == 0:\n path_len = ride.get_length()\n print(\"Found path: %d\" % path_len)\n\n if path_len < self.get_limit():\n self.reset_limit(path_len)\n\n return ride.add_segment(EndingSegment(ride.get_current_stop()))\n\n # Get available segments (ride, station transfer and stop transfer)\n rides, station_trans, stop_trans = self.get_available_segments(ride, unvisited)\n\n available = rides.copy()\n\n if not ride.just_transferred() and not ride.is_beginning():\n available.extend(station_trans)\n available.extend(stop_trans)\n\n # If we can't go anywhere\n if len(available) == 0:\n return ride.add_segment(ErrorSegment(ride.get_current_stop()))\n\n lengths = []\n\n for seg in available:\n new_ride = ride.clone()\n new_ride.add_segment(seg)\n\n longest = self.get_shortest_path(new_ride, to_visit)\n\n if longest is not None and not longest.is_error():\n lengths.append(longest)\n\n # Sort by ride length\n lengths.sort(key=lambda x: x.get_length())\n\n if len(lengths) == 0:\n return None\n else:\n return lengths[0]\n\n\nclass ShortestPathChooser2:\n\n def __init__(self, length_limit):\n super().__init__()\n self.length_limit = length_limit\n\n def reset_limit(self, limit):\n print(\"Resetting depth limit to %d\" % limit)\n self.length_limit = limit\n\n def get_limit(self):\n return self.length_limit\n\n def get_route(self, starting_stop, to_visit):\n\n print(\"Finding shortest route from %s\" % starting_stop)\n ride = SubwayRide(StartingSegment(starting_stop))\n\n return self.get_shortest_path(ride, to_visit)\n\n @staticmethod\n def get_available_segments(ride, unvisited):\n\n cur_station = ride.get_current_station()\n cur_stop = ride.get_current_stop()\n\n # Distance to closest unvisited station\n cur_dist = cur_station.get_distance_segments(unvisited)\n\n rides = []\n\n for ride in cur_stop.get_ride_segments():\n dist = ride.get_to_station().get_distance_segments(unvisited)\n\n if dist < cur_dist:\n rides.append(ride)\n\n station_trans = []\n\n for tran in cur_stop.get_station_transfer_segments():\n dist = tran.get_to_station().get_distance_segments(unvisited)\n\n if dist < cur_dist:\n station_trans.append(tran)\n\n stop_trans = cur_stop.get_stop_transfer_segments()\n\n return rides, station_trans, stop_trans\n\n def get_shortest_path(self, starting_ride, to_visit):\n\n queue = collections.deque()\n queue.append(starting_ride)\n\n shortest = {}\n\n # Keep processing as long as there are paths in the queue\n while queue:\n ride = queue.popleft()\n\n # Skip this ride if its longer than our limit\n if ride.get_length() > self.get_limit():\n continue\n\n visited = ride.get_visited_stations()\n unvisited = to_visit - visited\n\n if len(unvisited) == 0:\n self.reset_limit(ride.get_length())\n return ride.add_segment(EndingSegment(ride.get_current_stop()))\n\n # Get available segments (ride, station transfer and stop transfer)\n rides, station_trans, stop_trans = self.get_available_segments(ride, unvisited)\n\n available = rides.copy()\n\n if not ride.just_transferred() and not ride.is_beginning():\n available.extend(station_trans)\n available.extend(stop_trans)\n\n # We can't go anywhere\n if len(available) == 0:\n continue\n\n for seg in available:\n\n new_ride = ride.clone()\n new_ride.add_segment(seg)\n\n to_stop = seg.get_to_stop()\n\n visited = 
new_ride.get_visited_stations() & to_visit\n num_visited = len(visited)\n\n shortest.setdefault(to_stop, 0)\n\n if num_visited > shortest[to_stop]:\n queue.append(new_ride)\n shortest[to_stop] = num_visited\n\n if len(queue) % 10000 == 0:\n print(\"Queue len: %d\" % len(queue))\n print(\"Visited: %d\" % num_visited)\n\n print(\"Queue is empty\")\n return None\n","sub_path":"Subway/RouteChoosers/BruteForceChooser.py","file_name":"BruteForceChooser.py","file_ext":"py","file_size_in_byte":8733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
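The key idea in `ShortestPathChooser2` is the dominance prune: a partial ride is enqueued only if it reaches a stop having covered strictly more of the target stations than any earlier arrival at that stop. Isolated as a sketch (`should_enqueue` is a hypothetical name, not in the class above):

```python
shortest = {}  # stop -> best target-station count seen on arrival

def should_enqueue(stop, num_visited):
    if num_visited > shortest.get(stop, 0):
        shortest[stop] = num_visited  # new best arrival at this stop
        return True
    return False  # dominated: an earlier path did at least as well here
```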
+{"seq_id":"321443280","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# load mnist\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# input for data\nx = tf.placeholder(tf.float32, [None, 784]) # None here means any arbitrary length!\nW = tf.Variable(tf.zeros([784, 10])) # Doesn't matter what they are so, initialize with zero\nb = tf.Variable(tf.zeros([10]))\n\ny = tf.matmul(x, W) + b # Notice softmax is from tf.nn\n\ny_ = tf.placeholder(tf.float32, [None, 10]) # Label placeholder\n#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n\n# or more stable version of implementation\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n\n# optimize\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\nsess = tf.InteractiveSession()\ntf.global_variables_initializer().run() # Initialize variables!\n\nfor _ in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100) # Get next batch of 100\n sess.run(train_step, feed_dict={x:batch_xs, y_:batch_ys}) # Train with the new batch\n\ncorrect_predictions = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) # Prediction accuracy calculation\naccuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32)) # note that correct_prediction is bool\n\nprint (sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\n","sub_path":"tensorflow/soft_mnist.py","file_name":"soft_mnist.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"322230728","text":"import numpy as np\r\nfrom collections import Counter\r\nimport time\r\nimport warnings\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nminNumSample = 10\r\n\r\n\r\nclass BinaryTree:\r\n \"\"\"An Special BinaryTree.\r\n\r\n Construct a special binary tree, store the data in the nodes of the tree,\r\n node labels, left and right subtree positions\r\n\r\n\r\n \"\"\"\r\n\r\n def __init__(self, labels=np.array([]), datas=np.array([])):\r\n self.label = labels\r\n self.data = datas\r\n self.leftChild = None\r\n self.rightChild = None\r\n\r\n def set_rightChild(self, rightObj):\r\n self.rightChild = rightObj\r\n\r\n def set_leftChild(self, leftObj):\r\n self.leftChild = leftObj\r\n\r\n def get_rightChild(self):\r\n return self.rightChild\r\n\r\n def get_leftChild(self):\r\n return self.leftChild\r\n\r\n def get_data(self):\r\n return self.data\r\n\r\n def get_label(self):\r\n return self.label\r\n\r\n\r\ndef RSDS_fun(train_data, tree_num=10):\r\n \"\"\"Handling data noise using completely random forest judgment.\r\n\r\n Establish a tree_num completely random tree. The data label in each leaf node\r\n of the tree is compared with the parent node label to obtain the noise judgment\r\n label of each data in the case of a tree, and all the completely random tree noise\r\n judgment labels are combined to vote to determine the noise data. Denoised data\r\n set after processingEstablish a tree_num completely random tree. The data label\r\n in each leaf node of the tree is compared with the parent node label to obtain\r\n the noise judgment label of each data in the case of a tree, and all the completely\r\n random tree noise judgment labels are combined to vote to determine the noise data.\r\n Denoised data set after processing\r\n\r\n Parameters\r\n ----------\r\n train_data :Numpy type data set.\r\n\r\n tree_num :Total number of random trees.\r\n\r\n \"\"\"\r\n\r\n m, n = train_data.shape\r\n forest = np.array([])\r\n for i in range(tree_num):\r\n tree = CRT(train_data)\r\n visiTree = visitCRT(tree)\r\n visiTree = visiTree[:, np.argsort(visiTree[0, :])]\r\n visiTree = visiTree[1, :]\r\n if forest.size == 0:\r\n forest = visiTree.reshape(m, 1)\r\n else:\r\n forest = np.hstack((forest, visiTree.reshape(m, 1)))\r\n noiseForest = np.sum(forest, axis=1)\r\n nn = 0.5 * tree_num\r\n noiseForest = np.array(list(map(lambda x: 1 if x >= nn or x == 0 else 0, noiseForest)))\r\n denoiseTraindata = deleteNoiseData(train_data, noiseForest)\r\n return denoiseTraindata\r\n\r\n\r\ndef CRT(data):\r\n \"\"\"Build A Completely Random Tree.\r\n\r\n Add a column at the end of the data, store the initial sequence\r\n number of each piece of data, call the function ‘generateTree’\r\n spanning tree\r\n\r\n Parameters\r\n ----------\r\n data :Numpy type data set\r\n\r\n \"\"\"\r\n numberSample = data.shape[0]\r\n orderAttribute = np.arange(numberSample).reshape(numberSample, 1) # (862, 1)\r\n data = np.hstack((data, orderAttribute))\r\n completeRandomTree = generateTree(data)\r\n return completeRandomTree\r\n\r\n\r\ndef generateTree(data, uplabels=[]):\r\n \"\"\"Iteratively Generating A Completely Random Tree.\r\n\r\n Complete random tree by random partitioning of random attributes\r\n\r\n Parameters\r\n ----------\r\n data :Numpy type data set\r\n\r\n uplabels :rootlabel\r\n\r\n \"\"\"\r\n try:\r\n numberSample, numberAttribute = data.shape\r\n except ValueError:\r\n numberSample = 1\r\n numberAttribute = data.size\r\n\r\n if numberAttribute == 0:\r\n return None\r\n\r\n numberAttribute = 
numberAttribute - 2 # Subtract the added serial number and label\r\n\r\n # The category of the current data, also called the node category\r\n labelNumKey = [] # todo\r\n if numberSample == 1: # Only one sample left\r\n labelvalue = data[0][0]\r\n rootdata = data[0][numberAttribute + 1]\r\n else:\r\n labelNum = Counter(data[:, 0])\r\n labelNumKey = list(labelNum.keys()) # Key (label)\r\n labelNumValue = list(labelNum.values()) # Value (quantity)\r\n labelvalue = labelNumKey[labelNumValue.index(max(labelNumValue))] # Vote to find the label\r\n rootdata = data[:, numberAttribute + 1]\r\n rootlabel = np.hstack((labelvalue, uplabels)) # todo\r\n\r\n # Call the class 'BinaryTree', passing in tags and data\r\n CRTree = BinaryTree(rootlabel, rootdata)\r\n '''\r\n The 'rootlabel' and 'rootdata' are obtained above, the 'rootlabel' is a label (derived by voting), \r\n the 'rootdata' is a series of serial numbers, and finally the class BinaryTree is called.\r\n '''\r\n # There are at least two conditions for the tree to stop growing:\r\n # 1 the number of samples is limited;\r\n # 2 the first column is all equal\r\n if numberSample < minNumSample or len(labelNumKey) < 2:\r\n # minNumSample defaults to 10 or only 1 of the label types are left.\r\n return CRTree\r\n else:\r\n maxCycles = 1.5 * numberAttribute # Maximum number of cycles\r\n # maxCycles = 2\r\n i = 0\r\n while True:\r\n # Once a data exception occurs: except for the above two exceptions that\r\n # stop the tree growth condition, that is, the error data, the loop here will not stop.\r\n i += 1\r\n splitAttribute = np.random.randint(1, numberAttribute) # Randomly select a list of attributes\r\n if splitAttribute > 0 and splitAttribute < numberAttribute + 1:\r\n dataSplit = data[:, splitAttribute]\r\n uniquedata = list(set(dataSplit))\r\n if len(uniquedata) > 1:\r\n break\r\n if i > maxCycles: # Tree caused by data anomaly stops growing\r\n return CRTree\r\n sv1 = np.random.choice(uniquedata)\r\n i = 0\r\n while True:\r\n i += 1\r\n sv2 = np.random.choice(uniquedata)\r\n if sv2 != sv1:\r\n break\r\n if i > maxCycles:\r\n return CRTree\r\n splitValue = np.mean([sv1, sv2])\r\n '''\r\n The above randomly selected rows and columns are obtained, and the final 'splitValue' is an average\r\n '''\r\n\r\n # Call split function\r\n leftdata, rightdata = splitData(data, splitAttribute, splitValue)\r\n\r\n # Set the left subtree, the right subtree\r\n CRTree.set_leftChild(generateTree(leftdata, rootlabel))\r\n CRTree.set_rightChild(generateTree(rightdata, rootlabel))\r\n return CRTree\r\n\r\n\r\n'''\r\nreturns a matrix of two rows and N columns, the first row is the index of the sample, \r\nand the second row is the threshold of the label noise.\r\ne.g.\r\n[[ 36. 499. 547. 557. 563. 587.]\r\n [ 0. 0. 0. 0. 0. 
0.]]\r\n'''\r\n\r\n\r\ndef visitCRT(tree):\r\n \"\"\"\r\n Traversing the tree to get the relationship between the data and the node label.\r\n\r\n The traversal tree stores the data number and node label stored in each node of the\r\n completely random tree.\r\n\r\n Parameters\r\n ----------\r\n tree :Root node of the tree.\r\n\r\n\r\n \"\"\"\r\n if not tree.get_leftChild() and not tree.get_rightChild(): # If the left and right subtrees are empty\r\n data = tree.get_data() # data is the serial number of the sample\r\n labels = checkLabelSequence(tree.get_label()) # Existing tag sequence\r\n try:\r\n labels = np.zeros(len(data)) + labels\r\n except TypeError:\r\n pass\r\n result = np.vstack((data, labels))\r\n return result\r\n else:\r\n resultLeft = visitCRT(tree.get_leftChild())\r\n resultRight = visitCRT(tree.get_rightChild())\r\n result = np.hstack((resultLeft, resultRight))\r\n return result\r\n\r\n\r\ndef deleteNoiseData(data, noiseOrder):\r\n \"\"\"Delete noise points in the training set.\r\n\r\n Delete the noise points in the training set according to the noise\r\n judgment result of each data in noiseOrder.\r\n\r\n Parameters\r\n ----------\r\n data :Numpy type data set.\r\n\r\n noiseOrder :Determine if each piece of data is a list of noise.\r\n\r\n \"\"\"\r\n m, n = data.shape\r\n data = np.hstack((data, noiseOrder.reshape(m, 1)))\r\n redata = np.array(list(filter(lambda x: x[n] == 0, data[:, ])))\r\n redata = np.delete(redata, n, axis=1)\r\n return redata\r\n\r\n\r\n\"\"\"check whether the label of the parent node and the leaf node are consistent.\"\"\"\r\n\r\n\r\ndef checkLabelSequence(labels):\r\n \"\"\"Check label sequence.\r\n\r\n Check if the leaf node is the same as the parent node.\r\n\r\n Parameters\r\n ----------\r\n labels :label sequence.\r\n\r\n \"\"\"\r\n return 1 if labels[0] != labels[1] else 0\r\n\r\n\r\ndef splitData(data, splitAttribute, splitValue):\r\n \"\"\"Dividing data sets.\r\n\r\n Divide the data into two parts, leftData and rightData, based on the splitValue\r\n of the split attribute column element.\r\n\r\n Parameters\r\n ----------\r\n data:Numpy type data set.\r\n\r\n splitAttribute:Randomly selected attributes when dividing.\r\n\r\n splitValue:Dividing the value obtained by dividing the selected attribute.\r\n \"\"\"\r\n rightData = np.array(list(filter(lambda x: x[splitAttribute] > splitValue, data[:, ])))\r\n leftData = np.array(list(filter(lambda x: x[splitAttribute] <= splitValue, data[:, ])))\r\n return leftData, rightData\r\n","sub_path":"RSDS.py","file_name":"RSDS.py","file_ext":"py","file_size_in_byte":9474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"53557431","text":"import sys; sys.path.append('../')\nimport src as _\nimport unittest\n\nclass TestListsMethodHeadFirst(unittest.TestCase):\n def test_head_first(self):\n self.assertEqual(\n _.head([1, 2, 3, 2]),\n 1\n )\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/lists/test_head_first.py","file_name":"test_head_first.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"369786295","text":"\n# %% Importa libraries\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport os\nfrom shapely.geometry import Point\nimport contextily as ctx\n\n# %% Download Data to used\n# Rivers and streams information\n# https://www.weather.gov/gis/AWIPSShapefiles\n\nfile = os.path.join('../../data', 'rs16my07.shp')\nrivers_us = gpd.read_file(file)\n\n# River Forecast Center Boundaries\n# https://www.weather.gov/gis/RFCBounds\nfile_st = os.path.join('../../data', 'rf12ja05.shp')\nstat_ref = gpd.read_file(file_st)\n\n# Gauges II USGS stream gauge dataset:\n# https://water.usgs.gov/GIS/metadata/usgswrd/XML/gagesII_Sept2011.xml#stdorder\n\nfile_ga = os.path.join('../../data', 'gagesII_9322_sept30_2011.shp')\ngages = gpd.read_file(file_ga)\n\n# %% Check whats inside data files and CRS :(\n# Rivers\n\ntype(rivers_us)\nvar_names = rivers_us.head()\nrivers_us.columns # Variables name\nrivers_us.shape # no. of gages and no. of varaibles\n\nrivers_us.geom_type # geometry\nrivers_us.crs # check our CRS - coordinate reference system\nrivers_us.total_bounds # Check the spatial extent\n\n# %% Gages information\n# Take only AZ\ngages.columns\ngages.STATE.unique()\ngages_AZ = gages[gages['STATE'] == 'AZ']\ngages_AZ.shape\n\n# %% Add specific points\n# UA: 32.22877495, -110.97688412\n# STream gauge: 34.44833333, -111.7891667\npoint_list = np.array([[-111.7891667, 34.44833333]])\npoint_geom = [Point(xy) for xy in point_list]\npoint_df = gpd.GeoDataFrame(point_geom, columns=['geometry'],\n crs=gages_AZ.crs) # project into gages_az GEOMETRY\n# %% Look at one region in rivers\n# Zoom in and just look at AZ/UTAH\nstat_ref.columns\nstat_ref.STATE.unique()\nstat_ref_AZ = stat_ref[stat_ref['STATE'] == 'UT'] # Utah and Arizona\nstat_ref_AZ.shape\ntest = pd.DataFrame(stat_ref['STATE']) # aux to see regions name\n# super CRUCIAL step!!!\n# Project points into stat_ref CRS\npoints_project = gages_AZ.to_crs(stat_ref_AZ.crs)\n\n# %% Plot map :D\n\nfig, ax = plt.subplots(figsize=(10, 10))\nrivers_us.plot(figsize=(10, 10), alpha=0.5, edgecolor='b',\n ax=ax, label='Rivers', zorder=1)\nstat_ref_AZ.boundary.plot(figsize=(10, 10), alpha=0.5, edgecolor='k',\n ax=ax, label='River Forecast Center Boundaries')\npoints_project.plot(column='DRAIN_SQKM', categorical=True,\n legend=False, markersize=45, cmap='OrRd',\n ax=ax, label='Arizona Gages')\npoint_df.plot(ax=ax, color='k', marker='*', markersize=45,\n label='Verde River Gage')\nplt.ylim(ymax=45, ymin=30)\nplt.xlim(xmax=-105, xmin=-120)\nax.set_title('Hydrologic Information')\nax.set_xlabel('Longitude [°]')\nax.set_ylabel('Latitude [°]')\nax.legend()\nctx.add_basemap(ax, crs='EPSG:4326')\nfig.savefig(\"Hydr_map.png\")\n# %% Run above","sub_path":"assignment_10/Fierro_map.py","file_name":"Fierro_map.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"455419853","text":"import cv2\nimport numpy as np\n\n\ndef decode(Starting_Index, Ghap, Add_a_Value, LengthOfString, ImagePath):\n image = cv2.imread(ImagePath)\n grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n Row, Column = grayimage.shape\n Counter = 1\n k = Start = SetValue = 0\n DecodedString = \"\"\n for i in range(0, Row, 1):\n for j in range(0, Column, 1):\n if Counter == Starting_Index:\n Start = 1\n if Start == 1:\n if SetValue % (Ghap + 1) == 0:\n if k == LengthOfString:\n k = -1\n break\n Value = grayimage[i, j]\n while Value < Add_a_Value:\n Value = Value + 255\n DecodedString = DecodedString + chr(Value - Add_a_Value)\n k = k + 1\n SetValue = SetValue + 1\n else:\n SetValue = SetValue + 1\n\n Counter = Counter + 1\n if k == -1:\n break\n return DecodedString","sub_path":"Decode/Decoder.py","file_name":"Decoder.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"546388962","text":"# encoding: utf-8\n# 朴素贝叶斯分类 iris, 划分数据集为训练集和测试集\n\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import datasets\n\n\n# 1、加载数据集\n# 我们假定sepal length, sepal width, petal length, petal width 4个量独立且服从高斯分布,用贝叶斯分类器建模\niris = datasets.load_iris()\nprint(iris.data)\n'''\narray([[ 5.1, 3.5, 1.4, 0.2],\n [ 4.9, 3. , 1.4, 0.2],\n [ 4.7, 3.2, 1.3, 0.2],\n [ 4.6, 3.1, 1.5, 0.2],\n [ 5. , 3.6, 1.4, 0.2]])\n'''\nprint(iris.target[:5])\n''' [0 0 0 0 0] '''\n\n\n# 2、划分数据集为训练集和测试集\nseed = 2\ntest_size = 0.3 # 7:3\nX_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=test_size, \\\n random_state=seed)\n\n\n# 3、训练模型\nmodel = GaussianNB()\nmodel.fit(X_train, y_train)\nprint(model)\n''' GaussianNB(priors=None) '''\n\n\n# 4、预测测试数据集\ny_pred = model.predict(X_test)\n\n\n# 5、评价预测的正确率\naccuracy = accuracy_score(y_test, y_pred)\nprint(\"Naive Bayes Accuracy: %.2f%%\" % (accuracy*100.0))\n''' Naive Bayes Accuracy: 97.78% '''\n\n\n","sub_path":"01_2_train_test_split.py","file_name":"01_2_train_test_split.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"292456252","text":"'''\nIterative merge sort\nDeveloper: Dr. Syed Saif ur Rahman\nPurpose: Educational\n'''\nimport math #We used log, ciel\n\n#Merges the data to bring them in order\ndef merge(l1, l2, s):\n global mydata\n global mydatalen\n #print(\"merge \" + str(l1) + \" with \" + str(l1 + s) + \" for \" + str(s) + \" elements \")\n l = [None] * (s) #Temporary list to store left half of data\n r = [None] * (s) #Temporary list to store right half of data\n for index in range(s):\n if l1 + index < mydatalen: #check if index is valid\n l[index] = mydata[l1 + index]\n if l2 + index < mydatalen: #check if index is valid\n r[index] = mydata[l2 + index]\n\n #print(\"l \" + str(l))\n #print(\"r \" + str(r))\n i = 0\n j = 0\n for k in range (l1,l2+s):\n if k < mydatalen:\n if l[i] <= r[j] or r[j] is None: # None check for odd case\n mydata[k] = l[i]\n i = i + 1\n if i >= s :\n while j < s :\n k = k + 1\n if k >= mydatalen:break;\n mydata[k] = r[j]\n j = j + 1\n break;\n else:\n mydata[k] = r[j]\n j = j + 1\n if j >= s :\n while i < s :\n k = k + 1\n if k >= mydatalen:break;\n mydata[k] = l[i]\n i = i + 1\n break;\n\n#Test data\n#mydata = [9, 1, 8, 2, 7, 3, 6, 4, 5]\n#mydata = [9, 1, 8, 2, 7, 3, 6, 4, 5,9, 1, 8, 2, 7, 3, 6, 4, 5]\n#mydata = [9, 1, 8, 2, 7, 3, 6, 4, 5, 9, 1, 8, 2, 7, 3, 6, 4, 5, 9, 1, 8, 2, 7, 3, 6, 4, 5]\n#mydata = [-1, 8, -2, 7, -3, 6, -4, 5]\n#mydata = [9, -8, 2, 7, 3, 6, 4, 5, -9, 1, 8, 2, 7, 3, 6, 4, -5]\nmydata = [9, 1, 8, 2, -3, 6, 4, 5, 9, 1, -8, 2, 7, 3, 6, 4, 5, 9, 1, 8, 2, 7, 3, 6, -4, 5]\nmydatalen = len(mydata)\nprint(\"math.log(mydatalen, 2)\", math.log(mydatalen, 2))\n#Depth of imaginary tree\ndepth = int(math.ceil(math.log(mydatalen, 2)))\nprint(\"Tree depth\", depth)\nx = 0\nfor level in range(depth - 1,-1,-1):\n print(\"Tree level\", level)\n sp = 2**level\n print(\"Sub problems\", sp)\n #sps = mydatalen / 2**level #Cant figure out why it is wrong\n sps = 2 ** x\n print(\"Sub problem size\", sps)\n print(\"mydata -> \" + str(mydata))\n for mi in range(0,mydatalen,sps*2):\n merge(mi,mi+sps,sps)\n print(\"mydata -> \" + str(mydata))\n x += 1","sub_path":"lab3/itimerge.py","file_name":"itimerge.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"121766505","text":"from bs4 import BeautifulSoup\nfrom bs4 import NavigableString\nimport re\nimport requests\nfrom string import ascii_lowercase\n\nfrom terms import Term\n\n\n# object used for scraping needs to on instantiation:\n# 1. have the appropriate url for the index\n# needs capability to:\n# 1. quickly get all terms from all pages of index\n\nclass Scraper:\n\n def __init__(self):\n self.index_url = (\n \"http://imcip.meded.com/integrated/ha/index/index{}.htm\"\n )\n self.lab_url = (\n \"http://imcip.meded.com/integrated/ha/labs/{lab}/{resource}.htm\"\n )\n\n def terms_by_index(self, letter):\n result = []\n for category in self.get_category_soup(\n self.get_index_soup(\n self.index(letter)\n )\n ):\n for term in self.get_terms(category):\n result.append(term)\n return result\n\n def all_terms(self):\n result = []\n for url in self.all_indexes():\n for category in self.get_category_soup(self.get_index_soup(url)):\n for term in self.get_terms(category):\n result.append(term)\n return result\n\n def get_lab_soup(self, url):\n print(\"Requesting from Online Dissector: \" + url)\n res = requests.get(url)\n if res.ok:\n soup = BeautifulSoup(res.content, \"html.parser\")\n return soup.find(\"table\")\n # returns table that contains all the relevant lab information\n # including further links\n else:\n print(\"Error {}: Could not retrieve lab\").format(res.status_code)\n return None\n\n def main_picture(self, soup):\n # get the url for the main picture displayed in the lab step page\n img = soup.find_all(\"img\", attrs={\"alt\": \"Click for larger verison\"})\n img_url = img[\"src\"]\n return \"http://imcip.meded.com/integrated/ha/\" + img_url[6:]\n\n def has_strong_tag(self, tag):\n if tag.find(\"strong\") is None and not tag.has_attr('style'):\n return True\n else:\n return False\n\n def linked_pages(self, soup):\n # get the urls for all other terms with linked lab step pages in the\n # lab step page\n # call main_picture on each of these to get the picture on each page\n # anatomy terms have tag, which distinguishes these links from\n # instructional, procedurally based links\n link_tags = soup.find_all(\"a\")\n term_tags = [\n tag for tag in link_tags if self.has_strong_tag(tag) is True\n ]\n return [tag[\"href\"] for tag in term_tags]\n # doesn't return entire url, just last part\n # use with Term.lab_path() to get full path\n\n def index(self, letter):\n return self.index_url.format(letter)\n\n def all_indexes(self):\n lst = []\n for c in ascii_lowercase:\n lst.append(self.index_url.format(c))\n return lst\n\n def get_index_soup(self, url):\n # returns list of all and tags\n print(\"Requesting from Online Dissector: \" + url)\n res = requests.get(url)\n if res.ok:\n soup = BeautifulSoup(res.content, \"html.parser\")\n return soup.find_all([\"b\", \"ul\"])\n else:\n print(\"Error {}: Could not retrieve index\").format(res.status_code)\n print(res.text)\n return None\n\n def get_category_soup(self, index_soup):\n # requires output from get_index_soup()\n # returns generator of 2 item lists containing\n # [b tag, ul tag]\n for i in range(0, len(index_soup), 2):\n yield index_soup[i: i + 2]\n\n def get_category(self, soup):\n # requires single Tag, from generator, indexed by get_terms\n return soup.string\n\n def get_terms(self, soup):\n # returns Term objects of all terms in single category\n # from the index page, corresponding to one element\n # from get_category_soup\n # n = soup[0] + soup[1]\n c = self.get_category(soup[0])\n result = []\n for term_soup in 
soup[1].find_all(\"li\"):\n t = Term(category=c, soup=term_soup)\n result.append(t)\n return result\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"535114552","text":"#!/usr/bin/env python3\n\n\"\"\"\n@author: xi\n@since: 2018-06-17\n\"\"\"\n\nimport collections\nimport datetime as dt\n\nimport numpy as np\n\nfrom . import ops\n\n\nclass AccCalculator(object):\n\n def __init__(self):\n self._num_hit = 0\n self._num_all = 0\n\n def update(self, label_pred, label_true):\n hit = np.equal(label_pred, label_true)\n hit = np.float32(hit)\n self._num_hit += float(np.sum(hit))\n self._num_all += len(hit)\n\n def reset(self):\n self._num_hit = 0\n self._num_all = 0\n\n @property\n def accuracy(self):\n return self._num_hit / self._num_all if self._num_all > 0 else 0.0\n\n\nclass BiClassCalculator(object):\n\n def __init__(self):\n self._tp = 0\n self._tn = 0\n self._fp = 0\n self._fn = 0\n\n def update(self, label_predict, label_true):\n hit = np.equal(label_predict, label_true)\n hit = np.float32(hit)\n miss = 1.0 - hit\n\n pos = np.float32(label_predict)\n neg = 1.0 - pos\n\n self._tp += np.sum(hit * pos, keepdims=False)\n self._tn += np.sum(hit * neg, keepdims=False)\n self._fp += np.sum(miss * pos, keepdims=False)\n self._fn += np.sum(miss * neg, keepdims=False)\n\n @property\n def precision(self):\n num_pos_pred = self._tp + self._fp\n return self._tp / num_pos_pred if num_pos_pred > 0 else 0.0\n\n @property\n def recall(self):\n num_pos_true = self._tp + self._fn\n return self._tp / num_pos_true if num_pos_true > 0 else 0.0\n\n @property\n def f1(self):\n pre = self.precision\n rec = self.recall\n return 2 * (pre * rec) / (pre + rec)\n\n @property\n def accuracy(self):\n num_hit = self._tp + self._tn\n num_all = self._tp + self._tn + self._fp + self._fn\n return num_hit / num_all if num_all > 0 else 0.0\n\n\ndef call_for_batch(context, slot, data_source):\n \"\"\"\n\n Args:\n context (dict):\n slot (photinia.Step):\n data_source (photinia.BatchSource):\n\n Returns:\n dict[str, any]:\n tuple|list:\n\n \"\"\"\n data_batch = data_source.next()\n if data_batch is None:\n data_batch = data_source.next()\n if data_batch is None:\n raise RuntimeError('Too many \"None\" returned by data source.')\n ret = slot(*data_batch)\n if isinstance(ret, (tuple, list)):\n for i, value in enumerate(ret):\n context[i] = value\n elif isinstance(ret, (dict, collections.OrderedDict)):\n context.update(ret)\n else:\n # Should not be reached, since Slot ALWAYS returns tuple or dict.\n raise RuntimeError('Invalid Slot outputs type.')\n return ret\n\n\ndef call_for_all(context, slot, data_source):\n \"\"\"\n\n Args:\n context (dict):\n slot (photinia.Step):\n data_source (photinia.BatchSource):\n\n Returns:\n dict[str, list]:\n\n \"\"\"\n ret = collections.defaultdict(list)\n while True:\n data_batch = data_source.next()\n if data_batch is None:\n break\n ret = slot(*data_batch)\n if isinstance(ret, (tuple, list)):\n for i, value in enumerate(ret):\n ret[i].append(value)\n elif isinstance(ret, (dict, collections.OrderedDict)):\n for name, value in ret.items():\n ret[name].append(value)\n else:\n # Should not be reached, since Slot ALWAYS returns tuple or dict.\n raise RuntimeError('Invalid Slot outputs type.')\n context.update(ret)\n return ret\n\n\ndef print_log(context, value_names, i=None, n=None, message=None):\n now = dt.datetime.now()\n print(now.strftime('[%Y-%m-%d %H:%M:%S'), end='')\n\n if i is not None:\n if n is not None:\n percentage = '%.2f' % (i / n * 100,)\n print(' %s/%s|%s%%]' % (str(i), str(n), percentage), end='')\n else:\n print(' %s]' % str(i), end='')\n else:\n print(']', end='')\n\n if message is not None:\n print('\\t' + 
str(message), end='')\n\n    values = context if context else ()\n    if isinstance(values, (tuple, list)):\n        for i, name in enumerate(value_names):\n            if i < len(values):\n                value = values[i]\n                print('\t%s=%f' % (name, value), end='')\n            else:\n                print('\t%s=?' % (name,), end='')\n    elif isinstance(values, (dict, collections.OrderedDict)):\n        for name in value_names:\n            if name in values:\n                value = values[name]\n                print('\t%s=%f' % (name, value), end='')\n            else:\n                print('\t%s=?' % (name,), end='')\n    print()\n\n\nclass OptimizerWrapper(object):\n    \"\"\"OptimizerWrapper\n    \"\"\"\n\n    def __init__(self,\n                 optimizer):\n        self._optimizer = optimizer\n\n    @property\n    def optimizer(self):\n        return self._optimizer\n\n    def minimize(self, loss, var_list=None):\n        pair_list = self._optimizer.compute_gradients(loss, var_list=var_list)\n        pair_list = self._process_gradients(pair_list)\n        return self._optimizer.apply_gradients(pair_list)\n\n    def _process_gradients(self, pair_list):\n        raise NotImplementedError\n\n\nclass GradientClipping(OptimizerWrapper):\n    \"\"\"GradientClipping\n    \"\"\"\n\n    def __init__(self, optimizer, max_norm):\n        self._max_norm = max_norm\n        super(GradientClipping, self).__init__(optimizer)\n\n    @property\n    def max_norm(self):\n        return self._max_norm\n\n    def _process_gradients(self, pair_list):\n        pair_list, raw_grad, grad = ops.clip_gradient(pair_list, self._max_norm)\n        self._raw_grad_norm = raw_grad\n        self._grad_norm = grad\n        return pair_list\n\n    @property\n    def raw_grad_norm(self):\n        return self._raw_grad_norm\n\n    @property\n    def grad_norm(self):\n        return self._grad_norm\n","sub_path":"photinia/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"242354308","text":"import PySimpleGUI as sg\r\n\r\n\r\ndef status_window():\r\n sg.ChangeLookAndFeel('TealMono') # Changes color scheme of window created\r\n form = sg.FlexForm('Status Window', auto_size_text=True, auto_size_buttons=False, grab_anywhere=False,\r\n return_keyboard_events=True)\r\n\r\n # Classes are placeholder names and will change\r\n class_list = ['Warrior', 'Wizard', 'Rogue', 'Sword Mage', 'Mercenary', 'Sorcerer', 'All-Rounder']\r\n\r\n istat = 5 # Initial stat value\r\n # Layout for the status window\r\n layout = [[sg.Text('Name:'), sg.Text('', size=(20, 1), background_color='black', text_color='white', key='cname'),\r\n sg.ReadFormButton('Choose Name')],\r\n [sg.Text('_' * 55)],\r\n [sg.Text('Stats:')],\r\n [sg.Text('Points Remaining'), sg.Text('15', size=(2, 1), key='points')],\r\n [sg.Text('STR:', size=(5, 1)),\r\n sg.Spin([i for i in range(5, 101)], initial_value=istat, key='STR', size=(5, 1), change_submits=True)],\r\n [sg.Text('INT:', size=(5, 1)),\r\n sg.Spin([i for i in range(5, 101)], initial_value=istat, key='INT', size=(5, 1), change_submits=True)],\r\n [sg.Text('DEX:', size=(5, 1)),\r\n sg.Spin([i for i in range(5, 101)], initial_value=istat, key='DEX', size=(5, 1), change_submits=True)],\r\n [sg.Text('Class:', size=(5, 1), font=('Helvetica', 20)),\r\n sg.Text('', size=(13, 1), font=('Helvetica', 20), background_color='black', text_color='white',\r\n justification='center', key='class'),\r\n sg.ReadFormButton('Class Info')],\r\n [sg.ReadFormButton('Reset Stats'), sg.Text(' ' * 51), sg.Exit()]]\r\n\r\n form.Layout(layout)\r\n total_points = 15\r\n cur_points = 15\r\n while True:\r\n button, values = form.Read()\r\n\r\n if button == 'Choose Name': # Button to type in your name for your character\r\n name = sg.PopupGetText('What is your name?')\r\n form.FindElement('cname').Update(name)\r\n\r\n if button is None or button == 'Exit': break # Program ends successfully if 'Quit' is clicked or window is closed\r\n\r\n # When no stat requirements are met:\r\n try:\r\n strength = int(values['STR'])\r\n intel = int(values['INT'])\r\n dex = int(values['DEX'])\r\n # spoints = int(values['points'])\r\n except:\r\n continue\r\n\r\n if all((strength, intel, dex)) < 10: form.FindElement('class').Update('')\r\n\r\n # How skill points remaining is determined (not sure how to stop stats from increasing when spoints = 0)\r\n if 0 <= cur_points < 16:\r\n stat = [strength, intel, dex]\r\n if 14 < sum(stat) <= 30:\r\n spoints = 15 - (sum(stat) - 15)\r\n cur_points = spoints\r\n form.FindElement('points').Update(spoints)\r\n\r\n form.FindElement('STR').Update(new_values=[i for i in range(1, strength + cur_points + 1)])\r\n form.FindElement('DEX').Update(new_values=[i for i in range(1, dex + cur_points + 1)])\r\n form.FindElement('INT').Update(new_values=[i for i in range(1, intel + cur_points + 1)])\r\n\r\n # Classes based on one stat:\r\n if strength >= 10:\r\n form.FindElement('class').Update(class_list[0])\r\n elif intel >= 10:\r\n form.FindElement('class').Update(class_list[\r\n 1]) # Class is displayed in window when the stat requirements are met (stat requirements are placeholder)\r\n elif dex >= 10:\r\n form.FindElement('class').Update(class_list[2])\r\n\r\n # Classes based on two stats:\r\n if strength >= 10 and intel >= 10:\r\n form.FindElement('class').Update(class_list[3])\r\n elif strength >= 10 and dex >= 10:\r\n form.FindElement('class').Update(class_list[4])\r\n elif intel >= 10 and dex >= 10:\r\n form.FindElement('class').Update(class_list[5])\r\n\r\n # 
Classes based on three stats:\r\n        if strength >= 10 and intel >= 10 and dex >= 10:\r\n            form.FindElement('class').Update(class_list[6])\r\n        # Button that resets stats back to initial values as well as the class name\r\n        if button == 'Reset Stats':\r\n            form.Fill({'STR': '5', 'INT': '5', 'DEX': '5'})\r\n            form.FindElement('points').Update(15)\r\n            if all((strength, intel, dex)) < 10: form.FindElement('class').Update('')\r\n\r\n        # Class info button\r\n        # TODO - Need to change the use of .DisplayText\r\n        # Cannot reach inside of the elements to get at their internal values like DisplayText.\r\n        # Don't use GUI elements as variables. Update the elements but don't read (at this point in time)\r\n        if button == 'Class Info' and form.FindElement('class').DisplayText == '':\r\n            sg.Popup('Not a Class:', 'If you see this, go back and obtain a class!')\r\n        if button == 'Class Info' and form.FindElement(\r\n                'class').DisplayText == 'Warrior': # Example of a popup display explaining the class when the button is pressed\r\n            sg.Popup('Warrior Class:', 'A class that specializes in melee combat') # Placeholder descriptions\r\n\r\nstatus_window()","sub_path":"StatusWindow.py","file_name":"StatusWindow.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"310028934","text":"import pygame, sys\r\nfrom pygame.locals import *\r\n\r\nWINDOWWIDTH = 400 # Chiều dài cửa sổ\r\nWINDOWHEIGHT = 300 # Chiều cao cửa sổ\r\n\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\n\r\npygame.init()\r\n\r\n### Xác định FPS ###\r\nFPS = 60\r\nfpsClock = pygame.time.Clock()\r\n\r\nDISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\r\npygame.display.set_caption('Event')\r\n\r\n\r\nclass Car():\r\n def __init__(self):\r\n self.x = 100 # Vị trí của xe\r\n\r\n ## Tạo surface và thêm hình chiếc xe vào ##\r\n self.surface = pygame.image.load('car.png')\r\n\r\n def draw(self): # Hàm dùng để vẽ xe\r\n DISPLAYSURF.blit(self.surface, (self.x, 100))\r\n\r\n def update(self, moveLeft, moveRight): # Hàm dùng để thay đổi vị trí xe\r\n if moveLeft == True:\r\n self.x -= 2\r\n if moveRight == True:\r\n self.x += 2\r\n\r\n if self.x + 100 > WINDOWWIDTH:\r\n self.x = WINDOWWIDTH - 100\r\n self.x=0\r\n if self.x < 0:\r\n self.x = WINDOWWIDTH - 100\r\n\r\n\r\ncar = Car()\r\nmoveLeft = False\r\nmoveRight = False\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n if event.type == KEYDOWN:\r\n if event.key == K_LEFT:\r\n moveLeft = True\r\n if event.key == K_RIGHT:\r\n moveRight = True\r\n\r\n if event.type == KEYUP:\r\n if event.key == K_LEFT:\r\n moveLeft = False\r\n if event.key == K_RIGHT:\r\n moveRight = False\r\n\r\n DISPLAYSURF.fill(WHITE)\r\n\r\n car.draw()\r\n car.update(moveLeft, moveRight)\r\n\r\n pygame.display.update()\r\n fpsClock.tick(FPS)\r\n","sub_path":"code/ki_1/python/pygame/dieu khien o to/dieu khien o to.py","file_name":"dieu khien o to.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"265739918","text":"\"\"\"\r\nID:ayush02\r\nLANG:PYTHON3\r\nTASK:whereamii\r\n\"\"\"\r\n#Import modules\r\nimport itertools\r\nimport re\r\n\r\n#C:/Users/ayush/OneDrive/Desktop/USACO/whereami/whereami.in\r\nwith open('C:/Users/ayush/OneDrive/Desktop/USACO/whereami/whereami.in', 'r') as fin:\r\n n = int(fin.readline())\r\n sequence = ''\r\n sequence += fin.readline().strip()\r\n\r\n#Given a list, determine if it is valid based on if it is all one's\r\ndef is_valid(occurance_list):\r\n valid = True\r\n setone = set(occurance_list)\r\n for i in setone:\r\n if i > 1:\r\n valid = False\r\n break\r\n return valid\r\n\r\n#Extra Test Cases\r\nmain = []\r\nfor k in range(n):\r\n occurances = []\r\n for i in range(0,n-k):\r\n occurances.append(sequence.count(sequence[i:i+k]))\r\n main.append(occurances)\r\n\r\nanswer = 0\r\nfor sublist in main:\r\n if is_valid(sublist) == True:\r\n break\r\n else:\r\n answer += 1\r\n'''\r\nwith open('whereami.out', 'w') as fout:\r\n fout.write(str(answer) + '\\n')\r\n'''\r\n\r\nprint(answer)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\nATTEMPT ONE\r\n#Characters list for the reduced permutation time\r\n#Total sequence list\r\n\r\n\r\n#ALL substring occurances in a string\r\ndef find_unique(substring, sequence):\r\n matches = [i.start() for i in re.finditer(substring, sequence)]\r\n return len(matches)\r\n\r\n\r\n#For each value in range of the characters length\r\n\r\noccurances = []\r\nfor i in range(1, len(characters)+1):\r\n #Initialize the amount of occurances of each combination in the sequence\r\n mailbox_permutations = itertools.permutations(characters, i)\r\n #print(i, [i for i in mailbox_permutations])\r\n occurances.append([find_unique(''.join(k), sequence) for k in mailbox_permutations])\r\n\r\ndumplist = []\r\nfor sublist in occurances:\r\n for i in sublist:\r\n if i > 1:\r\n dumplist.append(sublist)\r\n\r\nanswer = 0\r\nfor sublist in dumplist:\r\n if sublist in occurances:\r\n answer += 1\r\n occurances.remove(sublist)\r\n\r\nprint(answer)\r\n'''\r\n","sub_path":"whereami/wherami.py","file_name":"wherami.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"312151302","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport pickle\nimport sys\n\n# picam = cv2.imread('checkers.jpg')[:,::-1,:]\n# lepton = cv2.imread('checkers-h2.jpg')[:,::-1,:]\n\n# picam = cv2.imread('egg.jpg')[:,::-1,:]\n# lepton = cv2.imread('egg-heat.jpg')\n# lepton = np.rot90(lepton) * 100\n# lepton = lepton[:,::-1]\n\ntry:\n i = int(sys.argv[1])\nexcept:\n i=0\n\npicam = pickle.load(open('images/rgb%d.pkl'%i,'rb'))[...,::-1]\nlepton = pickle.load(open('images/therm%d.pkl'%i,'rb'))/100\n\nret = pickle.load(open('persp_mat.p','rb'))\n\n# print(picam.shape, picam.dtype)\n# print(lepton[5,5])\n\na = np.zeros((8,2))\ni=0\ndef onclick(event):\n global i\n a[i] = event.xdata, event.ydata\n i += 1\n print('xdata=%f, ydata=%f' %\n (event.xdata, event.ydata))\n\n# fig = plt.figure()\n# cid = fig.canvas.mpl_connect('button_press_event', onclick)\n\n# plt.subplot(121)\n# plt.imshow(picam)\n# plt.subplot(122)\n# plt.imshow(lepton)\n# plt.show()\n\n# a = a.reshape(2,4,2).astype(np.float32)\n# print(a)\n\n# ret = cv2.getPerspectiveTransform(a[1],a[0])\n# print(ret)\n# print('SAVED.')\n# pickle.dump(ret, open('persp_mat.p','wb'))\n\n\n# print(a)\n\nr, c, _ = picam.shape\nwarp = cv2.warpPerspective(lepton, ret, dsize=(c,r))\n\nplt.subplot(121)\nplt.imshow(picam)\nplt.subplot(122)\nplt.imshow(warp)\n\nplt.figure()\nplt.imshow(warp, alpha=.7)\nplt.imshow(picam, alpha=.6)\n\nplt.show()\n\n\n\n\n","sub_path":"microwave/align_images.py","file_name":"align_images.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"492942717","text":"\"\"\"\nDescription\nGiven n nodes in a graph labeled from 1 to n. There is no edges in the graph at beginning.\n\nYou need to support the following method:\n\nconnect(a, b), an edge to connect node a and node b\nquery(), Returns the number of connected component in the graph\n\nExample\n5 // n = 5\nquery() return 5\nconnect(1, 2)\nquery() return 4\nconnect(2, 4)\nquery() return 3\nconnect(1, 4)\nquery() return 3\n\"\"\"\n\nclass ConnectingGraph3:\n \"\"\"\n @param: n: An integer\n \"\"\"\n def __init__(self, n):\n self.father = {}\n self.size = n\n for i in range(1, n + 1):\n self.father[i] = i\n \n def find(self, a):\n path = []\n while a != self.father[a]:\n path.append(a)\n a = self.father[a]\n \n for node in path:\n self.father[node] = a\n \n return a\n\n \"\"\"\n @param: a: An integer\n @param: b: An integer\n @return: nothing\n \"\"\"\n def connect(self, a, b):\n root_a = self.find(a)\n root_b = self.find(b)\n if root_a != root_b:\n self.father[root_a] = root_b\n self.size -= 1\n\n \"\"\"\n @return: An integer\n \"\"\"\n def query(self):\n return self.size\n","sub_path":"Data Structure 1/Connecting Graph iii.py","file_name":"Connecting Graph iii.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"590888624","text":"# Copyright (C) 2011 Statoil ASA, Norway. \n# \n# The file 'observations.py' is part of ERT - Ensemble based Reservoir Tool. \n# \n# ERT is free software: you can redistribute it and/or modify \n# it under the terms of the GNU General Public License as published by \n# the Free Software Foundation, either version 3 of the License, or \n# (at your option) any later version. \n# \n# ERT is distributed in the hope that it will be useful, but WITHOUT ANY \n# WARRANTY; without even the implied warranty of MERCHANTABILITY or \n# FITNESS FOR A PARTICULAR PURPOSE. \n# \n# See the GNU General Public License at \n# for more details. \n\n\n# ----------------------------------------------------------------------------------------------\n# Observations tab\n# ----------------------------------------------------------------------------------------------\nfrom ert_gui.widgets.combochoice import ComboChoice\nfrom ert_gui.widgets.pathchooser import PathChooser\nfrom ert.ert.enums import history_source_type\nfrom ert_gui.widgets.reloadbutton import ReloadButton\nfrom ert.sched.history import HistoryType\n\ndef createObservationsPage(configPanel, parent):\n configPanel.startPage(\"Observations\")\n\n r = configPanel.addRow(ComboChoice(parent, history_source_type.values(), \"History source\", \"config/observations/history_source\"))\n\n def get_history_source(ert):\n history_source = ert.main.model_config.get_history_source\n return history_source_type.resolveValue(history_source.get_source_string)\n\n r.initialize = get_history_source\n r.getter = get_history_source\n\n def set_history_source(ert, value):\n history_source_enum = history_source_type.resolveName(str(value))\n sched_file = ert.main.ecl_config.get_sched_file\n refcase = ert.main.ecl_config.get_refcase\n if history_source_enum.value() == 0:\n history = HistoryType.alloc_from_sched_file(sched_file)\n if history_source_enum.value() == 1:\n history = HistoryType.alloc_from_refcase(refcase, True)\n if history_source_enum.value() == 2: \n history = HistoryType.alloc_from_refcase(refcase, False)\n ert.main.model_config.set_history_source(history, sched_file, refcase)\n \n r.setter = set_history_source\n\n \n r = configPanel.addRow(PathChooser(parent, \"Observations config\", \"config/observations/obs_config\", True))\n\n def get_obs(ert):\n obs = ert.main.get_obs\n return obs.get_config_file\n\n r.initialize = get_obs\n r.getter = get_obs\n\n\n def set_obs(ert, value):\n ert.main.load_obs( str(value))\n r.setter = set_obs\n\n\n r = configPanel.addRow(ReloadButton(parent, \"Reload Observations\", \"config/observations/reload_observation\", \"Reload\"))\n r.initialize = lambda ert : ert.main.reload_obs\n r.getter = lambda ert : ert.main.reload_obs\n \n\n configPanel.endPage()\n","sub_path":"devel/python/python/ert_gui/pages/config/observations.py","file_name":"observations.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"374956370","text":"from django.shortcuts import render\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets, status\nfrom django.http import HttpResponse\nfrom drf_yasg.utils import swagger_auto_schema\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\n\n\nimport json\nfrom pprint import pprint\nimport logging\nimport sqlite3\n\n# 取得縮網址\nfrom shortUrl.models import getShortUrl\nfrom shortUrl.serializers import ShortUrlSerializer\n\n\n# 取得原來網址\nfrom shortUrl.models import getLongUrl\n\n\n\n\n\nclass ShortUrlViewset(viewsets.ViewSet):\n \n \n @swagger_auto_schema(\n operation_summary='取得縮網址',\n request_body=ShortUrlSerializer,\n \n )\n @action(detail=False,methods=['post'],url_path='get_short_url')\n def short_url(self, request):\n '''\n # INPUT\n\n\n\n ```json\n { \n \"url\":\"http://www.google.com\" \n }\n ```\n\n\n\n # OUTPUT\n\n\n\n ```json\n {\n \"status\": 200,\n \"msg\": \"Success\",\n \"url\": \"http://www.google.com\",\n \"shorturl\": \"http://192.168.50.106:8000/shortUrl/get_short_url/MDOOM6ZO\"\n }\n ```\n \n\n '''\n \n url = request.data.get('url')\n \n \n \n res = getShortUrl(url=url)\n \n return Response(res, status=status.HTTP_200_OK)\n\n\n\n \n \n \n \n \n \n \n\n\n\n @swagger_auto_schema(\n operation_summary='導向原網址'\n )\n @action(detail=False,methods=['get'],url_path='get_short_url/(\\w+)')\n def original_url(self, request,url):\n\n try:\n \n # 建立資料庫\n conn = sqlite3.connect('mydatabase.db')\n c = conn.cursor()\n\n\n # select\n sql_select = (\"select id,url from shorturl where id = '{}'\").format(url)\n c.execute(sql_select)\n res = c.fetchall()\n res_json = [list(row) for row in res]\n\n return redirect(res_json[0][1])\n\n\n except Exception as e:\n \n error_class = e.__class__.__name__ #取得錯誤類型\n detail = e.args[0] #取得詳細內容\n cl, exc, tb = sys.exc_info() #取得Call Stack\n lastCallStack = traceback.extract_tb(tb)[-1] #取得Call Stack的最後一筆資料\n fileName = lastCallStack[0] #取得發生的檔案名稱\n lineNum = lastCallStack[1] #取得發生的行號\n funcName = lastCallStack[2] #取得發生的函數名稱\n\n res_json = { \n \"debug\":str(lineNum)+\"L,filename:\"+fileName+\",\"+str(detail),\n \"status\":500,\n \"detail\":str(e),\n \"msg\":\"failed\"\n }\n\n return res_json\n\n\n\n\n\n\n\n\n","sub_path":"shortUrl/.ipynb_checkpoints/views-checkpoint.py","file_name":"views-checkpoint.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"200338875","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis example can be found in Automate the Boring Stuff\n\"\"\"\n\nimport re\nphoneNumRegex = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\nmo = phoneNumRegex.search('My number is 415-555-4242.')\n\nprint('Phone number found: ' + mo.group())\n\n\n#find all numbers in text\nmo = phoneNumRegex.findall('My number is 415-555-4242 or 415-555-4243.')\n\n\n#Grouping with Parentheses\n\nphoneNumRegex = re.compile(r'(\\d\\d\\d)-(\\d\\d\\d-\\d\\d\\d\\d)')\nmo = phoneNumRegex.search('My number is 415-555-4242.')\nmo.group(1)\nmo.group(2)\nmo.group(0)\nmo.groups()\nareaCode, mainNumber = mo.groups()\n\nprint(areaCode)\nprint(mainNumber)\n","sub_path":"PythonFiles/RegExp.py","file_name":"RegExp.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"373255913","text":"import socket\nimport threading\n\nHOST, PORT = 'localhost' ,8080\nname = input('type your name: ')\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# sock.send(name)\n\ndef send():\n while True:\n data = input(name + ': ')\n sock.sendall(bytes(name + \": \" + data, 'utf-8'))\n sock.close()\n\ndef receive():\n while True:\n data = str(sock.recv(1024), 'utf-8')\n print(data)\n sock.close()\n\nsock.connect((HOST, PORT))\n# send_thread = threading.Thread(target=send, daemon=True)\n# receive_thread = threading.Thread(target=receive, daemon=True)\n# send_thread.start()\n# receive_thread.start()\nwhile True:\n data = input(name + ': ')\n sock.sendall(bytes(name + \": \" + data, 'utf-8'))\n response = sock.recv(1024)\n print(response.decode('utf-8'))\nsock.close()\n\n","sub_path":"venv/src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"247193069","text":"import understand\nimport os,sys,re\n\nudb_path = sys.argv[1]\nname = sys.argv[2]\n\ndb = understand.open(udb_path)\nfiles = db.ents(\"Java File\")\nnew_files = []\nfor f in files:\n\tloc = f.metric(['CountLineCode'])['CountLineCode']\n\tif loc == 0 or loc > 10000:\n\t\tcontinue\n\tnew_files.append(f)\n\nfile_len = len(new_files)\n\nfilenames=[]\nfile_loc = {}\nfor i in range(file_len):\n\tfname = re.findall(name+\".*\\.java\",new_files[i].longname())[0][len(name)+1:]\n\tfilenames.append(fname)\n\tfile_loc[fname] = new_files[i].metric(['CountLineCode'])['CountLineCode']\nfilenames.sort()\n\nf = open(name+'/LOC.csv','w')\nf.write('FILENAME,SLOC'+'\\n')\nfor file_name in filenames:\n\tf.write(file_name+','+str(file_loc[file_name])+'\\n')\nf.close()\n'''\nfile_map={}\nfor i in range(file_len):\n\tfile_map[filenames[i]]=i\n\nadj_matrix=[[0]*file_len for i in range(file_len)]\n\nents = db.ents(\"class ~unresolved ~unknown\")\n\nfor ent in ents:\n\tif ent.ref(\"definein\",\"File\"):\n\t\tthis_file = re.findall(\"src.*\\.java\",ent.ref(\"definein\",\"File\").file().longname())[0]\n\t\tcouples = ent.refs(\"couple\",\"class\")\n\t\tfor cou in couples:\n\t\t\tif cou.ent().ref(\"definein\",\"File\"):\n\t\t\t\tcouple_file = re.findall(\"src.*\\.java\",cou.ent().ref(\"definein\",\"File\").file().longname())[0]\n\t\t\t\tadj_matrix[file_map[couple_file]][file_map[this_file]]=1\n\nf = open('file_depend.txt','w')\nfor i in adj_matrix:\n\tk=' '.join([str(j) for j in i])\n\tf.write(k+'\\n')\nf.close()\n'''\nfor i in filenames:\n\tprint(i)\ndb.close()","sub_path":"file_dep.py","file_name":"file_dep.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"152484068","text":"import datetime\nfrom google.cloud import firestore\nimport logging.config\n\nlogging.config.fileConfig(\"logging.conf\")\nlogger = logging.getLogger()\n\ndb = firestore.Client(project=\"cf-fs-project\")\n\n\ndef get_log(gcp_job_id):\n \"\"\"\n get execution log from firestore by gcp job id ,eg: big query job id\n Args:\n gcp_job_id: the gcp job id , like bigquery job id\n\n Returns: the tuple (document id, document dict)\n\n \"\"\"\n logger.info(gcp_job_id)\n try:\n logs_ref = db.collection(u'lss_logs')\n # use stream instead of get function\n docs = logs_ref.where(u'gcp_job_id', u'==', gcp_job_id).limit(1).get()\n if len(docs) > 0:\n # only one documents returned because the query limitation`\n for doc in docs:\n logger.info(u' doc {} => {}'.format(doc.id, doc.to_dict()))\n return doc.id, doc.to_dict()\n except Exception as ex:\n logger.error(ex)\n\n\ndef update_log(log_id, doc):\n \"\"\"\n Update job log document: endTime, logs\n Args:\n log_id: the job log document id\n doc:the attributes need to be updated in the document\n\n Returns:0: success , -1: failed, message\n\n \"\"\"\n try:\n code = 0\n message = \"success\"\n if log_id and doc:\n log_ref = db.collection(u'lss_logs').document(log_id)\n if log_ref.get().exists:\n logger.info(\"Update document by id:{} , value:{} \".format(log_id, doc))\n log_ref.update(\n doc\n )\n else:\n code = -1\n message = \"document not found: \" + str(log_id)\n logger.error(\"document not found, type: {}, id: {}\".format(\"lss_logs\", id))\n return code, message\n except Exception as ex:\n logger.error(\"Error happens: \")\n logger.error(ex)\n return -1, \"Errors\" + str(ex)\n\n\ndef insert_log(log={}):\n logger.info(log)\n try:\n if log['job_id'] and log['gcp_job_id']:\n job_ref = db.collection(u'lss_logs').document()\n log['status'] = 'start'\n log['start_time'] = datetime.datetime.now()\n job_ref.set(log, merge=True)\n logger.info(\"add logs into firestore:\")\n logger.info(job_ref.get().to_dict())\n else:\n logger.info(\"required fields are missing\")\n except Exception as e:\n logger.error(\"errors happen when insert document.\")\n logger.error(e)\n\n\ndef get_job(job_id=None):\n if id:\n try:\n doc_ref = db.collection(u'lss_jobs').document(job_id)\n doc = doc_ref.get()\n if doc.exists:\n logger.info(f'Document data: {doc.to_dict()}')\n return doc.to_dict()\n else:\n logger.info(u'No such document!')\n except Exception as e:\n logger.error(e)\n\n\ndef create_job(job={}):\n logger.info(job)\n try:\n if job['job_id'] and job['job_name'] and job['job_type'] and job['group_id']:\n job_ref = db.collection(u'lss_jobs').document(job['job_id'])\n job_ref.set(job, merge=True)\n logger.info(\"add job into firestore:\" + str(job['job_id']))\n else:\n logger.info(\"required fields are missing\")\n except Exception as e:\n logger.error(\"errors happen when insert document.\")\n logger.error(e)\n\n\nif __name__ == '__main__':\n pass\n # job_properties = {\n # u'query': u\"select id,name,age,gender ,'Dalian' as address from cf-fs-project.lss_raw.user_info\",\n # u'write_disposition': u'WRITE_APPEND',\n # u'destination': u'cf-fs-project.lss_insight.user_info_inst',\n # u'job_id_prefix': u'lss_demo_'\n # }\n # job_add = {\n # u'job_id': u'1001',\n # u'group_id': u'01',\n # u'job_name': u'lss_demo_raw2insight',\n # u'job_type': u'BigQuery',\n # u'input': u'cf-fs-project.lss_raw.user_info',\n # u'output': u'cf-fs-project.lss_insight.user_info_inst',\n # u'properties': job_properties\n # }\n # create_job(job_add)\n # res = 
get_job(u'1001')\n # logger.info(res)\n # job_log = {\n # u'job_id': u'1001',\n # u'gcp_job_id': u'test',\n # u'log_msg': u'job started',\n # }\n #\n # insert_log(job_log)\n # gcp_log_id = 'lss_demo_65563df0-837e-44de-b9fb-d6b57b7fdf95'\n # lss_log = get_log(gcp_log_id)\n # print(lss_log)\n # ('D8fCt9kejyzF5l9HFOGn',\n # {'status': 'start', 'gcp_job_id': 'lss_demo_65563df0-837e-44de-b9fb-d6b57b7fdf95', 'log_msg': 'job started',\n # 'job_id': '1001', 'start_time': DatetimeWithNanoseconds(2020, 11, 20, 17, 56, 49, 825796, tzinfo= < UTC >)})\n # document_id = 'D8fCt9kejyzF5l9HFOGn'\n # job_statics = {\n # \"billingTier\": 1,\n # \"createTime\": \"2020-11-20T09:56:46.047Z\",\n # \"endTime\": \"2020-11-20T09:56:48.465Z\",\n # \"queryOutputRowCount\": \"4\",\n # \"referencedTables\": [\n # {\n # \"datasetId\": \"lss_raw\",\n # \"projectId\": \"cf-fs-project\",\n # \"tableId\": \"user_info\"\n # }\n # ],\n # \"startTime\": \"2020-11-20T09:56:46.356Z\",\n # \"totalBilledBytes\": \"10485760\",\n # \"totalProcessedBytes\": \"100\",\n # \"totalSlotMs\": \"8404\",\n # \"totalTablesProcessed\": 1\n # }\n # end_time = job_statics['endTime']\n # status = \"Done\"\n # update_doc = {\n # \"end_time\": end_time,\n # \"status\": status,\n # \"job_statics\": job_statics\n # }\n # res = update_log(document_id, update_doc)\n # print(res)\n","sub_path":"lss_cloudfunction/update_cf/firestore_client.py","file_name":"firestore_client.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"647317636","text":"\ndata = open('../data/output', 'r')\ntrain = open('train.data', 'w')\ndev = open('dev.data', 'w')\ntest = open('test.data', 'w')\n\n\ncount = 1\n\ntrain_threshold = 2738\ndev_threshold = 3651\n\nfor line in data:\n line = line.strip()\n if line:\n if count <= train_threshold:\n train.write(line+'\\n')\n elif count > train_threshold and count<=dev_threshold:\n dev.write(line+'\\n')\n elif count > dev_threshold:\n test.write(line+'\\n')\n count += 1\n\ndata.close()\ntrain.close()\ntest.close()\ndev.close()\n","sub_path":"tools/split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"414887632","text":"import calendar\n\nfrom consts.notification_type import NotificationType\nfrom notifications.base_notification import BaseNotification\n\n\nclass ScheduleUpdatedNotification(BaseNotification):\n\n _priority = 'high'\n\n def __init__(self, event, next_match=None):\n from helpers.match_helper import MatchHelper # recursive import issues\n self.event = event\n\n if not next_match:\n upcoming = MatchHelper.upcomingMatches(event.matches, 1)\n self.next_match = upcoming[0] if upcoming and len(upcoming) > 0 else None\n else:\n self.next_match = next_match\n\n @property\n def _type(self):\n return NotificationType.SCHEDULE_UPDATED\n\n def _build_dict(self):\n data = {}\n data['notification_type'] = NotificationType.type_names[self._type]\n data['message_data'] = {}\n data['message_data']['event_key'] = self.event.key_name\n data['message_data']['event_name'] = self.event.name\n if self.next_match and self.next_match.time:\n data['message_data']['first_match_time'] = calendar.timegm(self.next_match.time.utctimetuple())\n else:\n data['message_data']['first_match_time'] = None\n\n return data\n","sub_path":"notifications/schedule_updated.py","file_name":"schedule_updated.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"623568684","text":"class Solution(object):\n def reverseKGroup(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n current = head\n swap_nodes = 0\n while current is not None and swap_nodes != k:\n current = current.next\n swap_nodes += 1\n\n if swap_nodes == k:\n current = self.reverseKGroup(current, k)\n\n while swap_nodes > 0:\n head.next, current, head = current, head, head.next\n swap_nodes -= 1\n head = current\n return head\n","sub_path":"a025_reverse_k_nodes_in_groups/reverse_k_nodes_in_groups.py","file_name":"reverse_k_nodes_in_groups.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"467223485","text":"import re\n\n\ndef way_better(filename): # Функция читающая файл\n print('reading file with way_better()')\n try:\n with open(filename) as f:\n return f.read()\n except FileNotFoundError:\n print('File not found')\n\n\ndef count_entry(list1): # Функция подсчитывающая количество вхождений одинаковых записей в список\n count_dict = {}\n count = 0\n value = 0\n\n while count < len(list1):\n if list1[count] not in count_dict.keys():\n for item in list1:\n if item == list1[count]:\n value += 1\n count_dict.update({list1[count]: value})\n count += 1\n value = 0\n return count_dict\n\n\ndef results(dict1): # Функция ввыводящая результат в консоль в виде таблицы\n for k, v in dict1.items():\n print('| ', k, ' | ', v, ' |')\n print()\n\n\nraw_text = way_better('nasa_19950801.tsv') # Читаем нужный файл\n\nname_pattern_group = r'.*\\s(\\d*)\\tGET\\t(.*)\\s(\\d\\d\\d)\\s\\d.*' # Шаблон регулярного выражения\nready_text = re.findall(name_pattern_group, raw_text) # Ищем по шаблону текст\n\n# f = open('nasa_ready_text.txt', 'w')\n# f.write(str(ready_text))\n# f.close()\n\nprint('Number of rows: ', len(ready_text))\n\n# Формируем отдельные списки из полученого\nx = 0\nurl_list = []\ntime_list = []\ncode_list = []\nwhile x < len(ready_text):\n time_list.append(ready_text[x][0])\n url_list.append(ready_text[x][1])\n code_list.append(ready_text[x][2])\n x += 1\n# print(time_list)\n# print(time_list, '\\n', url_list, '\\n', code_list)\n\n# Считаем вхождения и выводим в консоль\ncount_dict_code = count_entry(code_list)\nprint('| Код ошибки | Количество ошибок |')\nresults(count_dict_code)\ncount_dict_time = count_entry(time_list)\nprint('| Timestamp | Количество записей |')\nresults(count_dict_time)\ncount_dict_url = count_entry(url_list)\nprint('| URL | Количество записей |')\nresults(count_dict_url)\n\n\n\n\n\n\n","sub_path":"lesson9_1.py","file_name":"lesson9_1.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"244853743","text":"import csv\nimport tempfile\nimport unittest\n\nimport eva_cttv_pipeline.trait_mapping.output as output\nfrom eva_cttv_pipeline.trait_mapping.oxo import OxOMapping, OxOResult\nfrom eva_cttv_pipeline.trait_mapping.trait import OntologyEntry, Trait\nimport eva_cttv_pipeline.trait_mapping.zooma as zooma\n\n\nclass TestOutputTraitMapping(unittest.TestCase):\n def test_output_trait_mapping(self):\n tempfile_path = tempfile.mkstemp()[1]\n with open(tempfile_path, \"w\", newline='') as mapping_file:\n mapping_writer = csv.writer(mapping_file, delimiter=\"\\t\")\n mapping_writer.writerow([\"#clinvar_trait_name\", \"uri\", \"label\"])\n\n test_trait = Trait('aprt deficiency, japanese type', 11)\n\n # Normally a set, but changed to a list for predictable output order in test\n test_trait.finished_mapping_set = [\n OntologyEntry('http://www.orpha.net/ORDO/Orphanet_976',\n 'Adenine phosphoribosyltransferase deficiency'),\n OntologyEntry('http://www.orpha.net/ORDO/Orphanet_977',\n 'Adenine phosphoribosyltransferase deficiency type A')\n ]\n\n output.output_trait_mapping(test_trait, mapping_writer)\n\n with open(tempfile_path, \"rt\", newline='') as mapping_file:\n mapping_reader = csv.reader(mapping_file, delimiter=\"\\t\")\n next(mapping_reader)\n self.assertEqual(['aprt deficiency, japanese type',\n 'http://www.orpha.net/ORDO/Orphanet_976',\n 'Adenine phosphoribosyltransferase deficiency'],\n next(mapping_reader))\n\n self.assertEqual(['aprt deficiency, japanese type',\n 'http://www.orpha.net/ORDO/Orphanet_977',\n 'Adenine phosphoribosyltransferase deficiency type A'],\n next(mapping_reader))\n\n\nclass TestGetMappingsForCuration(unittest.TestCase):\n def test_get_non_efo_mapping(self):\n \"\"\"If mapping is not in EFO, its `is_current` flag should *not* be checked, and the mapping\n *should* be selected for curation.\"\"\"\n test_zooma_result = zooma.ZoomaResult(['http://purl.obolibrary.org/obo/HP_0001892'],\n 'abnormal bleeding', 'HIGH', 'eva-clinvar')\n mapping = test_zooma_result.mapping_list[0]\n mapping.confidence = zooma.ZoomaConfidence.HIGH\n mapping.in_efo = False\n mapping.is_current = False\n mapping.ontology_label = \"\"\n mapping.source = 'eva-clinvar'\n mapping.uri = 'http://purl.obolibrary.org/obo/HP_0000483'\n self.assertEqual([mapping], output.get_mappings_for_curation([test_zooma_result]))\n\n def test_get_obsolete_efo_mapping(self):\n \"\"\"If mapping is in EFO, but is not current, it *should not* be selected for curation.\"\"\"\n test_zooma_result = zooma.ZoomaResult(['http://www.orpha.net/ORDO/Orphanet_976'],\n 'Adenine phosphoribosyltransferase deficiency',\n 'HIGH', 'eva-clinvar')\n mapping = test_zooma_result.mapping_list[0]\n mapping.confidence = zooma.ZoomaConfidence.HIGH\n mapping.in_efo = True\n mapping.is_current = False\n mapping.ontology_label = \"Adenine phosphoribosyltransferase deficiency\"\n mapping.source = 'eva-clinvar'\n mapping.uri = 'http://www.orpha.net/ORDO/Orphanet_976'\n self.assertEqual([], output.get_mappings_for_curation([test_zooma_result]))\n\n def test_get_current_efo_mapping(self):\n \"\"\"If mapping is in EFO and is current, is *should* be selected for curation.\"\"\"\n test_zooma_result = zooma.ZoomaResult(['http://purl.obolibrary.org/obo/MONDO_0008091'],\n 'Abnormal neutrophil chemotactic response',\n 'MEDIUM', 'eva-clinvar')\n mapping = test_zooma_result.mapping_list[0]\n mapping.confidence = zooma.ZoomaConfidence.HIGH\n mapping.in_efo = True\n mapping.is_current = True\n mapping.ontology_label = \"Abnormal 
neutrophil chemotactic response\"\n mapping.source = 'eva-clinvar'\n mapping.uri = 'http://purl.obolibrary.org/obo/MONDO_0008091'\n self.assertEqual([mapping], output.get_mappings_for_curation([test_zooma_result]))\n\n\nclass TestOutputForCuration(unittest.TestCase):\n def test_output_for_curation(self):\n tempfile_path = tempfile.mkstemp()[1]\n with open(tempfile_path, \"wt\") as curation_file:\n curation_writer = csv.writer(curation_file, delimiter=\"\\t\")\n\n test_trait = Trait(\"transitional cell carcinoma of the bladder\", 276)\n\n test_oxo_result = OxOResult(\"HP:0006740\", \"Transitional cell carcinoma of the bladder\",\n \"HP:0006740\")\n test_oxo_mapping = OxOMapping(\"bladder transitional cell carcinoma\", \"EFO:0006544\", 2,\n \"HP:0006740\")\n test_oxo_mapping.in_efo = test_oxo_mapping.is_current = True\n test_oxo_mapping.ontology_label = \"bladder transitional cell carcinoma\"\n test_oxo_result.mapping_list = [test_oxo_mapping]\n\n test_trait.oxo_result_list = [test_oxo_result]\n\n output.output_for_curation(test_trait, curation_writer)\n\n with open(tempfile_path, \"rt\") as curation_file:\n curation_reader = csv.reader(curation_file, delimiter=\"\\t\")\n expected_record = [\n \"transitional cell carcinoma of the bladder\", \"276\",\n \"http://www.ebi.ac.uk/efo/EFO_0006544|bladder transitional cell carcinoma|2|HP:0006740|EFO_CURRENT\"\n ]\n self.assertEqual(expected_record, next(curation_reader))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/trait_mapping/test_output.py","file_name":"test_output.py","file_ext":"py","file_size_in_byte":5947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"173138140","text":"import numpy as np\nimport cmath\nimport math\nimport matplotlib.pyplot as plt\n\nT = 1\nsigma = 0\nj = complex(0, 1)\nomega = np.arange(0, 2*math.pi, math.pi/50)\nz = np.empty([len(omega), 1], dtype=complex)\nw = np.empty([len(omega), 1], dtype=complex)\n\nfor k in range(len(omega)):\n s = sigma + (j * omega[k])\n z[k] = cmath.exp(T*s)\n w[k] = 2 * (z[k] - 1) / (T * (z[k] + 1))\n\nplt.ylim(-1.5, 1.5)\nplt.xlim(-1.5, 1.5)\nplt.ylabel(\"Imag\")\nplt.xlabel(\"Real\")\nplt.scatter([x.real for x in w], [y.imag for y in w], color='green')\nplt.scatter([x.real for x in z], [y.imag for y in z], color='red')\nplt.show()\n","sub_path":"zcircle.py","file_name":"zcircle.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"453361318","text":"import time\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom main import *\n\nif __name__ == \"__main__\":\n i = 2\n exec_times_fft = []\n N_vals = []\n log_vals = []\n while i <= 1024*1024:\n start_time = time.time()\n N_vals.append(i)\n x = np.random.random(i)\n res = fft(x)\n exec_times_fft.append(time.time() - start_time)\n log_vals.append(0.0000007 * i * np.log(i))\n i *= 2\n # plt.semilogx(N_vals, log_vals)\n # plt.semilogx(N_vals, exec_times_fft)\n plt.plot(N_vals, log_vals)\n plt.plot(N_vals, exec_times_fft)\n plt.plot(N_vals, log_vals)\n plt.legend([\"Execution Time for FFT\", \"0.0000007*n*log(n)\"])\n plt.xlabel(\"Vector Length - N\")\n plt.ylabel(\"Execution Time\")\n plt.show()\n","sub_path":"fft_driver.py","file_name":"fft_driver.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"72303195","text":"# coding=utf-8\n__author__ = 'Steven Cutting'\n__author_email__ = 'steven.e.cutting@gmail.com'\n__created_on__ = '10/22/14'\n\nimport sys\n\n# ---------------------------------------------------------------------\n# Yahoo Finance data options cheat sheet (short list)\n\n'''\nhttp://finance.yahoo.com/d/quotes\nhttp://ichart.finance.yahoo.com/table\n\nc6: Change (Realtime)\nk2: Change Percent (Realtime)\nc8: After Hours Change (Realtime)\nk1: Last Trade (Realtime) With Time\nl: Last Trade (With Time)\nl1: Last Trade (Price Only)\nw4: Day’s Value Change (Realtime)\nm2: Day’s Range (Realtime)\n\n**Symbol Info**\nv: More Info\nj1: Market Capitalization\nj3: Market Cap (Realtime)\nf6: Float Shares\nn: Name\nn4: Notes\ns: Symbol\ns1: Shares Owned\nx: Stock Exchange\nj2: Shares Outstanding\n\n**Averages:**\nm5: Change From 200 Day Moving Average\nm6: Percent Change From 200 Day Moving Average\nm7: Change From 50 Day Moving Average\nm8: Percent Change From 50 Day Moving Average\nm3: 50 Day Moving Average\nm4: 200 Day Moving Average\n'''\n\n# ---------------------------------------------------------------------\n# mktdotpy Token dictionaries\n\ndataCodeDict = {' Change (Realtime)': 'c6',\n ' Change Percent (Realtime)': 'k2',\n ' After Hours Change (Realtime)': 'c8',\n ' Last Trade (Realtime) With Time': 'k1',\n ' Last Trade (With Time)': 'l',\n ' Last Trade (Price Only)': 'l1',\n ' Day’s Value Change (Realtime)': 'w4',\n ' Day’s Range (Realtime)': 'm2',\n ' More Info': 'v',\n ' Market Capitalization': 'j1',\n ' Market Cap (Realtime)': 'j3',\n ' Float Shares': 'f6',\n ' Name': 'n',\n ' Notes': 'n4',\n ' Symbol': 's',\n ' Shares Owned': 's1',\n ' Stock Exchange': 'x',\n ' Shares Outstanding': 'j2',\n ' Change From 200 Day Moving Average': 'm5',\n ' Percent Change From 200 Day Moving Average': 'm6',\n ' Change From 50 Day Moving Average': 'm7',\n ' Percent Change From 50 Day Moving Average': 'm8',\n ' 50 Day Moving Average': 'm3',\n ' 200 Day Moving Average': 'm4',\n }\n\n\ntickerDict = {'S&P 500': '^GSPC',\n '^GSPC': '^GSPC',\n 'NASDAQ': '^IXIC',\n '^IXIC': '^IXIC',\n 'Russell 2000': '^RUT',\n '^RUT': '^RUT',\n 'Nikkei 225': '^N225',\n '^N225': '^N225',\n 'Crude Oil': 'CLX14.NYM',\n 'CLX14.NYM': 'CLX14.NYM',\n '10-Yr Bond': '^TNX',\n '^TNX': '^TNX',\n 'EUR/USD': 'EURUSD=X',\n 'EURUSD=X': 'ERUUSD=X',\n 'USD/JPY': 'USDJPY=X',\n 'USDJPY=X': 'USDJPY=X'\n }\n\n\ntickerDict_extended = {'S&P 500': '^GSPC',\n 'S&P500': '^GSPC',\n 's&p 500': '^GSPC',\n 's&p500': '^GSPC',\n '^GSPC': '^GSPC',\n '^Gspc': '^GSPC',\n '^gspc': '^GSPC',\n 'NASDAQ': '^IXIC',\n 'nasdaq': '^IXIC',\n 'Nasdaq': '^IXIC',\n '^IXIC': '^IXIC',\n '^ixic': '^IXIC',\n '^Ixic': '^IXIC',\n 'NASDAQ Composite': '^IXIC',\n 'nasdaq Composite': '^IXIC',\n 'Nasdaq Composite': '^IXIC',\n 'NASDAQComposite': '^IXIC',\n 'nasdaq composite': '^IXIC',\n 'Nasdaq composite': '^IXIC',\n 'Russell 2000': '^RUT',\n 'Russell2000': '^RUT',\n 'RUSSELL 2000': '^RUT',\n 'RUSSELL2000': '^RUT',\n 'russell 2000': '^RUT',\n 'russell2000': '^RUT',\n '^RUT': '^RUT',\n '^Rut': '^RUT',\n '^rut': '^RUT',\n 'Nikkei 225': '^N225',\n 'Nikkei225': '^N225',\n 'NIKKEI225': '^N225',\n 'NIKKEI 225': '^N225',\n 'nikkei225': '^N225',\n 'nikkei 225': '^N225',\n '^N225': '^N225',\n '^n225': '^N225',\n 'Crude Oil': 'CLX14.NYM',\n 'crude oil': 'CLX14.NYM',\n 'CRUDE OIL': 'CLX14.NYM',\n 'Crude Oil Nov 14': 'CLX14.NYM',\n 'CrudeOil': 'CLX14.NYM',\n 'crudeoil': 'CLX14.NYM',\n 'CRUDEOIL': 'CLX14.NYM',\n 'CLX14.NYM': 'CLX14.NYM',\n 'Clx14.Nym': 'CLX14.NYM',\n 
'clx14.nym': 'CLX14.NYM',\n '10-Yr Bond': '^TNX',\n '10-yr bond': '^TNX',\n 'Bond 10yr': '^TNX',\n 'bond 10yr': '^TNX',\n 'Bond10yr': '^TNX',\n 'CBOE Interest Rat': '^TNX',\n 'CBOE Interest Rate': '^TNX',\n 'cboe interest rate': '^TNX',\n '^TNX': '^TNX',\n '^Tnx': '^TNX',\n '^tnx': '^TNX',\n 'EUR/USD': 'EURUSD=X',\n 'Eur/Usd': 'EURUSD=X',\n 'eur/usd': 'EURUSD=X',\n 'EUR to USD': 'EURUSD=X',\n 'Eur to Usd': 'EURUSD=X',\n 'eur to usd': 'EURUSD=X',\n 'Euro/USD': 'EURUSD=X',\n 'Euro/usd': 'EURUSD=X',\n 'euro/USD': 'EURUSD=X',\n 'euro/usd': 'EURUSD=X',\n 'Euro to USD': 'EURUSD=X',\n 'Euro to usd': 'EURUSD=X',\n 'euro to USD': 'EURUSD=X',\n 'euro to usd': 'EURUSD=X',\n 'EURUSD=X': 'EURUSD=X',\n 'EurUsd=x': 'EURUSD=X',\n 'eurusd=x': 'EURUSD=X',\n 'USD/JPY': 'USDJPY=X',\n 'usd/jpy': 'USDJPY=X',\n 'USD/YEN': 'USDJPY=X',\n 'USD/Yen': 'USDJPY=X',\n 'usd/yen': 'USDJPY=X',\n 'USD to JPY': 'USDJPY=X',\n 'usd to jpy': 'USDJPY=X',\n 'USD to YEN': 'USDJPY=X',\n 'USD to Yen': 'USDJPY=X',\n 'usd to yen': 'USDJPY=X',\n 'USDJPY=X': 'USDJPY=X',\n 'UsdJpy=x': 'USDJPY=X',\n 'usdjpy=x': 'USDJPY=X',\n }\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"mktdotpy/mktdicts.py","file_name":"mktdicts.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"481007846","text":"import renpy\nimport os\nimport sys\nimport imp\nimport modinfo\nimport importlib\n# So technically anything up here's going to be imported into mods since we're making a copy of our globals. It's safe to import them anyway. \n\nmodinfo.init()\n \ndef getdir():\n return renpy.config.gamedir\n\nprint(\"AWSW Mod Loader Init\")\n\nsearch_dir = getdir() + \"/mods/\"\nif not os.path.exists(search_dir):\n os.makedirs(search_dir)\n\nsys.path.append(getdir() + \"/modloader/\")\nsys.path.append(getdir() + \"/mods/\")\n\nloaded_mods = []\n\nfor object in os.listdir(search_dir):\n fullpath = search_dir + object\n if os.path.isdir(fullpath):\n for object2 in os.listdir(fullpath):\n modobj = fullpath + '/' + object2\n if object2 == 'mod.py':\n print(('Loaded mod ' + object).encode('utf-8'))\n name = os.path.splitext(os.path.split(modobj)[-1])\n modinfo.modlist.append(object)\n loc = dict()\n glo = dict(globals())\n execfile(modobj, glo, loc) # We want to isolate mods from each other, but give them a copy of our globals so modinfo is not reloaded.\n loaded_mods.append((glo, loc)) # Grab locals so we can initialize the mods later.\n elif object2 == 'resource' and os.path.isdir(modobj):\n renpy.config.searchpath.append(modobj)\n\nfor (glo, loc) in loaded_mods:\n if 'mod_init' in loc:\n comb = glo.copy()\n comb.update(loc)\n func = loc['mod_init']\n exec(func.__code__, comb) # This is nasty!\n \n# force renpy to reindex all game files\nrenpy.loader.old_config_archives = None","sub_path":"modloader/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"327382251","text":"import sys\nimport numpy as np\nfrom numba import jit\n\nfrom scipy.special import hyp2f1\nimport scipy.integrate as integrate\n\n@jit\ndef Support(params):\n if params[0] <= 0 : return False\n # if params[1] <= 0 : return False\n return True\n\n@jit\ndef Density(r,params,Rmax):\n rc = params[0]\n a = 0.6\n b = 2.5\n g = 0.0\n v1 = ((r/rc)**(-g))*(1.0 + (r/rc)**(1./a))**(-a*(b - g)) \n w = a*(b-g)\n x = -a*(g-2.)\n y = 1. - (a*(g-2.))\n z = -((rc/Rmax)**(-1./a))\n v2 = -((rc**g)*((Rmax**(-1./a))**(a*(g-2.)))/(g-2.))*hyp2f1(w,x,y,z)\n return v1/v2\n\n\ndef Number(r,params,Rmax):\n Num = np.vectorize(lambda y: integrate.quad(lambda x:Density(x,params,Rmax)*x,1e-5,y,\n epsabs=1.49e-03, epsrel=1.49e-03,limit=1000)[0])\n return Num(r)\n\n@jit\ndef logLikeStar(p,r,params,Rmax):\n return np.log((p*r*Density(r,params,Rmax)) + (1.-p)*LikeField(r,Rmax))\n\n@jit\ndef LikeField(r,rm):\n return 2.*r/rm**2\n\nclass Module:\n \"\"\"\n Chain for computing the likelihood \n \"\"\"\n def __init__(self,radii,pro,Rmax,trans_lim):\n \"\"\"\n Constructor of the logposteriorModule\n \"\"\"\n self.radii = radii\n self.pro = pro\n self.Rmax = Rmax\n self.t = trans_lim\n print(\"Module Initialized\")\n\n def Priors(self,params, ndim, nparams):\n #------- Uniform Priors -------\n for i in range(ndim):\n params[i] = (params[i])*(self.t[i,1]-self.t[i,0])+self.t[i,0]\n\n def LogLike(self,params,ndim,nparams):\n #----- Checks if parameters' values are in the ranges\n if not Support(params) : \n return -1e50\n\n # ----- Computes Likelihoods ---------\n llike = np.sum(map(lambda w,x:logLikeStar(w,x,params,self.Rmax),self.pro,self.radii))\n # print(llike)\n return llike\n\n\n\n","sub_path":"MultiNest/old-lixo/Models/MGDPRC.py","file_name":"MGDPRC.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"373909112","text":"#Uncoded Computation - Multi-message Communıcation\n\nimport numpy as np\n\n#ds = 1000 # data size\nds = 3000\nsample = 720\nfp = 0.8 #failure probability\ndelay_time = 0.006\nnw = 20 # number of workers\nr = 3 # repetition factor\nnumbexp = 100 # number of experiment\nNWe = 10\n\ndef readdata(filename):\n times = []\n inp = open(filename, \"r\")\n for line in inp.readlines():\n times.append([float(val) for idx, val in enumerate(line.split(\" \")) if idx != 0 and idx <= sample])\n comptime = times[0]\n commtime = times[1]\n return np.array(comptime), np.array(commtime)\n\ndef isdecodable(received, nw):\n flag = 0\n count = 0\n for i in range(0, nw):\n if i in received:\n count += 1\n if count == nw:\n flag = 1\n return flag\n\n\n\ndef decodingtime(assign, realize, nw):\n rs = np.sort(realize, axis=None)\n i = 0\n time = 0\n while i < rs.size:\n time = rs[i]\n received = assign[realize <= time]\n if isdecodable(received, nw) == 1:\n break\n i += 1\n return time\n\n\ndef main():\n assign = np.zeros(shape=(nw, r))\n for i in range(0, r):\n assign[:, i] = np.roll(np.arange(nw), -i)\n comp = np.zeros(shape=(NWe, sample))\n comm = np.zeros(shape=(NWe, sample))\n for d in range(0, NWe):\n comp[d], comm[d] = readdata(\"P2P/resultsp2p-\" + str(ds) + \"rank\" + str(d+1) + \".txt\")\n comp = comp.reshape((nw, int(sample / (nw / NWe))))\n comm = comm.reshape((nw, int(sample / (nw / NWe))))\n itercount = int((sample / (nw / NWe)) / r) # number of iteration\n avg = 0\n for nexp in range(0, numbexp):\n wt = np.zeros(shape=(1, itercount))\n for i in range(0, itercount):\n for j in range(0, nw):\n rgn = np.random.random_sample()\n if rgn < fp:\n delay = delay_time\n else:\n delay = 0\n cctp = np.zeros(shape=(1, r))\n for l in range(0, r):\n if l == 0:\n cctp[0][l] = comp[j][i * r] + comm[j][i * r]\n else:\n compsum = 0\n for k in range(0, l + 1):\n compsum += comp[j][i * r + k] # compute overall computation time\n cctp[0][l] = max(compsum, cctp[0][l - 1]) + comm[j][i * r + l]\n cctp = cctp + delay\n if j == 0:\n cct = cctp\n else:\n cct = np.concatenate((cct, cctp))\n wt[0][i] = decodingtime(assign, cct, nw)\n avg += np.mean(wt) / numbexp\n print(avg)\nmain()","sub_path":"FixedRandomDelay/distcomp_UCC_MM.py","file_name":"distcomp_UCC_MM.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"143601267","text":"import time, urandom, struct\nfrom machine import Pin, SPI \n\nclass LoRa:\n def __init__(self, RST_Pin, CS_Pin, SPI_CH, SCK_Pin, MOSI_Pin, MISO_Pin, DIO0_Pin, LoRa_id = 0, wait_ACK=True, plus20dBm=False): \n self.ack_token = 0\n self.sending = False\n self.send_id = LoRa_id\n self.header_fmt = 'HHH' # self.send_id, recv_id, self.ack_token\n self._mode = None\n ####################\n # #\n # 1.Reset # \n # #\n #################### \n # Reset LoRa Module\n rst_pin = Pin(RST_Pin, Pin.OUT)\n rst_pin.off()\n time.sleep(0.01)\n rst_pin.on()\n time.sleep(0.01) \n\n ####################\n # #\n # 2.SPI #\n # #\n ####################\n '''\n We command LoRa module to perform Tx/Rx operations via the SPI interface.\n We disable SPI communication first to ensure it only happends when we need.\n Define communication functions read and write.\n The SPI comm is enabled temporarily for reading and writing and disabled thereafter.\n '''\n # Disable SPI communication with the LoRa module\n self.cs_pin = Pin(CS_Pin, Pin.OUT)\n self.cs_pin.on() # Release board from SPI Bus by bringing it into high impedance status. \n \n # SPI communication\n # See datasheet: Device support SPI mode 0 (polarity & phase = 0) up to a max of 10MHz.\n self.spi = SPI(SPI_CH, baudrate=10_000_000, polarity=0, phase=0,\n sck=Pin(SCK_Pin), mosi=Pin(MOSI_Pin), miso=Pin(MISO_Pin)\n ) \n\n ####################\n # #\n # 3.Lora #\n # #\n ####################\n self.RegTable = { # register table\n 'RegFifo' : 0x00 ,\n 'RegOpMode' : 0x01 , # operation mode\n 'RegFrfMsb' : 0x06 ,\n 'RegFrfMid' : 0x07 ,\n 'RegFrfLsb' : 0x08 ,\n 'RegPaConfig' : 0x09 ,\n 'RegFifoTxBaseAddr' : 0x0e ,\n 'RegFifoRxBaseAddr' : 0x0f ,\n 'RegFifoAddrPtr' : 0x0d ,\n 'RegFifoRxCurrentAddr' : 0x10 ,\n 'RegIrqFlags' : 0x12 , \n 'RegRxNbBytes' : 0x13 , # Number of received bytes \n 'RegPktSnrValue' : 0x19 ,\n 'RegPktRssiValue' : 0x1a ,\n 'RegRssiValue' : 0x1b ,\n 'RegModemConfig1' : 0x1d , \n 'RegModemConfig2' : 0x1e , \n 'RegPreambleMsb' : 0x20 , \n 'RegPreambleLsb' : 0x21 ,\n 'RegPayloadLength' : 0x22 ,\n 'RegModemConfig3' : 0x26 , \n 'RegDioMapping1' : 0x40 , \n 'RegVersion' : 0x42 , \n 'RegPaDac' : 0x4d \n }\n \n self.Mode = { # see Table 16 LoRa ® Operating Mode Functionality \n 'SLEEP' : 0b000,\n 'STANDBY' : 0b001,\n 'TX' : 0b011,\n 'RXCONTINUOUS' : 0b101, \n 'RXSINGLE' : 0b110, \n 'CAD' : 0b111, \n } \n\n # Choose LoRa mode and Test write/read functions\n LongRangeMode = 0b1\n # Choose LoRa (instead of FSK) mode for SX1276 and put the module in sleep mode\n self.write('RegOpMode', self.Mode['SLEEP'] | LongRangeMode << 7) \n # Test read function \n assert self.read('RegOpMode') == (self.Mode['SLEEP'] | LongRangeMode << 7), \"LoRa initialization failed\"\n \n # Set modem config: bandwidth, coding rate, header mode, spreading factor, CRC, and etc. \n # See 4.4. 
LoRa Mode Register Map \n Bw = {'125KHz':0b0111, '500kHz':0b1001}\n CodingRate = {5:0b001, 6:0b010, 7:0b011, 8:0b100}\n ImplicitHeaderModeOn = {'Implicit':0b1, 'Explicit':0b0}\n self.write('RegModemConfig1', Bw['125KHz'] << 4 | CodingRate[5] << 1 | ImplicitHeaderModeOn['Explicit'])\n SpreadingFactor = {7:0x7, 9:0x9, 12:0xC}\n TxContinuousMode = {'normal':0b0, 'continuous':0b1}\n RxPayloadCrcOn = {'disable':0b0, 'enable':0b1}\n self.write('RegModemConfig2', SpreadingFactor[7] << 4 | TxContinuousMode['normal'] << 3 | RxPayloadCrcOn['enable'] << 2 | 0x00) # Last 0x00 is SymbTimeout(9:8)\n self.write('RegModemConfig3', 0x04) # 0x04 is SymbTimeout(7:0)\n \n # Preamble length\n self.write('RegPreambleMsb', 0x0) # Preamble can be (2^15)kb long, much longer than payload\n self.write('RegPreambleLsb', 0x8) # but we just use 8-byte preamble\n \n # See 4.1.4. Frequency Settings\n FXOSC = 32e6 # Freq of XOSC\n FSTEP = FXOSC / (2**19)\n Frf = int(915e6 / FSTEP)\n self.write('RegFrfMsb', (Frf >> 16) & 0xff)\n self.write('RegFrfMid', (Frf >> 8) & 0xff)\n self.write('RegFrfLsb', Frf & 0xff)\n \n # Output Power\n '''\n If desired output power is within -4 ~ +15dBm, use PA_LF or PA_HF as amplifier. \n Use PA_BOOST as amplifier to output +2 ~ +17dBm continuous power or up to 20dBm \n peak power in a duty cycled operation.\n Here we will always use PA_BOOST. \n Since we use PA_BOOST, Pout = 2 + OutputPower and MaxPower could be any number (Why not 0b111/0x7?)\n '''\n PaSelect = {'PA_BOOST':0b1, 'RFO':0b0} # Choose PA_BOOST (instead of RFO) as the power amplifier\n MaxPower = {'15dBm':0x7, '13dBm':0x2} # Pmax = 10.8 + 0.6 * 7 \n OutputPower = {'17dBm':0xf, '2dBm':0x0} \n self.write('RegPaConfig', PaSelect['PA_BOOST'] << 7 | MaxPower['15dBm'] << 4 | OutputPower['2dBm'])\n \n # Enables the +20dBm option on PA_BOOST pin. \n if plus20dBm: # PA (Power Amplifier) DAC (Digital Analog Converter)\n PaDac = {'default':0x04, 'enable_PA_BOOST':0x07} # Can be 0x04 or 0x07. 
0x07 will enables the +20dBm option on PA_BOOST pin\n self.write('RegPaDac', PaDac['enable_PA_BOOST']) \n \n # FIFO data buffer \n '''\n SX1276 has a 256 byte memory area as the FIFO buffer for Tx/Rx operations.\n How do we know which area is for Tx and which is for Rx.\n We must set the base addresses RegFifoTxBaseAddr and RegFifoRxBaseAddr independently.\n Since SX1276 work in a half-duplex manner, we better set both base addresses\n at the bottom (0x00) of the FIFO buffer so that we can buffer 256 byte data\n during transmition or reception.\n ''' \n self.Fifo_Bottom = 0x00 # We choose this value to max buffer we can write (then send out)\n self.write('RegFifoTxBaseAddr', self.Fifo_Bottom)\n self.write('RegFifoRxBaseAddr', self.Fifo_Bottom)\n \n ####################\n # #\n # 4.Interrupt #\n # #\n ####################\n '''\n # This section is optional for Tx.\n # It enable an interrupt when Tx is done.\n '''\n self.DioMapping = {\n 'Dio0' : {\n 'RxDone' : 0b00 << 6,\n 'TxDone' : 0b01 << 6,\n 'CadDone' : 0b10 << 6\n },\n 'Dio1' : {\n 'RxTimeout' : 0b00 << 4,\n 'FhssChangeChannel': 0b01 << 4,\n 'CadDetected' : 0b10 << 4\n },\n 'Dio2' : {},\n 'Dio3' : {},\n 'Dio4' : {},\n 'Dio5' : {},\n } \n \n self.IrqFlags = {\n 'RxTimeout' : 0b1 << 7,\n 'RxDone' : 0b1 << 6,\n 'PayloadCrcError' : 0b1 << 5,\n 'ValidHeader' : 0b1 << 4,\n 'TxDone' : 0b1 << 3,\n 'CadDone' : 0b1 << 2,\n 'FhssChangeChannel': 0b1 << 1,\n 'CadDetected' : 0b1 << 0, \n }\n \n dio0_pin = Pin(DIO0_Pin, Pin.IN)\n dio0_pin.irq(handler=self._irq_handler, trigger=Pin.IRQ_RISING)\n \n ''' # interrupt flag mask: use to deactive a particular interrupt\n RegIrqFlagsMask = 0x11;\n IrqFlagsMask = {\n 'RxTimeoutMask' : 0b1 << 7,\n 'RxDoneMask' : 0b1 << 6,\n 'PayloadCrcErrorMask' : 0b1 << 5,\n 'ValidHeaderMask' : 0b1 << 4,\n 'TxDoneMask' : 0b1 << 3,\n 'CadDoneMask' : 0b1 << 2,\n 'FhssChangeChannelMask': 0b1 << 1,\n 'CadDetectedMask' : 0b1 << 0\n }\n write(RegIrqFlagsMask, IrqFlagsMask['TxDoneMask']) # This will deactivate interrupt on TxDone.\n ''' \n\n self.mode = 'STANDBY' # Request Standby mode so SX1276 performs reception initialization. \n \n @property\n def mode(self):\n return self._mode\n\n @mode.setter\n def mode(self, value): \n if self.mode != value:\n if value == 'TX':\n self.write('RegDioMapping1', self.DioMapping['Dio0']['TxDone']) \n elif value == 'RXCONTINUOUS':\n self.write('RegDioMapping1', self.DioMapping['Dio0']['RxDone']) \n self.write('RegOpMode', self.Mode[value]) \n self._mode = value\n\n def write(self, reg, data, fifo=False): \n wb = bytes([self.RegTable[reg] | 0x80]) # Create a writing byte\n if fifo:\n data = wb + data\n else:\n data = wb + bytes([data]) \n self.cs_pin.value(0) # Bring the CS pin low to enable communication \n self.spi.write(data)\n self.cs_pin.value(1) # release the bus. 
\n\n def read(self, reg=None, length=1):\n self.cs_pin.value(0)\n # https://docs.micropython.org/en/latest/library/machine.SPI.html#machine-softspi\n if length == 1:\n data = self.spi.read(length+1, self.RegTable[reg])[1]\n else:\n data = self.spi.read(length+1, self.RegTable[reg])[1:]\n self.cs_pin.value(1)\n return data\n \n def _irq_handler(self, pin):\n irq_flags = self.read('RegIrqFlags')\n if irq_flags & self.IrqFlags['TxDone']: \n self.mode = 'RXCONTINUOUS' \n while 1:\n 1\n self.after_TxDone(self)\n\n elif irq_flags & self.IrqFlags['RxDone']:\n if irq_flags & self.IrqFlags['PayloadCrcError']:\n print('PayloadCrcError')\n else:\n self.write('RegFifoAddrPtr', self.read('RegFifoRxCurrentAddr'))\n packet = self.read('RegFifo', self.read('RegRxNbBytes'))\n PacketSnr = self.read('RegPktSnrValue')\n SNR = PacketSnr / 4\n PacketRssi = self.read('RegPktRssiValue')\n #Rssi = read(RegRssiValue)\n if SNR < 0:\n RSSI = -157 + PacketRssi + SNR\n else:\n RSSI = -157 + 16 / 15 * PacketRssi\n RSSI = round(RSSI, 2) # Table 7 Frequency Synthesizer Specification\n self.packet_handler(self, packet, SNR, RSSI) \n self.Tx() \n else:\n for i, j in self.IrqFlags.items():\n if irq_flags & j:\n print(i)\n\n self.write('RegIrqFlags', 0xff) # write anything could clear all types of interrupt flags\n \n \n def send(self, data, recv_id=0): \n if len(data) > 240: raise # want to send a too large message \n self.ack_token = urandom.randint(0,65535)\n header = struct.pack('= 0.05:\n # more than 5% pixels are void\n flag[k] = 1 # this image will not be saved\n k += 1\n\n if fg:\n return image_patchs, flag\n else:\n return image_patchs\n\n\ndef rotate_image_random(img, rotation_index):\n deg_dict = {\n 1: 0,\n 2: 90,\n 3: 180,\n 4: 270\n }\n\n # rows = img.shape[0]\n # cols = img.shape[1]\n #\n # deg = deg_dict[rotation_index]\n\n if rotation_index != 1:\n # M = cv2.getRotationMatrix2D(((cols - 1) / 2.0, (rows - 1) / 2.0), deg, 1)\n # dst = cv2.warpAffine(img, M, (cols, rows))\n\n dst = np.rot90(img, rotation_index-1)\n\n return dst\n\n else:\n return img\n\n\nif __name__ == \"__main__\":\n\n reference = \"/run/user/1001/gvfs/smb-share:server=141.58.125.9,share=s-platte/ShuFangwen/results/lvl4_nadir/test_set/2_mask\"\n data_path = \"/run/user/1001/gvfs/smb-share:server=141.58.125.9,share=s-platte/ShuFangwen/results/lvl4_nadir/test_set\"\n folders_list = os.listdir(data_path)\n folders_list.remove(\"2_mask\")\n folders_list.remove(\"not_use_feature\")\n # folders_list.remove(\"1_pointlabel\")\n\n save_path = \"/data/fangwen/mix_test2\"\n make_if_not_exists(save_path)\n\n size = (480, 480)\n\n mask_list = os.listdir(reference)\n length = len(mask_list)\n for l in tqdm(range(length)):\n\n for rotation_index in range(1, 5):\n\n name = mask_list[l]\n\n mask_path = os.path.join(reference, name)\n # name = \"DSC03717.tif\"\n # mask_path ='/data/fangwen/results/level3/test_set/2_mask/DSC03717.tif'\n mask = cv2.imread(mask_path, 0)\n mask = rotate_image_random(mask, rotation_index)\n\n mask_patchs, flag = chip(mask, chip_size=size, overlap=0.5, nchannel=1, fg=True)\n\n # based on this flag, we chip other image\n for folder in folders_list:\n folder_path = os.path.join(data_path, folder)\n img_path = os.path.join(folder_path, name)\n\n if folder_path.split(\"/\")[-1].split(\"_\")[-2] == \"f\" or folder_path.split(\"/\")[-1].split(\"_\")[-2] == \"5\":\n # read index image and feature image\n img = tifffile.imread(img_path)\n img = rotate_image_random(img, rotation_index)\n img_patchs = chip(img, chip_size=size, overlap=0.5, 
nchannel=1, fg=False)\n\n elif folder_path.split(\"/\")[-1].split(\"_\")[-2] == \"rgb\" or folder_path.split(\"/\")[-1].split(\"_\")[\n -2] == \"4\":\n # rgb\n img = cv2.imread(img_path)\n img = rotate_image_random(img, rotation_index)\n img_patchs = chip(img, chip_size=size, overlap=0.5, nchannel=3, fg=False)\n\n elif folder_path.split(\"/\")[-1].split(\"_\")[-2] == \"3\":\n # grey\n img = cv2.imread(img_path, 0)\n img = rotate_image_random(img, rotation_index)\n img_patchs = chip(img, chip_size=size, overlap=0.5, nchannel=1, fg=False)\n\n for id in range(flag.shape[0]):\n\n if flag[id] == 0:\n # save masks\n save_mask = os.path.join(save_path, \"2_mask\")\n make_if_not_exists(save_mask)\n cv2.imwrite(os.path.join(save_mask, name.split(\".\")[-2] + \"_\" + str(id) + '_r' + str(rotation_index) + \".tif\"),\n mask_patchs[id])\n\n # save other images\n if folder_path.split(\"/\")[-1].split(\"_\")[-2] == \"f\" or folder_path.split(\"/\")[-1].split(\"_\")[\n -2] == \"5\":\n save_img = os.path.join(save_path, folder_path.split(\"/\")[-1])\n make_if_not_exists(save_img)\n tifffile.imsave(os.path.join(save_img, name.split(\".\")[-2] + \"_\" + str(id) + '_r' + str(rotation_index) + \".tif\"),\n img_patchs[id])\n\n if folder_path.split(\"/\")[-1].split(\"_\")[-2] == \"rgb\" or folder_path.split(\"/\")[-1].split(\"_\")[\n -2] == \"4\":\n save_img = os.path.join(save_path, folder_path.split(\"/\")[-1])\n make_if_not_exists(save_img)\n cv2.imwrite(os.path.join(save_img, name.split(\".\")[-2] + \"_\" + str(id) + '_r' + str(rotation_index) + \".tif\"),\n img_patchs[id])\n\n elif folder_path.split(\"/\")[-1].split(\"_\")[-2] == \"3\":\n save_img = os.path.join(save_path, folder_path.split(\"/\")[-1])\n make_if_not_exists(save_img)\n cv2.imwrite(os.path.join(save_img, name.split(\".\")[-2] + \"_\" + str(id) + '_r' + str(rotation_index) + \".tif\"),\n img_patchs[id])\n","sub_path":"src/semantic-segmentation/chip.py","file_name":"chip.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"56684102","text":"import numpy as np\nimport cv2\n\n#we are capturing image from the standard cameras\n# 0,1,2,... (since we are using the laptop capera , we use 0 , else we use 1,2..)\n#Note: we need to create a VideoCapture object to capture video in OpenCV\n\ncap = cv2.VideoCapture(0) #this will create a streaming video via the lappy cam\n\n#we can apply processing on this video frame/frame and apply computations accordingly\n\n#we are interesting in caputuring camera frames from several cameras in the setup\n#and detect the objects and their location, output as a matrix,as give\n# plotting in a graph\n\n#Challenges:\n#Trigerring frame capture at the same time from these cameras\n#Converting them in to unified co-ordinates\n#providing a 3D view of these objects\n#plot the orientation of these objects in this unified co-ordinate system\n\n#Detection\n# 1. detect robots using red circles mounted on these cameras\n# 2. detect by learning the shape of these robots by training negative\n# and positive images\n# 3. Proximity information specific to individual cameras \n\nwhile True:\n #Capturing the images frames by frames\n ret,frame = cap.read() #the read method of the VideoCapture Object\n #returns a frame\n\n #we convert each frame into a grayscale\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n\n #displaying the resulting frame\n cv2.imshow('frame',gray)\n \n\n k = cv2.waitKey(0)\n\n if k == 27:\n cv2.destroyAllWindows()\n\n for i in range(1,4):\n cv2.waitKey(1)\n \n \n","sub_path":"VideoCapture.py","file_name":"VideoCapture.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"526775576","text":"import nltk\nfrom nltk.corpus import state_union\nfrom nltk.tokenize import PunktSentenceTokenizer\n\ntrain_text = state_union.raw(\"margot.txt\")\nsample_text = state_union.raw(\"gal_gadot.txt\")\n\ncustom_sent_tokenizer = PunktSentenceTokenizer(train_data)\n\ntokenized = custom_sent_tokenizer(sample_text)\n\ntry:\n for w in tokenized:\n words = nltk.word_tokenize(w)\n tagged = nltk.pos_tag(words)\n print(tagged)\n \nexcept Exception as e:\n print(str(e))\n","sub_path":"Natural_Language_Processing/4- Part of Speech tagging.py","file_name":"4- Part of Speech tagging.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"307791931","text":"\"\"\"\nCompatibility module for high-level h5py\n\"\"\"\nimport sys\nimport six\n\nWINDOWS_ENCODING = \"mbcs\"\n\n\ntry:\n from os import fspath\nexcept ImportError:\n def fspath(path):\n \"\"\"\n Return the string representation of the path.\n If str or bytes is passed in, it is returned unchanged.\n This code comes from PEP 519, modified to support earlier versions of\n python.\n\n This is required for python < 3.6.\n \"\"\"\n if isinstance(path, (six.text_type, six.binary_type)):\n return path\n\n # Work from the object's type to match method resolution of other magic\n # methods.\n path_type = type(path)\n try:\n return path_type.__fspath__(path)\n except AttributeError:\n if hasattr(path_type, '__fspath__'):\n raise\n try:\n import pathlib\n except ImportError:\n pass\n else:\n if isinstance(path, pathlib.PurePath):\n return six.text_type(path)\n\n raise TypeError(\"expected str, bytes or os.PathLike object, not \"\n + path_type.__name__)\n\n# This is from python 3.5 stdlib (hence lacks PEP 519 changes)\n# This was introduced into python 3.2, so python < 3.2 does not have this\n# Effectively, this is only required for python 2.6 and 2.7, and can be removed\n# once support for them is dropped\ndef _fscodec():\n encoding = sys.getfilesystemencoding()\n if encoding == 'mbcs':\n errors = 'strict'\n else:\n try:\n from codecs import lookup_error\n lookup_error('surrogateescape')\n except LookupError:\n errors = 'strict'\n else:\n errors = 'surrogateescape'\n\n def fsencode(filename):\n \"\"\"\n Encode filename to the filesystem encoding with 'surrogateescape' error\n handler, return bytes unchanged. On Windows, use 'strict' error handler if\n the file system encoding is 'mbcs' (which is the default encoding).\n \"\"\"\n if isinstance(filename, six.binary_type):\n return filename\n elif isinstance(filename, six.text_type):\n return filename.encode(encoding, errors)\n else:\n raise TypeError(\"expect bytes or str, not %s\" % type(filename).__name__)\n\n def fsdecode(filename):\n \"\"\"\n Decode filename from the filesystem encoding with 'surrogateescape' error\n handler, return str unchanged. On Windows, use 'strict' error handler if\n the file system encoding is 'mbcs' (which is the default encoding).\n \"\"\"\n if isinstance(filename, six.text_type):\n return filename\n elif isinstance(filename, six.binary_type):\n return filename.decode(encoding, errors)\n else:\n raise TypeError(\"expect bytes or str, not %s\" % type(filename).__name__)\n\n return fsencode, fsdecode\n\n_fsencode, _fsdecode = _fscodec()\ndel _fscodec\n\ntry:\n from os import fsencode\nexcept ImportError:\n fsencode = _fsencode\n\ntry:\n from os import fsdecode\nexcept ImportError:\n fsdecode = _fsdecode\n\n\ndef filename_encode(filename):\n \"\"\"\n Encode filename for use in the HDF5 library.\n\n Due to how HDF5 handles filenames on different systems, this should be\n called on any filenames passed to the HDF5 library. See the documentation on\n filenames in h5py for more information.\n \"\"\"\n filename = fspath(filename)\n if sys.platform == \"win32\":\n if isinstance(filename, six.text_type):\n return filename.encode(WINDOWS_ENCODING, \"strict\")\n return filename\n return fsencode(filename)\n\n\ndef filename_decode(filename):\n \"\"\"\n Decode filename used by HDF5 library.\n\n Due to how HDF5 handles filenames on different systems, this should be\n called on any filenames passed from the HDF5 library. 
See the documentation\n on filenames in h5py for more information.\n \"\"\"\n if sys.platform == \"win32\":\n if isinstance(filename, six.binary_type):\n return filename.decode(WINDOWS_ENCODING, \"strict\")\n elif isinstance(filename, six.text_type):\n return filename\n else:\n raise TypeError(\"expect bytes or str, not %s\" % type(filename).__name__)\n return fsdecode(filename)\n","sub_path":"Tensorflow_Pandas_Numpy/source3.6/h5py/_hl/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"358430982","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\nimport RPi.GPIO as GPIO\nimport MFRC522\nimport signal\nimport time\nimport datetime\n#from datetime import date\nGPIO.setwarnings(False)\ncmpt=0\n\ndate = datetime.datetime.now()\nprint(date)\n \ncontinue_reading = True\n# Capture SIGINT for cleanup when the script is aborted\ndef end_read(signal,frame):\n global continue_reading\n print (\"Lecture terminée\")\n continue_reading = False\n GPIO.cleanup()\n# Hook the SIGINT\nsignal.signal(signal.SIGINT, end_read)\n# Create an object of the class MFRC522\nMIFAREReader = MFRC522.MFRC522()\n#print (\"Press Ctrl-C to stop.\")\n#secteurBloc=eval(input(\"Entrez un Secteur :\\n\"))\nsecteurBlock2=12\n#secteurBlock3=12\n\nprint (\"Passer le tag RFID a lire\")\n# This loop keeps checking for chips. If one is near it will get the UID and authenticate\nwhile continue_reading:\n # Scan for cards \n (status,TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)\n # If a card is found\n if status == MIFAREReader.MI_OK:\n print (\"Carte detectee\")\n # Get the UID of the card\n (status,uid) = MIFAREReader.MFRC522_Anticoll()\n # If we have the UID, continue\n if status == MIFAREReader.MI_OK:\n data = [0x59,0x61,0x50,0x6F,0x54,0x74,0xFF,0x07,0x80,0x69,0x59,0x61,0x50,0x6F,0x54,0x74]\n # Print UID\n print (\"UID de la carte : \"+str(uid[0])+\".\"+str(uid[1])+\".\"+str(uid[2])+\".\"+str(uid[3])+\".\"+str(uid[4]))\n # This is the default key for authentication\n keyA_Public = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]\n # Clee d authentification privée\n keyA_Privé = [0x59,0x61,0x50,0x6F,0x54,0x74] #\"YaPoTt\"\n \n key = [0x59,0x61,0x50,0x6F,0x54,0x74,0xFF,0x07,0x80,0x69,0x59,0x61,0x50,0x6F,0x54,0x74]\n #keyA_Privé = key\n #keyA_Public = key\n # Select the scanned tag\n MIFAREReader.MFRC522_SelectTag(uid)\n # Authenticate with private key\n status = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, secteurBlock2,keyA_Privé, uid)\n # Check if authenticated\n if(status == MIFAREReader.MI_OK):\n next = False\n print (\"Authentification Avec la Clee Privé \")\n print(\"\\n\")\n print(\"Carte deja initialisé_sur secteur \",secteurBlock2,\"\\n\")\n print(\"INFORMATION Block: \")\n print (\"Le secteur\",secteurBlock2,\" contient actuellement : \")\n MIFAREReader.MFRC522_Read(secteurBlock2)\n print (\"Le secteur\",secteurBlock2+1,\" contient actuellement : \")\n MIFAREReader.MFRC522_Read(secteurBlock2+1)\n print (\"Le secteur\",secteurBlock2+2,\" contient actuellement : \")\n MIFAREReader.MFRC522_Read(secteurBlock2+2)\n # Stop\n #MIFAREReader.MFRC522_StopCrypto1()\n # Make sure to stop reading for cards\n continue_reading = False\n next = False\n else:\n print (\"\\nErreur d\\'Authentification Avec la Clee Privé sur secteur \",secteurBlock2,\"\\n\")\n next =True\n \n if(next == True):\n # Authenticate with Public key\n status1 = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, secteurBlock2,keyA_Public, uid)\n # Check if authenticated\n if(status1 == MIFAREReader.MI_OK):\n print (\"Authentification Avec la Clee Public sur secteur \",secteurBlock2,\"\\n\")\n print (\"Le secteur \",secteurBlock2+3,\"contient actuellement :\")\n MIFAREReader.MFRC522_Read(secteurBlock2+3)\n print (\"Ecriture ...Clee Privé sur secteur\",secteurBlock2+3)\n MIFAREReader.MFRC522_Write(secteurBlock2+3, data)\n print (\"\\n\")\n print (\"Carte initialisé sur Block\",secteurBlock2+3)\n MIFAREReader.MFRC522_StopCrypto1()\n continue_reading = False\n else:\n print (\"Error Authentification Avec la Clee Public 
sur secteur \",secteurBlock2)\n \n","sub_path":"sauvegarde/Read.py","file_name":"Read.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"274552255","text":"def partition(a):\n pivot = a[0]\n left, rite = 0, len(a)-1\n while left != rite:\n while rite != left and a[rite] > pivot:\n rite -= 1\n a[left], a[rite] = a[rite], a[left]\n while left != rite and a[left] <= pivot:\n left += 1\n a[left], a[rite] = a[rite], a[left]\n\n\n\n\nn = int(input())\na = [int(x) for x in input().split()]\n#ifile = open(\"rosalind_par.txt\", \"r\")\n#wfile = open(\"rosalind_par_ans.txt\", \"w\")\n#n = int(ifile.readline())\n#a = [int(x) for x in ifile.readline().split()]\npartition(a)\n#print(*a,file=wfile)\nprint(*a)","sub_path":"Rosalind/algorithm_heights/par.py","file_name":"par.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"338131101","text":"\n\"\"\"\nDefine predicate-safeness.\n\"\"\"\n\nfrom helper import get_predicates, get_ordered_parameter_names\nfrom base import PredicateError\nfrom delayed import DelayedArgumentError\nfrom Transparent import Transparent\n\nclass PredicateSafeFunction(Transparent):\n\n getattr_fallback = lambda self: self._function\n\n def __init__(self, function):\n \"\"\"\n Defines an `@safe` function.\n Is a wrapper for a function.\n \"\"\"\n self._function = function\n self._predicates = get_predicates(self._function) # A dict mapping {parameter name : predicate}\n self._parameter_names = get_ordered_parameter_names(self._function)\n\n for predicate_i, predicate in enumerate(self._predicates.values()):\n if predicate.is_generalized:\n raise DelayedArgumentError(\"Predicate #{} is generalized.\".format(predicate_i))\n\n def test_arguments(self, *args):\n \"\"\"\n Test arguments against their python-predicates.\n Raise an error if any don't satisfy.\n \"\"\"\n for param_num, param_name in enumerate(self._parameter_names):\n argument = args[param_num]\n predicate = self._predicates.get(param_name)\n if predicate and not predicate(argument):\n raise PredicateError(\"Predicate for parameter #{} not satisfied.\".format(param_num))\n\n def test_return_val(self, ret):\n predicate = self._predicates.get(\"return\")\n if predicate and not predicate(ret):\n raise PredicateError(\"Predicate for return value not satisfied.\")\n\n def __call__(self, *args):\n self.test_arguments(*args)\n ret = self._function(*args)\n self.test_return_val(ret)\n return ret","sub_path":"safe.py","file_name":"safe.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"634861272","text":"\n\nfrom xai.brain.wordbase.verbs._ennoble import _ENNOBLE\n\n#calss header\nclass _ENNOBLES(_ENNOBLE, ):\n\tdef __init__(self,): \n\t\t_ENNOBLE.__init__(self)\n\t\tself.name = \"ENNOBLES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"ennoble\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_ennobles.py","file_name":"_ennobles.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"631069877","text":"# 2018-8-8\n# update 2018-9-4\n# Code chinese character\n# from codeWord import CodeWord\nclass CodeChar(object):\n\t\"\"\"\n\tHave code problem!\n\ttest = CodeChar()\n\tres = test.code(string)\n\tres2 = test.decode(res)\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.exp = [',','。','?','!',':','《','》','【','】','(',')','、','@','#','$','%','^','&','*','.',',','.','<','>','-','+','=', ' ','\\n', '“','”', '!', '[', ']', '(', ')']\n\t\tself.radix = 19\n\t\tself.a_int = 97\n\t\tself.A_int = 65\n\t\tself.func = CodeWord(33)\n\n\tdef code(self, s):\n\t\tres = []\n\t\tflag = 0\n\t\ttmp = []\n\t\tfor i in s:\n\t\t\tif i in self.exp:\n\t\t\t\tif flag == 1:\n\t\t\t\t\ttmp.append('`')\n\t\t\t\t\tt = self.func.code(\"\".join(tmp[1:-1]))\n\t\t\t\t\tt = '`' + t + '`'\n\t\t\t\t\tres.append(t)\n\t\t\t\t\ttmp = []\n\t\t\t\t\tflag = 0\n\t\t\t\tres.append(i)\n\t\t\telse:\n\t\t\t\ti_to_int = ord(i)\n\t\t\t\tif i_to_int > 19000 and i_to_int < 40000:\n\t\t\t\t\tif flag == 1:\n\t\t\t\t\t\ttmp.append('`')\n\t\t\t\t\t\tt = self.func.code(\"\".join(tmp[1:-1]))\n\t\t\t\t\t\tt = '`' + t + '`'\n\t\t\t\t\t\tres.append(t)\n\t\t\t\t\t\ttmp = []\n\t\t\t\t\t\tflag = 0 \n\t\t\t\t\tnums = []\n\t\t\t\t\twhile i_to_int > 0:\n\t\t\t\t\t\twhile i_to_int:\n\t\t\t\t\t\t\tnum = i_to_int % 10\n\t\t\t\t\t\t\tnums.append(num)\n\t\t\t\t\t\t\ti_to_int = i_to_int // 10\n\t\t\t\t\t\tnums = nums[::-1]\n\t\t\t\t\t\tr = []\n\t\t\t\t\t\tfir = nums[0] * 10 + nums[1] - self.radix + self.A_int\n\t\t\t\t\t\tr.append(chr(fir))\n\t\t\t\t\t\tsec = nums[2] + self.a_int\n\t\t\t\t\t\tr.append((chr(sec)))\n\t\t\t\t\t\tthr = nums[3] + self.a_int + 8\n\t\t\t\t\t\tr.append(chr(thr))\n\t\t\t\t\t\tfur = nums[4] + self.a_int + 16\n\t\t\t\t\t\tr.append(chr(fur))\n\t\t\t\t\t\tres.append(\"\".join(r))\n\t\t\t\t\t\tr = []\n\t\t\t\telse:\n\t\t\t\t\tif flag == 0:\n\t\t\t\t\t\ttmp.append('`')\n\t\t\t\t\t\ttmp.append(i)\n\t\t\t\t\t\tflag = 1\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp.append(i)\n\t\treturn \" \".join(res)\n\n\tdef decode(self, s):\n\t\trecord = []\n\t\tflag = 1\n\t\tmark = 0\n\t\tend = 0\n\t\ttmp = []\n\t\tre = \"\"\n\t\tfor i in s:\n\t\t\tif i not in self.exp and i != \" \" and i != '`' and mark == 0:\n\t\t\t\trecord.append(i)\n\t\t\telif i == '`':\n\t\t\t\tif mark == 0:\n\t\t\t\t\tmark = 1\n\t\t\t\tif end:\n\t\t\t\t\tt = self.func.deCode(\" \".join(tmp))\n\t\t\t\t\tre += t\n\t\t\t\t\tmark = 0\n\t\t\t\t\tend = 0\n\t\t\t\t\ttmp = []\n\t\t\telse:\t\n\t\t\t\tif mark == 1:\n\t\t\t\t\ttmp.append(i)\n\t\t\t\t\tend = 1\n\t\t\t\telse:\n\t\t\t\t\tre += i\n\t\t\t\t\tflag = 0\n\t\t\tif flag == 0 and len(record) == 4:\n\t\t\t\tr = 0\n\t\t\t\tr += (ord(record[0]) - self.A_int + self.radix) * 1000\n\t\t\t\tr += (ord(record[1]) - self.a_int) * 100\n\t\t\t\tr += (ord(record[2]) - self.a_int - 8) * 10\n\t\t\t\tr += ord(record[3]) - self.a_int - 16\n\t\t\t\tre += chr(r)\n\t\t\t\trecord = []\n\t\t\t\tflag = 1\n\n\t\tres = \"\"\n\t\tpre = 0\n\t\tfor i in re:\n\t\t\tif i == ' ' and pre == 1:\n\t\t\t\tres += i\n\t\t\telif i != ' ':\n\t\t\t\tres += i\n\t\t\t\tpre = 0\n\n\t\t\tif i == ' ':\n\t\t\t\tpre = 1\n\t\treturn res\n\t\t\nclass CodeWord(object):\n\tdef __init__(self,x):\n\t\tself.x = x\n\tdef code(self, s):\n\t\td = {}\n\t\tfor i in (65,97):\n\t\t\tfor j in range(26):\n\t\t\t\td[chr(i+j)] = chr( (j+self.x) % 26 + i)\n\t\tres = \"\".join([d.get(c,c) for c in s])\n\t\treturn res\n\n\tdef deCode(self, s):\n\t\td = {}\n\t\tfor i in (65, 97):\n\t\t\tfor j in range(26):\n\t\t\t\td[chr(i+j)] = chr((j+26-self.x) % 26 + i)\n\t\tres = \"\".join([d.get(c,c) for c 
in s])\n\t\treturn res\n\ns = \"\"\"\n\n>## 个人博客\n\n- [链接](http://www.lxxx.site)\n\n>## tool \n\n- 图片,文件批量操作\n- 数据处理\n- 字符编码\n\n>## LeetCode\n\n- LeetCode算法题解析 Python, Java, C\n\n>## C Python Linux java MySQL PHP MatLab PyQt5 神经网络\n- 笔记\n\n\n>## Data Structures and Algorithm Analysis\n\n- 大部分用python实现\n\n\"\"\"\ntest = CodeChar()\nr1 = test.code(s)\nr2 = test.decode(r1)\nprint()\nprint(r1)\nprint(r2)\n\n\"\"\"\n! [ ] ( `kpqrzayh` . `qwn` ) \n > # # Bajq Bbnu Cdly Eeny \n \n - [ Tbms Gfiz ] ( `oaaw://ddd` . `seee` . `zpal` ) \n \n > # # `avvs` \n \n - Dcpq Kcnv , Gjrr Bcju Gciz Sdkx Giiv Bdjw \n - Gjoy Genu Dhqy Khis \n - Edqt Mfkw Nflu Lhkr \n \n > # # `SllaJvkl` \n \n - `SllaJvkl` Mglz Iior Uaou Qcrz Hfjs `Wfaovu` , `Qhch` , `J` \n \n > # # `J` `Wfaovu` `Spube` `qhch` `TfZXS` `WOW` `ThaShi` `WfXa5` Mapq Neot Nfrt Nepw \n - Mfiy Qhoq \n \n \n > # # `Khah` `Zaybjabylz` `huk` `Hsnvypaot` `Huhsfzpz` \n \n - Dikt Sarw Bjry Kjrs `wfaovu` Eenu Kgjw \n \n \n\n\n\n>## 个人博客\n\n- [链接](http://www.lxxx.site)\n\n>## tool \n\n- 图片,文件批量操作\n- 数据处理\n- 字符编码\n\n>## LeetCode\n\n- LeetCode算法题解析 Python, Java, C\n\n>## C Python Linux java MySQL PHP MatLab PyQt5 神经网络\n- 笔记\n\n\n>## Data Structures and Algorithm Analysis\n\n- 大部分用python实现\n\n\"\"\"","sub_path":"tool/汉字编码/codeChinese.py","file_name":"codeChinese.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"341373658","text":"from Search import *\nimport math\nfrom save import *\n# import pylab\n# import matplotlib.pyplot as plt\n\n\ndef A(x, y, alpha, delta):\n f_zn = f(x, y, alpha)\n g_zn = g(x, y, alpha, delta)\n f_x = pr_f_x(alpha, x, y)\n f_y = pr_f_y(alpha, x)\n g_x = pr_g_x(alpha, x, y)\n g_y = pr_g_y(alpha, delta, x, y)\n pr_fg = f_y + g_x\n sq_fg = f_zn ** 2 + g_zn ** 2\n return 2 * (f_x * g_zn ** 2 + g_y * f_zn ** 2 - g_zn * f_zn * pr_fg) / sq_fg\n\n \ndef b(x, y, alpha, delta):\n f_zn = f(x, y, alpha)\n g_zn = g(x, y, alpha, delta)\n sq_fg = f_zn ** 2 + g_zn ** 2\n return ((g_zn * x) ** 2 + (f_zn * y) ** 2)/sq_fg\n # return ((f_zn * y * y) ** 2) / sq_fg\n \n\ndef sens(alpha, delta, h):\n max_m = 0\n min_m = 1000\n x_list, y_list = search_cycle(alpha, delta, h)\n k = len(x_list)\n # print(k)\n h_list = []\n r_list = []\n m_list = []\n t_list = []\n t1_list = []\n new_a = 0\n r_list.append(1)\n h_list.append(0)\n for i in range(1, k):\n a1 = A(x_list[i-1], y_list[i-1], alpha, delta)\n a2 = A(x_list[i], y_list[i], alpha, delta)\n # f_zn1 = f(x_list[i-1], y_list[i-1], alpha)\n # f_zn2 = f(x_list[i], y_list[i], alpha)\n # g_zn1 = g(x_list[i-1], y_list[i-1], alpha, delta)\n # g_zn1 = g(x_list[i - 1], y_list[i - 1], alpha, delta)\n # f_x = pr_f_x(alpha, x_list[i], y_list[i])\n # f_y = pr_f_y(alpha, x_list[i])\n # g_x = pr_g_x(alpha, x_list[i], y_list[i])\n # g_y = pr_g_y(alpha, delta, x_list[i], y_list[i])\n # pr_fg = f_y + g_x\n # sq_fg = f_zn ** 2 + g_zn ** 2\n # new_a += 2 * (f_x * g_zn ** 2 + g_y * f_zn ** 2 - g_zn * f_zn * pr_fg) * h / sq_fg\n new_a += (a1 + a2) * h / 2\n new_r = math.exp(new_a)\n r_list.append(new_r)\n new_h = h_list[i - 1] + h * (b(x_list[i - 1], y_list[i - 1], alpha, delta) / r_list[i - 1] + b(x_list[i], y_list[i], alpha, delta) / r_list[i]) / 2\n h_list.append(new_h)\n t1_list.append(i)\n const_c = r_list[- 1] * h_list[- 1] / (1 - r_list[- 1])\n # print(const_c)\n for i in range(k):\n m_list.append(r_list[i] * (const_c + h_list[i]))\n if m_list[i] < min_m:\n min_m = m_list[i]\n if m_list[i] > max_m:\n max_m = m_list[i]\n t_list.append(i)\n # matlab_export(r_list, h_list, \"r_h.txt\")\n # return m_list, t_list\n # return m_list, max_m, min_m, len(x_list)\n\n # print(max_m, min_m)\n return m_list, x_list, y_list\n\n\ndef main(alpha, delta, h):\n s, M, m, d = sens(alpha, delta, h)\n print(M, m, d)\n # x0, y0 = sens(alpha, delta, h)\n # plt.plot(x0, y0)\n # plt.grid(True)\n # pylab.show()\n\n\nif __name__ == '__main__':\n main(0.4, 0.1307, 0.01)\n","sub_path":"Сопромат/sensitivity_ag.py","file_name":"sensitivity_ag.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"513523090","text":"#!/usr/bin/python\n# -*- encoding=GBK -*-\n__author__ = \"孙志宇\"\n__title__ = \"好物页面\"\nimport os\nimport sys\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport unittest\n\nimport logging\nfrom page.Card_Page.shoppingcard_page import ShoppingCard\n\nlogger = logging.getLogger(\"airtest\")\nlogger.setLevel(logging.ERROR)\n\n\nclass GoodThing(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n from poco.drivers.android.uiautomation import AndroidUiautomationPoco\n poco = AndroidUiautomationPoco()\n unittest.TestCase.__init__(self, *args, **kwargs)\n self.poco = poco\n\n # 点击好物按钮\n def test1_classification(self):\n # 点击按钮\n self.poco(text=\"好物\").click()\n\n # 点击搜索框并输入商品名\n def test2_search_et(self, TradeName):\n self.poco(name=\"com.devkeep.mall:id/search_et\").click()\n self.poco(name=\"com.devkeep.mall:id/search_et\").set_text(TradeName)\n self.poco(name=\"com.devkeep.mall:id/search_btn\").click()\n # 判断上面搜索商品是否存在\n if TradeName in self.poco(name=\"com.devkeep.mall:id/goods_name\")[0].get_text():\n self.poco(name=\"com.devkeep.mall:id/cart_iv\")[0].click()\n # 判断商品是否有sku\n if len(self.poco(name=\"com.devkeep.mall:id/tag_tv\")) >= 1:\n self.poco(name=\"com.devkeep.mall:id/tag_tv\")[0].click()\n self.poco(name=\"com.devkeep.mall:id/cart_buy_tv\").click()\n else:\n print(\"----商品没改sku----\")\n print(\"----搜索商品存在并加入购物车----\")\n else:\n print(\"----搜索商品不存在----\")\n\n def test3_shoppingCard_bubble(self):\n self.poco(name=\"android.widget.ImageView\").click()\n return ShoppingCard\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"page/ClassiFication_Page/GoodThing_page.py","file_name":"GoodThing_page.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"40145366","text":"# Mad Lib\n# Tworzy opowiadanie oparte na szczegółach wprowadzonych przez uzytkownika\n\nfrom tkinter import *\n\nclass Application(Frame):\n \"\"\"Aplikacja oparta na GUI która tworzy opowiadanie\n na podstawie informacji wprowadzonych przez użytkownika\n \"\"\"\n def __init__(self, master):\n \"\"\"Inicjalizuj ramke\"\"\"\n super(Application,self).__init__(master)\n self.grid()\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\"Tworzy widgety potrzebne do ramki\"\"\"\n # utwórz etykiete z instrukcją\n Label(self,\n text = \"Wprowadz informacje do nowego opowiadania\",\n ).grid(row = 0, column = 0, columnspan =2, sticky = W)\n\n # utwórz etykiete i pole znakowe służąace do wpisania imienia osoby\n Label(self,\n text = \"Osoba: \",\n ).grid(row = 1, column = 0, sticky = W)\n self.person_entry = Entry(self)\n self.person_entry.grid(row = 1, column = 1, sticky = W)\n\n # utwórz etykiete i pole znakowe słuzące do wpisania rzeczownika w liczbie mnogiej\n Label(self,\n text = \"Podaj rzeczownik w liczbie mnogiej:\",\n ).grid(row = 2, column = 0, sticky = W)\n self.noun_entry = Entry(self)\n self.noun_entry.grid(row = 2, column = 1, sticky = W)\n\n #utwóz etykiete i pole znakowe do wpisania czasownika\n Label(self,\n text = \"Podaj czasownik\",\n ).grid(row = 3 , column = 0, sticky = W)\n self.verb_entry = Entry(self)\n self.verb_entry.grid(row =3 , column = 1, sticky = W)\n\n #utwórz etykiete do pół wyboru przemiotników\n Label(self,\n text = \"Przymiotniki:\",\n ).grid(row = 4, column = 0, sticky = W)\n self.is_itchy = BooleanVar()\n Checkbutton(self,\n text = \"naglace\",\n variable = self.is_itchy\n ).grid(row = 4, column = 1, sticky = W)\n\n self.is_electric = BooleanVar()\n Checkbutton(self,\n text = \"elektryzujace\",\n variable = self.is_electric\n ).grid(row = 4, column = 2, sticky = W)\n\n self.is_joyus = BooleanVar()\n Checkbutton(self,\n text = \"radosne\",\n variable = self.is_joyus\n ).grid(row = 4, column = 3, sticky = W)\n\n Label(self,\n text = \"Czesci ciala:\",\n ).grid(row = 5, column = 0, sticky = W)\n self.body_part = StringVar()\n self.body_part.set(None)\n body_parts = [\"pepek\",\"noga\",\"nerka\"]\n column = 1\n for part in body_parts:\n Radiobutton(self,\n text = part,\n variable = self.body_part,\n value = part\n ).grid(row = 5, column = column, sticky = W)\n column += 1\n\n # przycisk akceptacji danych\n Button(self,\n text = \"Kliknij aby wyświetlic opowiadanie\",\n command = self.tell_story\n ).grid(row = 6, column = 0, sticky = W)\n self.story_text = Text(self, width = 75, height = 10, wrap = WORD)\n self.story_text.grid(row = 7, column = 0, sticky = W)\n\n def tell_story(self):\n \"\"\"Wpisz w pole tekstowe opowiadanie oparte na danych uzyttkownika\"\"\"\n # pobierz wartosci interfejsu gui\n person = self.person_entry.get()\n noun = self.noun_entry.get()\n verb = self.verb_entry.get()\n adjectives = \"\"\n if self.is_itchy.get():\n adjectives += \"naglące\"\n if self.is_joyus.get():\n adjectives += \"radosne\"\n if self.is_electric.get():\n adjectives += \"elektryzujace\"\n\n body_part = self.body_part.get()\n\n # create the story\n story = \"Uzytkownik tego programu\"\n story += person\n story += \"chciał nauczyc się programowania\"\n story += \"jest on\"\n story += noun\n story += adjectives\n story += verb\n story += body_part + \".\"\n\n self.story_text.delete(0.0, END)\n self.story_text.insert(0.0, story)\n\nroot = Tk()\nroot.title(\"Mad Lib\")\napp = Application(root)\nroot.mainloop()\n\n\n","sub_path":"GUI 
Exercises/Mad Lib.py","file_name":"Mad Lib.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"244714580","text":"import urllib.request, urllib.parse, urllib.error\nimport json\nimport ssl\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\nserviceurl = 'http://py4e-data.dr-chuck.net/json?'\naddress = input('Enter location: ')\nurl = serviceurl + urllib.parse.urlencode({'address': address})\nprint('Retrieving',url)\nuh = urllib.request.urlopen(url,context=ctx)\ndata = uh.read().decode()\nprint('Retrieved',len(data),'charactors')\njs = json.loads(data)\nif not js or 'status' not in js or js['status'] != 'OK':\n print('==== Failure To Retrieve ====')\n print(data)\n exit()\nPlace_id = js[\"results\"][0]['place_id']\nprint(Place_id)\n","sub_path":"14.2.py","file_name":"14.2.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"194771699","text":"'''\nGiven an array, count the unique values\n\n'''\n\n\ndef unique_value(arr):\n if len(arr) <= 1:\n return arr\n\n i = 0\n for j in range(1, len(arr)):\n if arr[i] != arr[j]:\n i += 1\n arr[i] = arr[j]\n\n return arr[:i+1]\n\n\nprint(unique_value([1, 1, 2, 2, 4, 5, 6, 6, 7]))\n","sub_path":"count_unique.py","file_name":"count_unique.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"322616647","text":"import csv\nimport io\nimport logging\nimport os\nimport sys\nfrom tqdm import tqdm\n_BUCKET_NAME = sys.argv[1]\n\nfrom google.cloud import storage\nclient = storage.Client()\nbucket = client.get_bucket(_BUCKET_NAME)\n\ndef process():\n print(\"start processing bucket\", _BUCKET_NAME)\n if os.path.isfile('fulldata.csv'):\n os.remove('fulldata.csv')\n blobs = list(bucket.list_blobs())\n with open(\"fulldata.csv\", \"a\") as f:\n for blob in tqdm(blobs):\n try:\n user_knowledge, quality, label, img_name = blob.name.split(\"/\")\n if str(img_name).endswith(\".jpg\"):\n f.write(\"gs://\"+_BUCKET_NAME+\"/\"+blob.name+\",\"+quality+\",\"+label+\",\"+img_name+\"\\n\")\n except:\n pass\n\nprocess()\n\n","sub_path":"cloud/reading_files_bucket.py","file_name":"reading_files_bucket.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"114783398","text":"# Package imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom testCases import *\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nfrom planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets\n\nnp.random.seed(1) # set a seed so that the results are consistent\n\nX, Y = load_planar_dataset()\nplt.scatter(X[0, :], X[1, :], c=np.squeeze(Y), s=40, cmap=plt.cm.Spectral)\nplt.show()\nshape_X = X.shape\nshape_Y = Y.shape\nm = shape_X[1]\nprint('The shape of X is: '+str(shape_X))\nprint('The shape of Y is: '+str(shape_Y))\nprint('I have m = %d training example.' %(m))\nclf = sklearn.linear_model.LogisticRegressionCV()\nclf.fit(X.T, Y.T)\nplot_decision_boundary(lambda x: clf.predict(x), X, Y)\nplt.title(\"Logistic Regression\")\nLR_predictions = clf.predict(X.T)\nprint(LR_predictions)\nprint ('Accuracy of logistic regression: %d ' %\n float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +\n '% ' + \"(percentage of correctly labelled datapoints)\")\n\ndef layer_sizes(X, Y):\n n_x = X.shape[0]\n n_h = 4\n n_y = Y.shape[0]\n return (n_x, n_h, n_y)\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n\n Returns:\n params -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n np.random.seed(2)\n W1 = np.random.randn(n_h, n_x)*0.01\n b1 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_y, n_h)*0.01\n b2 = np.zeros((n_y, 1))\n assert(W1.shape == (n_h, n_x))\n assert(b1.shape == (n_h, 1))\n assert(W2.shape == (n_y, n_h))\n assert(b2.shape == (n_y, 1))\n parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2}\n return parameters\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Argument:\n X -- input data of size (n_x, m)\n parameters -- python dictionary containing your parameters(output of initialization function)\n return:\n A2 -- The sigmoid output of the second activation\n cache -- a dictionary containing \"Z1\",\"A1\",\"Z2\" and \"A2\"\n\n \"\"\"\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n Z1 = np.dot(W1, X) + b1\n A1 = np.tanh(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = 1/(1+np.exp(-Z2))\n # print(A2.shape)\n assert(A2.shape == (1, X.shape[1]))\n cache = {\"Z1\": Z1, \"A1\": A1, \"Z2\": Z2, \"A2\": A2}\n return A2, cache\n\n# GRADED FUNCTION: compute_cost\ndef compute_cost(A2, Y, parameters):\n \"\"\"\n Computes the cross-entropy cost given in equation (13)\n\n Arguments:\n A2 -- The sigmoid output of the second activation, of shape (1, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n parameters -- python dictionary containing your parameters W1, b1, W2 and b2\n\n Returns:\n cost -- cross-entropy cost given equation (13)\n \"\"\"\n m = Y.shape[1]\n logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1-A2), 1-Y)\n cost = -np.sum(logprobs)/m\n cost = np.squeeze(cost)\n assert(isinstance(cost, float)) # 判断cost 是否是float类型\n return cost\n\n# GRADED FUNCTION: backward_propagation\n\ndef backward_propagation(parameters, cache, X, Y):\n \"\"\"\n Implement the backward propagation using the instructions above.\n Arguments:\n parameters -- python dictionary containing our parameters\n cache -- a 
dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\n X -- input data of shape (2, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n Returns:\n grads -- python dictionary containing your gradients with respect to different parameters\n \"\"\"\n m = X.shape[1]\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n A1 = cache[\"A1\"]\n A2 = cache[\"A2\"]\n dZ2 = A2 - Y\n dW2 = np.dot(dZ2, A1.T)/m\n db2 = np.sum(dZ2, axis=1, keepdims=True)/m\n dZ1 = np.multiply(np.dot(W2.T, dZ2), (1 - np.power(A1, 2)))\n dW1 = np.dot(dZ1, X.T)/m\n db1 = np.sum(dZ1, axis=1, keepdims=True)/m\n\n grads = {\"dW1\": dW1, \"db1\": db1, \"dW2\": dW2, \"db2\": db2}\n return grads\n\n# GRADED FUNCTION: update_parameters\n\ndef update_parameters(parameters, grads, learning_rate=1.2):\n \"\"\"\n Updates parameters using the gradient descent update rule given above\n Arguments:\n parameters -- python dictionary containing your parameters\n grads -- python dictionary containing your gradients\n Returns:\n parameters -- python dictionary containing your updated parameters\n \"\"\"\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n\n W1 = W1 - learning_rate*dW1\n b1 = b1 - learning_rate*db1\n W2 = W2 - learning_rate*dW2\n b2 = b2 - learning_rate*db2\n\n parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2}\n return parameters\n\n# GRADED FUNCTION: nn_model\ndef nn_model(X, Y, n_h, num_iterations=10000, print_cost=False):\n \"\"\"\n Arguments:\n X -- dataset of shape (2, number of examples)\n Y -- labels of shape (1, number of examples)\n n_h -- size of the hidden layer\n num_iterations -- Number of iterations in gradient descent loop\n print_cost -- if True, print the cost every 1000 iterations\n Returns:\n parameters -- parameters learnt by the model. 
They can then be used to predict.\n \"\"\"\n np.random.seed(3)\n n_x = layer_sizes(X, Y)[0]\n n_y = layer_sizes(X, Y)[2]\n parameters=initialize_parameters(n_x, n_h, n_y)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n for i in range(0, num_iterations):\n A2, cache = forward_propagation(X, parameters)\n cost = compute_cost(A2, Y, parameters)\n grads = backward_propagation(parameters, cache, X, Y)\n parameters = update_parameters(parameters, grads)\n if print_cost and i % 1000 == 0:\n print(\"cost after iteratin %i:%f\"%(i, cost))\n return parameters\n\n\n# GRADED FUNCTION: predict\n\ndef predict(parameters, X):\n \"\"\"\n Using the learned parameters, predicts a class for each example in X\n Arguments:\n parameters -- python dictionary containing your parameters\n X -- input data of size (n_x, m)\n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n A2, cache = forward_propagation(X, parameters)\n prediction = (A2 > 0.5)\n return prediction\n\nparameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=True)\nplt.title(\"Decision Boundary for hidden layer size \" + str(4))\nplot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\n\npredictions = predict(parameters, X)\nprint(\"Accuracy: %d\" % float((np.dot(Y, predictions.T) + np.dot(1-Y, 1-predictions.T))/float(Y.size)*100)+\"%\")\n\n# 调整隐藏神经元的数目观察结果\nplt.figure()\nhidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]\n# enumerate() 同时返回索引和值\nfor i, n_h, in enumerate(hidden_layer_sizes):\n plt.subplot(5, 2, i+1)\n plt.title(\"Hidden Layer of size %d\" % n_h)\n parameters = nn_model(X, Y, n_h, num_iterations=5000)\n plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\n predictions = predict(parameters, X)\n accuracy = float((np.dot(Y, predictions.T) + np.dot(1-Y, 1-predictions.T))/float(Y.size)*100)\n print(\"Accuracy for {} hidden units: {} %\".format(n_h, accuracy))\n","sub_path":"第一课第三周编程作业/第一课第三周编程作业/assignment3/one_three.py","file_name":"one_three.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"264740873","text":"#!/usr/bin/python\n\n# How many followers do you have?\nimport urllib.request\nimport re\n\nfeeds = [\n 'rbowen','centosproject','theasf','realDonaldTrump'\n ];\nfor feed in feeds:\n req = urllib.request.Request( 'https://twitter.com/' + feed,\n data = None,\n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n } )\n f = urllib.request.urlopen(req)\n html = f.read().decode('utf-8')\n # print (html)\n\n# Looks like ...\n# 2,615
\n# Followers
\n\n print ( feed + ': ' + re.search('.*?followers\">.+?statnum\">([\\d,MK]+).*?<.*?statlabel\"> Followers.*', html, re.DOTALL).group(1) )\n\n","sub_path":"followers.py","file_name":"followers.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"471377301","text":"from bs4 import BeautifulSoup\nimport unicodedata\nimport html\nfrom pyspark import SparkContext, SparkConf\nimport argparse\nimport json\n\nclosings = [\n \"Regards\",\n \"Reg\",\n \"Best,\",\n \"Thanks,\",\n \"Sent from my iPhone\",\n \"Sent from my ipad\",\n \"Sent from my android\",\n \"Sent from my mobile device\",\n \"Sincerely\",\n \"Yours truly\",\n \"Yours sincerely\",\n \"Best regards\",\n \"Cordially\",\n \"Yours respectfully\",\n \"Warm regards\",\n \"Best wishes\",\n \"With appreciation\",\n \"Cordially yours\",\n \"Fond regards\",\n \"In appreciation\",\n \"In sympathy\",\n \"Kind regards\",\n \"Kind thanks\",\n \"Kind wishes\",\n \"Many thanks\",\n \"Regards\",\n \"Respectfully\",\n \"Respectfully yours\",\n \"Sincerely\",\n \"Sincerely yours\",\n \"Warm regards\",\n \"Warm wishes\",\n \"Warmly\",\n \"With appreciation\",\n \"With deepest sympathy\",\n \"With gratitude\",\n \"With sincere thanks\",\n \"With sympathy\",\n \"Your help is greatly appreciated\",\n \"Yours cordially\",\n \"Yours faithfully\",\n \"Yours sincerely\",\n \"Yours truly\",\n \"From:\",\n \"Sent:\"\n]\n\nclosings = closings + [word.lower() for word in closings]\n\nclosings = closings + [word.lower() for word in closings]\n\n\ndef split(txt, seps):\n default_sep = seps[0]\n\n # we skip seps[0] because that's the default seperator\n for sep in seps[1:]:\n txt = txt.replace(sep, default_sep)\n return [i.strip() for i in txt.split(default_sep)]\n\n\ndef remove_html(doc_tuple):\n doc_id, raw = doc_tuple\n soup = BeautifulSoup(\n raw, 'lxml') # create a new bs4 object from the html data loaded\n for script in soup([\"script\",\n \"style\"]): # remove all javascript and stylesheet code\n script.extract()\n # get text\n text = soup.get_text()\n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # encode unicode characters\n text = unicodedata.normalize(\"NFKD\", text)\n # encode html characters\n text = html.unescape(text)\n text = split(text, closings)[0]\n return {'id': doc_id, 'body': text}\n\n\nif __name__ == \"__main__\":\n\n desc = 'remove html tags from email text'\n parser = argparse.ArgumentParser(\n description=desc,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=desc)\n parser.add_argument(\"-i\", \"--input_path\", help=\"directory with json texts\")\n parser.add_argument(\n \"-o\",\n \"--output_path\",\n help=\n \"output directory for spark results of json texts with html tags removed\"\n )\n args = parser.parse_args()\n conf = SparkConf().setAppName(\"Html Tag Removal\")\n sc = SparkContext(conf=conf)\n rdd = sc.textFile(args.input_path)\n\n def doc_to_tuple(sz):\n j = json.loads(sz)\n return (j.get('id'), j.get('body'))\n\n cleandoc = rdd.map(doc_to_tuple).map(remove_html).cache()\n\n output = cleandoc.map(lambda x: json.dumps(x))\n\n output.saveAsTextFile(args.output_path)\n","sub_path":"spark/rmhtml.py","file_name":"rmhtml.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"239080006","text":"from query.report_base import ReportBase\n\nclass CNT01(ReportBase):\n \n def get_aggregate(cls,args):\n #{\"status\":\"processing\"},\n _aggregate = [\n {\"$match\": {\"$and\" : [\n { \"history.0.dept\": args['dept'] },\n {\"history.0.date\":{\"$gte\":args['startDate'],\"$lt\":args['endDate']}},\n {\"name\":'Contact'}\n ]\n }\n\n },\n {\"$sort\": {\"no\":1}}\n ] \n return _aggregate\n\n def run(cls,server,args):\n #此表有申請部門權限, 只能查自己部門的報表, 故需檢查avatar 部門=history.0.dept\n def check_args(args):\n checked = {}\n if 'year' not in args or 'month' not in args or 'avatar' not in args:\n return {\"error\":\"查詢條件沒有全部輸入!無法查詢\"}\n\n try:\n if 'userid' in args and args['userid'] is not None and args['userid'].strip()!= '':\n checked.update({'userid':args['userid']})\n checked.update({'startDate':cls.cvt_startDate('{}/{}/01'.format(args['year'],args['month']))})\n checked.update({'endDate':cls.cvt_firstDayNextMonth(args['year'],args['month'])})\n checked.update({\"dept\":args[\"avatar\"][\"dept_id\"]})\n checked.update({\"avatar\":args[\"avatar\"]})\n except Exception as e:\n return {\"error\":\"查詢條件格式錯誤!無法查詢-{}\".format(str(e))} \n\n return checked \n \n #檢查參數\n newargs = check_args(args)\n if \"error\" in newargs:\n return newargs\n \"\"\"\n newargs = {}\n newargs.update({'dept':args['dept']})\n newargs.update({'startDate':cls.cvt_startDate('{}/{}/01'.format(args['year'],args['month']))})\n newargs.update({'endDate':cls.cvt_firstDayNextMonth(args['year'],args['month'])})\n \"\"\"\n pipeline = cls.get_aggregate(newargs)\n if 'userid' in newargs:\n pipeline[0][\"$match\"][\"$and\"].append({\"history.0.userid\": args['userid']})\n data = cls.mongo(server,'headway','flow',pipeline)\n #db = get_mongo()['headway']['flow']\n #data = list(db.aggregate(pipeline))\n result = []\n result.append([\"項目\",\"編號\",\"委託名稱\",\"委託人\",\"執行人\",\"狀態\",\"目前站別\"])\n for item in data:\n temp = ['','','','','','','']\n temp[0]=data.index(item) + 1\n temp[1]=item[\"no\"]\n temp[2]=item[\"history\"][0][\"data\"][0][\"value\"]\n temp[3]=item[\"history\"][0][\"username\"]\n temp[6]=''\n for history in item[\"history\"][::-1]:\n if history[\"id\"]==\"ExecutorRes\":\n temp[4]=history[\"username\"]\n break\n temp[5]=cls.toStatus(item[\"status\"])\n\n result.append(temp)\n return cls.result(result)","sub_path":"app/query/report/CNT01.py","file_name":"CNT01.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"48233587","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport argparse\nimport json\nimport queue\nimport shlex\nimport os\nimport re\nimport subprocess\nimport threading\nimport time\nimport urllib.parse\nfrom datetime import date, datetime\nfrom glob import glob\n\nimport pypandoc\n\n\nYEAR_DEATHS = \"{year}\"\nMONTH_DEATHS = \"Deaths_in_{month}_{year}\"\nDAY_DEATHS = \"{month}_{day\"\nFIND_DEATHS_SECTION = re.compile(r'^== *Deaths *==', re.MULTILINE)\nFIND_NEXT_SECTION = re.compile(r'^== *[^=]', re.MULTILINE)\nPERSON_LINK = re.compile(r'\\[\\[([^\\[]*?)\\]\\], .*?\\(b.')\n\nINFOBOX_MARKER = re.compile(r'^ *{{ *Infobox.*', re.IGNORECASE)\nINFOBOX_TOKENIZER = re.compile(r'({{ *Infobox|{{|}})', re.IGNORECASE)\nKEY_VALUE_PAIR = re.compile(r'\\| *(\\S+) *= *(.*)', re.IGNORECASE | re.MULTILINE)\nCLUTTER = re.compile(r'{{|}}|\\[\\[|\\]\\]')\nDATE_ODD = re.compile(r'(\\d{1,2}) death year and age.*?(\\d{4}).*\\| *(\\d{1,2})', re.IGNORECASE)\nDATE = re.compile(r'.*? *\\| *(\\d{4}) *\\| *(\\d+) *\\| *(\\d+).*')\nDATE2 = re.compile(r'OldStyleDate *\\| *(\\d+) *([a-z]*) *\\| *(\\d{4}).*', re.IGNORECASE)\nDATE3 = re.compile(r'(\\d+) +([a-z]*),? death year and age *\\| *(\\d{4}).*', re.IGNORECASE)\nDATE4 = re.compile(r'([a-z]*) +(\\d+),? death year and age *\\| *(\\d{4}).*', re.IGNORECASE)\nDATE_UNCERTAIN_DAY = re.compile(r'(\\d+)(?:[/-–]\\d+| or \\d+) +([a-z]+),? +(\\d{4})', re.IGNORECASE)\nDATE_UNCERTAIN_DAY2 = re.compile(r'([a-z]+) +(\\d+)(?:[/-–]\\d+| or \\d+),? +(\\d{4})', re.IGNORECASE)\nDATE_UNCERTAIN_DATE = re.compile(r'([a-z]+) +(\\d+),? or [a-z]+ +\\d+,? +(\\d{4})', re.IGNORECASE)\nDATE_YEAR_RANGE = re.compile(r'(\\d{4}) *(?:[-–]|or) *(\\d{4})', re.IGNORECASE)\nEXTRA_AGE = re.compile(r'(, *aged? *\\d*|\\(aged? *\\d*)|age *\\d+', re.IGNORECASE)\nEXTRA_REF = re.compile(r'{{ *(ref|efn|Cn).*}}')\nEXTRA_TAGS = re.compile(r'|$)||', re.IGNORECASE)\nEXTRA_PARENTHESES = re.compile(r'\\(.*\\)', re.IGNORECASE)\nEXTRA_CIRCA = re.compile(r'c\\.|circa|ca\\.', re.IGNORECASE)\nEXTRA_QUOTES = re.compile(r'[\\']', re.IGNORECASE)\nEXTRA_NOWRAP = re.compile(r'nowrap *\\|', re.IGNORECASE)\nEXTRA_BR = re.compile(r'
', re.IGNORECASE)\nEXTRA_ADOPTION_OF_CALENDAR = re.compile(r'Adoption *of *the *Gregorian.*', re.IGNORECASE)\n\nCHILD_ARTICLE = re.compile(r'{{ *main article *\\|(.*?)}}', re.IGNORECASE)\n\nclass Worker(threading.Thread):\n def __init__(self, thread_id, name_queue, name_queue_lock, args, function):\n super().__init__()\n self.thread_id = thread_id\n self.name_queue = name_queue\n self.name_queue_lock = name_queue_lock\n self.args = args\n self.function = function\n\n def run(self):\n while True:\n with self.name_queue_lock:\n if self.name_queue.empty():\n break\n\n name = self.name_queue.get()\n\n self.function(self.args, name)\n\n\ndef download_article(args, title, output):\n os.makedirs(os.path.dirname(output), exist_ok=True)\n if os.path.exists(output):\n if args.verbose:\n print(\" file exists: {}\".format(output))\n\n return\n\n url = \"https://en.wikipedia.org/w/api.php?action=query&titles={title}&prop=revisions&rvprop=content&format=json\" \\\n .format(title=urllib.parse.quote(title))\n command = \"curl -s -S -z {output} -o {output} {url}\".format(url=shlex.quote(url), output=shlex.quote(output))\n if args.verbose:\n print(\" running: {}\".format(command))\n\n subprocess.call(shlex.split(command))\n\n\ndef grab_year(args, year):\n if args.verbose:\n print(\"fetching year {}...\".format(year))\n\n title = YEAR_DEATHS.format(year=year)\n output = \"years/year-{year}.json\".format(year=year)\n download_article(args, title, output)\n\n\ndef grab_names_from_content(content):\n results = []\n for match in PERSON_LINK.finditer(content):\n name = match.group(1)\n chunks = name.split(\"|\")\n if len(chunks) > 1:\n name = chunks[0].strip()\n\n results.append(name)\n\n return results\n\n\ndef grab_names_from_file(input_file):\n results = []\n data = json.load(open(input_file))\n for unused_pageid, page_data in data[\"query\"][\"pages\"].items():\n if \"revisions\" not in page_data:\n continue\n\n content = page_data[\"revisions\"][0][\"*\"]\n deaths_section_match = FIND_DEATHS_SECTION.search(content)\n next_section_match = FIND_NEXT_SECTION.search(content, deaths_section_match.end())\n end = len(content)\n if next_section_match:\n end = next_section_match.start()\n\n section = content[deaths_section_match.start():end]\n\n results += grab_names_from_content(section)\n\n return results\n\n\ndef parse_date(value):\n value = value.strip(\", |\")\n if not value:\n return None\n\n value = EXTRA_PARENTHESES.sub('', value)\n value = EXTRA_CIRCA.sub('', value)\n value = EXTRA_NOWRAP.sub('', value)\n value = EXTRA_QUOTES.sub('', value)\n value = EXTRA_AGE.sub('', value)\n value = EXTRA_ADOPTION_OF_CALENDAR.sub('', value)\n value = value.replace(\"ndash\", \"-\")\n value = value.replace(\"baptized\", \"\")\n value = value.strip(\", |\")\n\n specials = {\n \"February or March 1945\": date(1945, 2, 1),\n \"February or March, 1945\": date(1945, 2, 1),\n \"1850s\": date(1850, 1, 1),\n \"1860s\": date(1860, 1, 1),\n \"Between October 4, 1919 and January 2, 1920\": date(1920, 10, 4),\n \"Unknown\": None,\n \"29 February 1900\": date(1900, 2, 18), # different calendar in eastern Europe\n \"September 13, 1922, or September 10, 1923\": date(1922, 9, 13),\n \"Death year and age|1992|1912|4|23\": date(1992, 4, 23),\n \"late 1867 or early 1868\": date(1867, 12, 31),\n \"around 1840\": date(1840, 1, 1),\n \"September 23, 1968 or 1969\": date(1968, 9, 23),\n \"FETCH_WIKIDATA\": None,\n \"Month? 
Day?, 1879\": date(1879, 1, 1),\n \"Unknown, 1677 and 1736 claimed\": date(1677, 1, 1),\n \"Kathryn Johanna Kuhlman\": date(1907, 5, 9),\n }\n if value in specials:\n return specials[value]\n\n # 7 {{Death year and age|df=yes|1962|1896|9}}\n # must be tried before DATE, else that matches\n mo = DATE_ODD.match(value)\n if mo:\n chunk = \" \".join(mo.groups())\n try:\n return datetime.strptime(chunk, \"%d %Y %m\")\n except ValueError:\n pass\n\n mo = DATE.match(value)\n if mo:\n return date(*map(int, mo.groups()))\n\n mo = DATE2.match(value)\n if mo:\n chunk = \" \".join(mo.groups())\n try:\n return datetime.strptime(chunk, \"%d %B %Y\")\n except ValueError:\n pass\n\n mo = DATE3.match(value)\n if mo:\n chunk = \" \".join(mo.groups())\n try:\n return datetime.strptime(chunk, \"%d %B %Y\")\n except ValueError:\n pass\n\n mo = DATE4.match(value)\n if mo:\n chunk = \" \".join(mo.groups())\n try:\n return datetime.strptime(chunk, \"%B %d %Y\")\n except ValueError:\n pass\n\n mo = DATE_YEAR_RANGE.match(value)\n if mo:\n return date(int(mo.group(1)), 1, 1)\n\n mo = DATE_UNCERTAIN_DAY.match(value)\n if mo:\n chunk = \" \".join(mo.groups())\n try:\n return datetime.strptime(chunk, \"%d %B %Y\")\n except ValueError:\n pass\n\n mo = DATE_UNCERTAIN_DAY2.match(value)\n if mo:\n chunk = \" \".join(mo.groups())\n try:\n return datetime.strptime(chunk, \"%B %d %Y\")\n except ValueError:\n pass\n\n mo = DATE_UNCERTAIN_DATE.match(value)\n if mo:\n chunk = \" \".join(mo.groups())\n try:\n return datetime.strptime(chunk, \"%B %d %Y\")\n except ValueError:\n pass\n\n formats = [\"%B %d, %Y\",\n \"%B %d , %Y\",\n \"%B %d %Y\",\n \"%d %B %Y\",\n \"%d %B, %Y\",\n \"%d %B , %Y\",\n \"%B %Y\",\n \"%Y-%m-%d\",\n \"%Y\"]\n value = EXTRA_BR.sub(\"|\", value)\n for chunk in value.split(\"|\"):\n chunk = chunk.strip()\n for date_format in formats:\n try:\n return datetime.strptime(chunk, date_format)\n except ValueError:\n pass\n\n raise Exception(\"couldn't parse this as date: {}\".format(value))\n\n\ndef parse_infobox(data, name):\n if not INFOBOX_MARKER.match(data):\n return\n\n result = {}\n for match in KEY_VALUE_PAIR.finditer(data):\n key, value = match.groups()\n if key not in (\"birth_date\", \"birth_place\",\n \"death_date\", \"death_place\", \"death_cause\",\n \"background\", \"occupation\"):\n continue\n\n value = value.replace(\"{{Greece Old Style dating}}\", \"\")\n value = EXTRA_TAGS.sub('', value)\n value = EXTRA_REF.sub('', value)\n value = CLUTTER.sub('', value)\n\n if key.endswith(\"_date\"):\n if name.startswith(\"Auguste and Louis\"):\n if key == \"birth-date\":\n value = date(1862, 10, 19)\n else:\n value = date(1954, 4, 10)\n else:\n value = parse_date(value)\n else:\n if value.startswith(\"hlist\"):\n value = list(map(lambda x: x.lower(), value.split(\"|\")[1:]))\n\n result[key] = value\n\n\n return result\n\n\ndef parse_article(filename):\n json_data = json.load(open(filename))\n page_data = list(json_data[\"query\"][\"pages\"].items())[0][1]\n if \"revisions\" not in page_data:\n return\n\n content = page_data[\"revisions\"][0][\"*\"]\n name = os.path.splitext(os.path.basename(filename))[0]\n child_articles = CHILD_ARTICLE.findall(content)\n\n result = {\n \"name\": name,\n \"article_size\": len(content),\n \"child_article_count\": len(child_articles),\n }\n\n tokens = INFOBOX_TOKENIZER.split(content)\n started = False\n level = 0\n block = \"\"\n for i, token in enumerate(tokens):\n if not started:\n if INFOBOX_MARKER.match(token):\n started = True\n level += 1\n block += token\n else:\n block 
+= token\n if token == \"{{\":\n level += 1\n elif token == \"}}\":\n level -= 1\n if level == 0:\n break\n\n infobox = parse_infobox(block, name)\n if infobox:\n result.update(infobox)\n\n # print(result)\n return result\n\n\ndef grab_years(args, start, end):\n name_queue_lock = threading.Lock()\n name_queue = queue.Queue()\n for year in range(start, end + 1):\n name_queue.put(year)\n\n threads = []\n for i in range(args.thread_count):\n t = Worker(i, name_queue, name_queue_lock, args, grab_year)\n t.start()\n threads.append(t)\n\n while True:\n time.sleep(1)\n with name_queue_lock:\n if name_queue.empty():\n break\n\n\ndef grab_names(args, input_files, output_file):\n names = []\n for input_file in input_files:\n names += grab_names_from_file(input_file)\n\n open(args.names_file, \"a\").write(\"\\n\".join(names) + \"\\n\")\n\n\ndef download_name(args, name):\n if args.verbose:\n print(\"fetching article {}...\".format(name))\n\n output = \"article/{title}.json\".format(title=name)\n download_article(args, name, output)\n\n\ndef download_names(args, input_file):\n name_queue_lock = threading.Lock()\n name_queue = queue.Queue()\n for name in open(input_file):\n name = name.strip()\n name_queue.put(name)\n\n threads = []\n for i in range(args.thread_count):\n t = Worker(i, name_queue, name_queue_lock, args, download_name)\n t.start()\n threads.append(t)\n\n while True:\n time.sleep(1)\n with name_queue_lock:\n if name_queue.empty():\n break\n\n\ndef parse_articles(args, input_files):\n for filename in input_files:\n parse_article(filename)\n\n\ndef generate_data(args):\n for filename in glob(\"article/*.json\"):\n result = parse_article(filename)\n if not result:\n continue\n\n print(result[\"article_size\"] + result[\"child_article_count\"] * 30000, result[\"name\"])\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", default=False, help=\"verbose output\")\n parser.add_argument(\"--years\", nargs=2, metavar=(\"START\", \"END\"), type=int, help=\"grab years from START to END (inclusive)\")\n parser.add_argument(\"--names\", nargs=\"+\", metavar=\"FILE\", help=\"grab person names from given input JSON files\")\n parser.add_argument(\"-d\", \"--download-names\", action=\"store_true\", help=\"download articles for names in NAMES-FILE\")\n parser.add_argument(\"-n\", \"--names-file\", default=\"names.txt\", metavar=\"NAMES-FILE\", help=\"file to write parsed names to as well as read them from, it is appended to\")\n parser.add_argument(\"-p\", \"--parse\", nargs=\"+\", metavar=\"FILE\", help=\"parse articles\")\n parser.add_argument(\"-t\", \"--thread-count\", type=int, default=10, metavar=\"NUM\", help=\"number of threads for downloading (default: 10)\")\n parser.add_argument(\"-g\", \"--generate\", action=\"store_true\", help=\"generate data file\")\n args = parser.parse_args()\n if args.years:\n grab_years(args, args.years[0], args.years[1])\n\n if args.names:\n grab_names(args, args.names, args.names_file)\n\n if args.download_names:\n download_names(args, args.names_file)\n\n if args.parse:\n parse_articles(args, args.parse)\n\n if args.generate:\n generate_data(args)\n","sub_path":"grab_deaths.py","file_name":"grab_deaths.py","file_ext":"py","file_size_in_byte":13559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"186796076","text":"#!/usr/bin/python\n\nnumN = int(input(\"How many numbers will you enter?\"))\nsum = 0\n\nprint(\"Input num\")\n\nnum = [int(x)for x in input().split()]\n\nfor i in range(0,numN):\n\tsum = num[i] + sum\n\ntotal = sum/numN\n\nprint(\"Average : \", total)\n\n","sub_path":"py_lab/aver_num.py","file_name":"aver_num.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"525148291","text":"# -*- coding: utf-8 -*-\n\n\nimport numpy as np\n\nfrom .base import Layer\nfrom ..activation import Tanh\nfrom ..initialization import GlorotUniform\nfrom ..initialization import Orthogonal\nfrom ..initialization import Zero\n\n\nclass SimpleRNN(Layer):\n \"\"\"Fully-connected RNN where the output is to be fed back to input.(完全连接的RNN在输出将被反馈到输入。)\n\n # Arguments\n output_dim: dimension of the internal projections and the final output.\n init: weight initialization function.\n Can be the name of an existing function (str),\n or a Theano function (see: [initializations](../initializations.md)).\n inner_init: initialization function of the inner cells.\n activation: activation function.\n Can be the name of an existing function (str),\n or a Theano function (see: [activations](../activations.md)).\n W_regularizer: instance of [WeightRegularizer](../regularizers.md)\n (eg. L1 or L2 regularization), applied to the input weights matrices.\n U_regularizer: instance of [WeightRegularizer](../regularizers.md)\n (eg. L1 or L2 regularization), applied to the recurrent weights matrices.\n b_regularizer: instance of [WeightRegularizer](../regularizers.md),\n applied to the bias.\n dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.\n dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.\n\n # References\n - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)\n \"\"\"\n\n def __init__(self, n_out, n_in=None, init=GlorotUniform(), inner_init=Orthogonal(), activation=Tanh(), return_sequence=False):\n self.n_out = n_out\n self.n_in = n_in\n self.init = init\n self.inner_init = inner_init\n self.activation_cls = activation.__class__\n self.activations = []\n self.return_sequence = return_sequence\n\n self.W, self.dW = None, None\n self.U, self.dU = None, None\n self.b, self.db = None, None\n self.last_outputs = None\n self.last_input = None\n self.out_shape = None\n\n def connect_to(self, prev_layer=None):\n if prev_layer is not None:\n assert len(prev_layer.out_shape) == 3\n n_in = prev_layer.out_shape[-1]\n else:\n assert self.n_in is not None\n n_in = self.n_in\n\n self.W = self.init((n_in, self.n_out))\n self.U = self.inner_init((self.n_out, self.n_out))\n self.b = Zero()((self.n_out,))\n\n if self.return_sequence:\n self.out_shape = (None, None, self.n_out)\n else:\n self.out_shape = (None, self.n_out)\n\n def forward(self, input, *args, **kwargs):\n assert np.ndim(input) == 3, 'Only support batch training.'\n\n self.last_input = input\n nb_batch, nb_timestep, nb_in = input.shape\n outputs = Zero()((nb_batch, nb_timestep, self.n_out))\n\n if len(self.activations) == 0:\n self.activations = [self.activation_cls() for _ in range(nb_timestep)]\n\n outputs[:, 0, :] = self.activations[0].forward(np.dot(input[:, 0, :], self.W) + self.b)\n\n for i in range(1, nb_timestep):\n outputs[:, i, :] = self.activations[i].forward(\n np.dot(input[:, i, :], self.W) +\n np.dot(outputs[:, i - 1, :], self.U) + self.b)\n\n self.last_outputs = outputs\n if self.return_sequence:\n return self.last_outputs\n else:\n return self.last_outputs[:, -1, :]\n\n def backward(self, pre_grad, *args, **kwargs):\n zero = Zero()\n self.dW = zero(self.W.shape)\n self.dU = zero(self.U.shape)\n self.db = zero(self.b.shape)\n\n # hiddens.shape == (nb_timesteps, nb_batch, nb_out)\n hiddens = np.transpose(self.last_outputs, (1, 0, 2))\n if self.return_sequence:\n # check shape 
#\n # self.outputs.shape == (nb_batch, nb_timesteps, nb_out)\n assert hiddens.shape == pre_grad.shape\n nb_timesteps = pre_grad.shape[0]\n if not self.first_layer:\n layer_grad = Zero()(pre_grad.shape)\n\n for timestep1 in np.arange(nb_timesteps)[::-1]:\n delta = pre_grad[timestep1] * self.activations[timestep1].derivative()\n for timestep2 in np.arange(timestep1)[::-1]:\n self.dU += np.dot(hiddens[timestep2].T, delta)\n self.dW += np.dot(self.last_input[:, timestep2 + 1, :].T, delta)\n self.db += np.mean(delta, axis=0)\n if not self.first_layer:\n layer_grad[timestep2 + 1] += np.dot(delta, self.W.T)\n delta = np.dot(delta, self.U.T)\n\n if timestep1 == 0 or timestep2 == 0:\n self.dW += np.dot(self.last_input[:, 0, :].T, delta)\n self.db += np.mean(delta, axis=0)\n if not self.first_layer:\n layer_grad[0] += np.dot(delta, self.W.T)\n\n else:\n nb_timesteps = self.last_outputs.shape[1]\n nb_batchs = self.last_outputs.shape[0]\n assert (nb_batchs, self.last_outputs.shape[2]) == pre_grad.shape\n if not self.first_layer:\n layer_grad = Zero()(hiddens.shape)\n\n delta = pre_grad * self.activations[nb_timesteps - 1].derivative()\n for timestep2 in np.arange(nb_timesteps - 1)[::-1]:\n self.dU += np.dot(hiddens[timestep2].T, delta)\n self.dW += np.dot(self.last_input[:, timestep2 + 1, :].T, delta)\n self.db += np.mean(delta, axis=0)\n if not self.first_layer:\n layer_grad[timestep2 + 1] += np.dot(delta, self.W.T)\n delta = np.dot(delta, self.U.T)\n\n if timestep2 == 0:\n self.dW += np.dot(self.last_input[:, timestep2 + 1, :].T, delta)\n self.db += np.mean(delta, axis=0)\n if not self.first_layer:\n layer_grad[0] += np.dot(delta, self.W.T)\n\n if not self.first_layer:\n return layer_grad\n\n @property\n def params(self):\n return self.W, self.U, self.b\n\n @property\n def grads(self):\n return self.dW, self.dU, self.db\n\n\nclass GRU(Layer):\n def __init__(self):\n pass\n\n\nclass LSTM(Layer):\n def __init__(self):\n pass\n","sub_path":"npdl/layers/reccurent.py","file_name":"reccurent.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"172926365","text":"#!/usr/bin/env python\n\n# Required packages\nimport argparse\nfrom configparser import ConfigParser\n\nimport requests\nfrom astropy.time import Time\nfrom astropy.io import fits\nimport numpy as np\n\n\ndef DMS_to_detector(data, detector):\n \"\"\"Transformations from Robert Jedzrejewski\n https://github.com/STScI-JWST/jwst/blob/master/jwst/refpix/reference_pixels.py#L690\n \"\"\"\n if detector == 'NRS1':\n # NRS1 is just flipped over the line X=Y\n data = np.swapaxes(data, 2, 3)\n\n if detector == 'NRS2':\n # NRS2 is flipped over the line Y=X, then rotated 180 degrees\n data = np.swapaxes(data, 2, 3)[:, :, ::-1, ::-1]\n\n if detector == 'NRCA1':\n # NRCA1 is just flipped in X\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCA2':\n # NRCA2 is just flipped in Y\n data = data[:, :, ::-1]\n\n if detector == 'NRCA3':\n # NRCA3 is just flipped in X\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCA4':\n # NRCA4 is just flipped in Y\n data = data[:, :, ::-1]\n\n if detector == 'NRCALONG':\n # NRCA3 is just flipped in X\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCB1':\n # NRCB1 is just flipped in Y\n data = data[:, :, ::-1]\n\n if detector == 'NRCB2':\n # NRCB2 is just flipped in X\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCB3':\n # NRCB1 is just flipped in Y\n data = data[:, :, ::-1]\n\n if detector == 'NRCB4':\n # NRCB4 is just flipped in X\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCBLONG':\n # NRCB1 is just flipped in Y\n data = data[:, :, ::-1]\n\n if detector == 'NIS':\n # NIRISS has a 180 degree rotation followed by a flip across the line\n # X=Y\n data = np.swapaxes(data[:, :, ::-1, ::-1], 2, 3)\n\n if detector == 'GUIDER1':\n # GUIDER1 is flipped in X and Y\n data = data[:, :, ::-1, ::-1]\n\n if detector == 'GUIDER2':\n # GUIDER2 is just flipped in X\n data = data[:, :, :, ::-1]\n\n # MIRI data doesn't need transforming\n\n return data\n\ndef detector_to_DMS(data, detector):\n if detector == 'NRS1':\n # Just flip back\n data = np.swapaxes(data, 2, 3)\n\n if detector == 'NRS2':\n # The inverse is to rotate 180 degrees, then flip over the line Y=X\n data = np.swapaxes(data[:, :, ::-1, ::-1], 2, 3)\n\n if detector == 'NRCA1':\n # Just flip back\n data = data[:, :, ::-1, ::-1]\n\n if detector == 'NRCA2':\n # Just flip back\n data = data[:, :, ::-1]\n\n if detector == 'NRCA3':\n # Just flip back\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCA4':\n # Just flip back\n data = data[:, :, ::-1]\n\n if detector == 'NRCALONG':\n # Just flip back\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCB1':\n # Just flip back\n data = data[:, :, ::-1]\n\n if detector == 'NRCB2':\n # Just flip back\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCB3':\n # Just flip back\n data = data[:, :, ::-1]\n\n if detector == 'NRCB4':\n # Just flip back\n data = data[:, :, :, ::-1]\n\n if detector == 'NRCBLONG':\n # Just flip back\n data = data[:, :, ::-1]\n\n if detector == 'NIS':\n # Just flip and rotate back\n data = np.swapaxes(data, 2, 3)[:, :, ::-1, ::-1]\n \n if detector == 'GUIDER1':\n # Just flip back\n data = data[:, :, ::-1, ::-1]\n\n if detector == 'GUIDER2':\n # Just flip back\n data = data[:, :, :, ::-1]\n\n # MIRI data doesn't need transforming\n\n return data\n\ndef main(args):\n\n config = ConfigParser()\n config.read(args.config_file)\n\n old_hdulist = fits.open(args.input_file)\n\n new_hdulist = fits.HDUList()\n new_hdulist.append(fits.PrimaryHDU())\n new_hdulist[0].header = old_hdulist[0].header\n\n # get the exposure start 
and end times\n start_time = Time(old_hdulist[0].header['EXPSTART'], format='mjd').isot\n end_time = Time(old_hdulist[0].header['EXPEND'], format='mjd').isot\n\n params = {'sTime' : start_time, 'eTime' : end_time}\n\n s = requests.Session()\n\n # jwdmsdevwsvm1 is for testing. The actual DB host will be different.\n url_base = 'http://jwdmsdevwsvm1.stsci.edu/JWDMSEngSpAcc_CV2CV3/TlmMnemonicDataSrv.svc/Data/'\n\n for keyword, mnemonic in config['mnemonics'].items():\n\n # get request to server.\n url = url_base + mnemonic\n\n r = s.get(url, params=params, verify=False)\n\n # Parse json\n parsed_json = r.json()\n\n # json ObsTime has format '/Date(1358619814230+0000)/' which is 1358619814.230 in UNIX time\n # isotime = Time(float(parsed_json['Data'][0]['ObsTime'][6:-7])/1000., format='unix').isot\n\n # just take the first value of the series\n new_hdulist[0].header[keyword] = (parsed_json['Data'][0]['EUValue'], mnemonic.upper())\n\n # add the Engineering Mnemonics section heading\n new_hdulist[0].header.set('', 'Engineering Mnemonics', before=config['mnemonics'].keys()[0])\n new_hdulist[0].header.set('', '', before=config['mnemonics'].keys()[0])\n new_hdulist[0].header.set('', '', before=config['mnemonics'].keys()[0])\n\n # transform from DMS to detector orientation\n pixel_data = DMS_to_detector(old_hdulist['SCI'].data, old_hdulist['PRIMARY'].header['DETECTOR'])\n\n # collapse from 4D to 3D\n nints, ngroups, nx, ny = pixel_data.shape\n\n # add reference output for MIRI\n if old_hdulist['PRIMARY'].header['INSTRUME'] == 'MIRI':\n new_hdulist[0].data = np.append(old_hdulist['SCI'].data.reshape((nints*ngroups, nx, ny)), \n old_hdulist['REFOUT'].data.reshape((nints*ngroups, 256, 1032)), axis=1)\n\n else:\n new_hdulist[0].data = pixel_data.reshape((nints*ngroups, nx, ny))\n\n new_hdulist[0].header.set('', '', before='BSCALE')\n\n # remove the NEXTEND keyword since there is only one extension now\n new_hdulist[0].header.remove('NEXTEND')\n\n # Write out\n new_hdulist.writeto(args.output_file, clobber=True)\n\nif __name__ == '__main__':\n # Command line argument handler.\n parser = argparse.ArgumentParser(\n description='Convert JWST data from DMS format to FITSWriter format',\n epilog='example: flight2ground tlm.cfg input.fits output.fits')\n parser.add_argument('config_file', help='config file with Telemetry FITS keyword/mnemonic pairs')\n parser.add_argument('input_file', help='level 1B data file to reformat')\n parser.add_argument('output_file', help='name of output file')\n args = parser.parse_args()\n main(args)","sub_path":"flight2ground/flight2ground.py","file_name":"flight2ground.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"401790621","text":"import FWCore.ParameterSet.Config as cms\n\nSKFlatMaker = cms.EDAnalyzer(\"SKFlatMaker\",\n\n DataYear = cms.untracked.int32(-1),\n processName = cms.untracked.string(\"HLT\"),\n DebugLevel = cms.untracked.int32(0),\n\n # -- Object Tags -- #\n Muon = cms.untracked.InputTag(\"slimmedMuons\"),\n Electron = cms.untracked.InputTag(\"slimmedElectrons\"),\n Photon = cms.untracked.InputTag(\"slimmedPhotons\"),\n Jet = cms.untracked.InputTag(\"slimmedJets\"),\n GenJet = cms.untracked.InputTag(\"slimmedGenJets\"),\n FatJet = cms.untracked.InputTag(\"slimmedJetsAK8\"),\n GenFatJet = cms.untracked.InputTag(\"slimmedGenJetsAK8\"),\n MET = cms.InputTag(\"slimmedMETs\"),\n LHEEventProduct = cms.untracked.InputTag(\"externalLHEProducer\"),\n LHERunInfoProduct = cms.untracked.InputTag(\"externalLHEProducer\"),\n GenParticle = cms.untracked.InputTag(\"genParticles\"),\n\n #### MiniIso\n pfCandsForMiniIso = cms.untracked.InputTag(\"packedPFCandidates\"),\n ## Muon\n ## https://github.com/cms-sw/cmssw/blob/f493624b3018543865bbf04bb8a48c5dae44bc82/RecoMuon/MuonIsolation/python/muonPFIsolationValues_cff.py\n miniIsoParams = cms.vdouble(0.05, 0.2, 10.0, 0.5, 0.0001, 0.01, 0.01, 0.01, 0.0),\n ## Electron\n ## https://github.com/cms-sw/cmssw/blob/09c3fce6626f70fd04223e7dacebf0b485f73f54/RecoParticleFlow/PFProducer/python/electronPFIsolationValues_cff.py\n miniIsoParamsE = cms.vdouble(0.05, 0.2, 10.0, 0.0, 0.015, 0.015, 0.08, 0.0, 0.0),\n miniIsoParamsB = cms.vdouble(0.05, 0.2, 10.0, 0.0, 0.000, 0.000, 0.00, 0.0, 0.0),\n\n # -- electron information -- #\n rho = cms.untracked.InputTag(\"fixedGridRhoFastjetAll\"),\n conversionsInputTag = cms.untracked.InputTag(\"allConversions\"),\n GsfTrack = cms.untracked.InputTag(\"electronGsfTracks\"),\n electron_EA_NHandPh_file = cms.untracked.FileInPath(\"RecoEgamma/ElectronIdentification/data/Fall17/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_94X.txt\"),\n electron_IDtoSave = cms.untracked.vstring(\n\"cutBasedElectronID-Fall17-94X-V2-veto\",\n\"cutBasedElectronID-Fall17-94X-V2-loose\",\n\"cutBasedElectronID-Fall17-94X-V2-medium\",\n\"cutBasedElectronID-Fall17-94X-V2-tight\",\n'mvaEleID-Fall17-iso-V2-wp80' ,\n'mvaEleID-Fall17-iso-V2-wp90' ,\n'mvaEleID-Fall17-iso-V2-wpHZZ' ,\n'mvaEleID-Fall17-iso-V2-wpLoose',\n'mvaEleID-Fall17-noIso-V2-wp80' ,\n'mvaEleID-Fall17-noIso-V2-wp90' ,\n'mvaEleID-Fall17-noIso-V2-wpLoose',\n\"heepElectronID-HEEPV70\",\n ),\n\n #### Rochestor\n roccorPath = cms.string('SKFlatMaker/SKFlatMaker/data/roccor.Run2.v3/RoccoR2016.txt'),\n\n # -- photon information -- #\n photon_EA_CH_file = cms.untracked.FileInPath(\"RecoEgamma/PhotonIdentification/data/Fall17/effAreaPhotons_cone03_pfChargedHadrons_90percentBased_TrueVtx.txt\"),\n photon_EA_HN_file = cms.untracked.FileInPath(\"RecoEgamma/PhotonIdentification/data/Fall17/effAreaPhotons_cone03_pfNeutralHadrons_90percentBased_TrueVtx.txt\"),\n photon_EA_Ph_file = cms.untracked.FileInPath(\"RecoEgamma/PhotonIdentification/data/Fall17/effAreaPhotons_cone03_pfPhotons_90percentBased_TrueVtx.txt\"),\n\n # -- Jet information -- #\n AK4Jet_payloadName = cms.string('AK4PFchs'),\n AK8Jet_payloadName = cms.string('AK8PFPuppi'),\n AK4Jet_JER_PtRes_filepath = cms.string('SKFlatMaker/SKFlatMaker/data/JRDatabase/textFiles/Summer16_25nsV1_MC/Summer16_25nsV1_MC_PtResolution_AK4PFchs.txt'),\n AK4Jet_JER_SF_filepath = cms.string('SKFlatMaker/SKFlatMaker/data/JRDatabase/textFiles/Summer16_25nsV1_MC/Summer16_25nsV1_MC_SF_AK4PFchs.txt'),\n AK8Jet_JER_PtRes_filepath = 
cms.string('SKFlatMaker/SKFlatMaker/data/JRDatabase/textFiles/Summer16_25nsV1_MC/Summer16_25nsV1_MC_PtResolution_AK8PFPuppi.txt'),\n AK8Jet_JER_SF_filepath = cms.string('SKFlatMaker/SKFlatMaker/data/JRDatabase/textFiles/Summer16_25nsV1_MC/Summer16_25nsV1_MC_SF_AK8PFPuppi.txt'),\n\n # -- MET information -- #\n METFilterResults_PAT = cms.InputTag(\"TriggerResults\", \"\", \"PAT\"),\n METFilterResults_RECO = cms.InputTag(\"TriggerResults\", \"\", \"RECO\"),\n pfMET = cms.untracked.InputTag(\"pfMet\"),\n \n # -- Trigger -- #\n TriggerResults = cms.untracked.InputTag(\"TriggerResults\", \"\", \"HLT\"),\n TriggerResultsPAT = cms.untracked.InputTag(\"TriggerResults\", \"\", \"PAT\"),\n ##TriggerObject = cms.untracked.InputTag(\"selectedPatTrigger\"),\n TriggerObject = cms.untracked.InputTag(\"slimmedPatTrigger\"), \n \n # -- Else -- #\n GenEventInfo = cms.untracked.InputTag(\"generator\"),\n BeamSpot = cms.untracked.InputTag(\"offlineBeamSpot\"),\n PrimaryVertex = cms.untracked.InputTag(\"offlinePrimaryVerticesWithBS\"),\n Track = cms.untracked.InputTag(\"generalTracks\"),\n PileUpInfo = cms.untracked.InputTag(\"addPileupInfo\"),\n\n # -- Store Flags -- #\n StoreMuonFlag = cms.untracked.bool(True),\n StoreElectronFlag = cms.untracked.bool(True),\n StoreCalibElectronFlag = cms.untracked.bool(True),\n StorePhotonFlag = cms.untracked.bool(True),\n StoreJetFlag = cms.untracked.bool(True),\n StoreFatJetFlag = cms.untracked.bool(True),\n StoreMETFlag = cms.untracked.bool(True),\n StoreLHEFlag = cms.untracked.bool(True),\n StoreGENFlag = cms.untracked.bool(True),\n KeepAllGen = cms.untracked.bool(True), \n StorePriVtxFlag = cms.untracked.bool(True),\n StoreHLTReportFlag = cms.untracked.bool(True),\n StoreHLTObjectFlag = cms.untracked.bool(False),\n StoreL1PrefireFlag = cms.untracked.bool(False),\n\n # -- Filters -- #\n ApplyFilter = cms.untracked.bool(False),\n FilterType = cms.untracked.int32(0),\n\n #### PDF ID's to be save\n ScaleIDRange = cms.untracked.vint32(-999,-999),\n PDFErrorIDRange = cms.untracked.vint32(-999,-999),\n PDFAlphaSIDRange = cms.untracked.vint32(-999,-999),\n PDFAlphaSScaleValue = cms.untracked.vdouble(-999.,-999.),\n\n)\n","sub_path":"SKFlatMaker/python/SKFlatMaker_cfi.py","file_name":"SKFlatMaker_cfi.py","file_ext":"py","file_size_in_byte":5547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"332994485","text":"from setuptools import setup, find_packages\nimport sys, os\n\nfrom pyhdhomerun.hdhr import get_hdhr\n\ntry:\n get_hdhr()\nexcept OSError as e:\n print(\"Could not load HDHomeRun library: %s\" % (e))\n sys.exit(1)\nelse:\n print(\"HDHomeRun libraries verified.\")\n \nversion = '2.3.4'\n\nsetup(name='pyhdhomerun',\n version=version,\n description=\"HDHomeRun interface library.\",\n long_description=\"\"\"\\\nHDHomeRun interface library. Supports device discovery, channel-scanning, streaming, status inquiries, channel changes, etc..\"\"\",\n classifiers=['Development Status :: 4 - Beta',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Multimedia :: Video :: Capture'\n ],\n keywords='tv television tuner tvtuner hdhomerun',\n author='Dustin Oprea',\n author_email='myselfasunder@gmail.com',\n url='https://github.com/dsoprea/PyHdHomeRun',\n license='New BSD',\n packages=['pyhdhomerun'],\n include_package_data=True,\n zip_safe=True,\n install_requires=[\n 'setuptools',\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n \"\"\",\n )\n","sub_path":"pypi_install_script/pyhdhomerun-2.3.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"250499778","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 13 19:33:07 2020\n\n@author: macbookariel\n\"\"\"\n# Regresión lineal simple: el primer paso será copiar el documento de data_preprocessing.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n \n# Importar el Data set\ndataset = pd.read_csv(\"Salary_Data.csv\") \n\n#En este caso, al ver la variable, tenemos que darle a format %,1 para ver 1 posicición decimal.\n# Recordemos que la columna llamada \"Index en el variable explorer NO ES UNA COLUMNA Y QUE LAS COLUMNAS EMPIEZAN\n# A NOMBRARSE EN EL CERO.\n\nX = dataset.iloc[:, :-1].values #Variable independiente = años de experiencia. Ubicada en la anteúltima posición (-1)\ny = dataset.iloc[:, 1].values # Variable dependiente = a predecir = salario. Ubicada en la columna 1\n\n\n# Dividir el data set en conjunto de entrenamiento y en conjunto de testing.\n# En este caso vamos a tomar 10 para testing (1/3) y el resto para entrenamiento (20).\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size = 1/3, random_state = 0)\n\n# Escalado de variables. En el caso de la regresión lineal, el modelo no requiere escalado.\n\n# from sklearn.preprocessing import StandardScaler\n# sc_X = StandardScaler()\n# X_train = sc_X.fit_transform(X_train)\n# X_test = sc_X.transform(X_test)\n\n# Crear modelo de regresión lineal simple con el conjunto de entrenamiento.\n\nfrom sklearn.linear_model import LinearRegression\n\nregression = LinearRegression()\nregression.fit(X_train, y_train) #La máquina ha aprendido, entonces, con las variables que le suministramos (igual tamaño en ambas variables!!)\n\n\n# Predecir el conjunto de test. Para ello crearemos un vector de datos con los datos de predicción para obtener la variable \n# dependiente que nos devuelve el modelo. Observemos que la variable a ser suministrada solo es la independiente (X_test) y\n# el modelo hace la predicción y la guarda en y_pred. Es decir, usando la X_test (años de experiencia) quiero que prediga el sueldo\n# y lo guarde en y_pred\n\ny_pred = regression.predict(X_test)\n\n#Visualizar los resultados de entrenamiento. Vamos a generar un scatter plot (nube de dispersión). Vamos a usar pyplot.\n\nplt.scatter(X_train, y_train, color = \"red\")\n\n# Vamos a hacer un scatter plot donde la X es el grupo de entrenamiento y la y es la predición pero sobre X_train,\n# así vemos las dos variables\n\nplt.plot(X_train, regression.predict(X_train), color = \"blue\")\nplt.title(\"Sueldo vs Años de experiencia (Conjunto de entrenamiento\")\nplt.xlabel(\"Años de experiencia\")\nplt.ylabel(\"Sueldo (en $)\")\nplt.show()\n\n# Vamos a hacer un scatter plot para ver cómo quedan los datos de test y cómo se ajusta a ellos la recta de regresión\n\nplt.scatter(X_test, y_test, color = \"red\")\n\nplt.plot(X_train, regression.predict(X_train), color = \"blue\")\nplt.title(\"Sueldo vs Años de experiencia (Conjunto de testing)\")\nplt.xlabel(\"Años de experiencia\")\nplt.ylabel(\"Sueldo (en $)\")\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"datasets/Part 2 - Regression/Section 4 - Simple Linear Regression/Ariel_Regresion_Lineal_Simple.py","file_name":"Ariel_Regresion_Lineal_Simple.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"112097525","text":"import os\nimport signal\nimport sys\n\nfrom httptask import workers\n\nclass Error(Exception): pass\n\nclass Watcher(object):\n \"\"\"See http://code.activestate.com/recipes/496735/\"\"\"\n\n def __init__(self):\n self.child = os.fork()\n if self.child == 0:\n return\n else:\n self.watch()\n\n def watch(self):\n try:\n os.wait()\n except KeyboardInterrupt:\n self.kill()\n sys.exit()\n\n def kill(self):\n try:\n os.kill(self.child, signal.SIGKILL)\n except OSError:\n pass\n\nclass Spawn(object):\n\n def __init__(self, call, url, options):\n self.call = call\n self.url = url\n self.options = options\n\n def __call__(self):\n return self.call(self.url, self.options)\n\nclass Service(object):\n\n def __init__(self, config):\n self.worker_list = []\n self.config = config\n Watcher()\n\n def get(self, id, config, name, required=False, default=None, ctype=str):\n present = name in config\n if required and not present:\n raise Error('%s is required in %s.' % (name, id))\n value = config.get(name, default)\n if present:\n del config[name]\n try:\n return ctype(value)\n except ValueError:\n raise Error('%s was type \"%s\" and should be \"%s\" in %s.' % (name, type(name), ctype, id))\n\n def run(self):\n for id, value in self.config.items():\n url = self.get(id, value, 'url', required=True)\n count = self.get(id, value, 'count', default=1, ctype=int)\n worker_type = self.get(id, value, 'type', required=True)\n spawn = None\n if worker_type == 'beanstalk':\n spawn = Spawn(workers.Beanstalk, url, value)\n if not spawn is None:\n for i in range(count):\n worker = spawn()\n worker.start()\n self.worker_list.append(worker)\n else:\n raise Error('Unknown worker type (%s)' % type)\n","sub_path":"python-httptask/httptask/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"604749482","text":"from rasa.nlu.training_data import load_data\nfrom rasa.nlu.config import RasaNLUModelConfig\nfrom rasa.nlu.model import Trainer\nfrom rasa.nlu import config\nfrom search_weather import search_weather\nimport string\nimport re\nimport random\n\nclass chatBot:\n INIT = 0\n AUTHED = 1\n CITY_CHOOSEN = 2\n LIVES_FUNCTION_CHOOSE = 3\n GOODBYE = 4\n\n interpreter = Trainer(config.load(\"resources/config_spacy.yml\")).train(load_data('resources/weather_intent.json'))\n current_information = \"\"\n weather_query = search_weather()\n cur_state = INIT\n cur_pending = None\n res_message = \"\"\n pending_intent = \"\"\n\n lives_function = [\"weather\", \"temperature\", \"windpower\", \"humidity\", \"winddirection\"]\n\n forecasts_function = [\"dayweather\", \"nightweather\", \"daytemp\", \"nighttemp\", \"daywind\", \"nightwind\", \"daypower\", \"nightpower\"]\n\n forecasts_day = [\"zr_forecasts\", \"oe_forecasts\", \"tw_forecasts\"]\n\n function_mapping = {\n \"lives_info\": \"实时信息:\",\n \"lives_weather\": \"实时天气:\",\n \"lives_temperature\": \"实时气温:\",\n \"lives_winddirection\": \"实时风向:\",\n \"lives_windpower\": \"实时风力:\",\n \"lives_humidity\": \"实时湿度:\",\n \"lives_reporttime\": \"报告时间:\",\n \"zr_forecasts_dayweather\": \"明天日间天气:\",\n \"zr_forecasts_nightweather\": \"明天夜间天气:\",\n \"zr_forecasts_daytemp\": \"明天日间气温:\",\n \"zr_forecasts_nighttemp\": \"明天夜间气温:\",\n \"zr_forecasts_daywind\": \"明天日间风向:\",\n \"zr_forecasts_nightwind\": \"明天夜间风向:\",\n \"zr_forecasts_daypower\": \"明天日间风力:\",\n \"zr_forecasts_nightpower\": \"明天夜间风力:\",\n \"oe_forecasts_dayweather\": \"后天日间天气:\",\n \"oe_forecasts_nightweather\": \"后天夜间天气:\",\n \"oe_forecasts_daytemp\": \"后天日间气温:\",\n \"oe_forecasts_nighttemp\": \"后天夜间气温:\",\n \"oe_forecasts_daywind\": \"后天日间风向:\",\n \"oe_forecasts_nightwind\": \"后天夜间风向:\",\n \"oe_forecasts_daypower\": \"后天日间风力:\",\n \"oe_forecasts_nightpower\": \"后天夜间风力:\",\n \"tw_forecasts_dayweather\": \"大后天日间天气:\",\n \"tw_forecasts_nightweather\": \"大后天夜间天气:\",\n \"tw_forecasts_daytemp\": \"大后天日间气温:\",\n \"tw_forecasts_nighttemp\": \"大后天夜间气温:\",\n \"tw_forecasts_daywind\": \"大后天日间风向:\",\n \"tw_forecasts_nightwind\": \"大后天夜间风向:\",\n \"tw_forecasts_daypower\": \"大后天日间风力:\",\n \"tw_forecasts_nightpower\": \"大后天夜间风力:\",\n }\n\n policy_rules = {\n (INIT, \"greet\"): (INIT, \"你好呀。\", None),\n (INIT, \"pending_forecasts_day\"): (INIT, \"请先登录再查询预报\", None),\n (INIT, \"pending_forecasts_function\"): (INIT, \"请先登录再查询预报\", None),\n (INIT, \"enquire_forecasts\"): (INIT, \"请先登录再查询预报\", None),\n (INIT, \"pending_function\"): (INIT, \"请先登录再查询实时信息:\", None),\n (INIT, \"city_error\"): (INIT, \"请先告诉我号码。\", None),\n (INIT, \"city_error\"): (INIT, \"城市自动设置失败,请手动设置城市:\", None),\n (INIT, \"weather_info\"): (INIT, \"请先告诉我你的号码。\", None),\n (INIT, \"number\"): (CITY_CHOOSEN, \"欢迎。当前您所在的城市已经自动设置:\", None),\n (INIT, \"city_choose\"): (INIT, \"城市已经设置,但是请先输入你的手机号码。\", None),\n (INIT, \"city_switch\"): (INIT, \"你连号码都没有设置过。\", None),\n (INIT, \"none\"): (INIT, \"你想要让我做什么?\", None),\n (AUTHED, \"pending_forecasts_day\"): (INIT, \"请先设置城市再查询预报\", None),\n (AUTHED, \"pending_forecasts_function\"): (INIT, \"请先设置城市再查询预报\", None),\n (AUTHED, \"enquire_forecasts\"): (INIT, \"请先设置城市再查询预报\", None),\n (AUTHED, \"pending_function\"): (INIT, \"请先设置城市再查询实时信息:\", None),\n (AUTHED, \"city_error\"): (AUTHED, \"城市自动设置失败,请手动设置城市:\", None),\n (AUTHED, \"city_choose\"): (CITY_CHOOSEN, \"当前城市已经切换为:\", None),\n (AUTHED, \"weather_info\"): (AUTHED, \"你还没设置过城市呢。\", None),\n (AUTHED, \"none\"): (AUTHED, \"我不清楚我应该做什么。\", None),\n 
(AUTHED, \"greet\"): (AUTHED, \"你已经打过招呼了鸭。\", None),\n (AUTHED, \"city_switch\"): (AUTHED, \"好的,请选择一个城市。\", None),\n (CITY_CHOOSEN, \"pending_function\"): (CITY_CHOOSEN, \"您想查询现在的什么信息:\", None),\n (CITY_CHOOSEN, \"weather_info\"): (CITY_CHOOSEN, \"帮您查询到的信息:\", None),\n (CITY_CHOOSEN, \"none\"): (CITY_CHOOSEN, \"不是很清楚\", None),\n (CITY_CHOOSEN, \"greet\"): (CITY_CHOOSEN, \"我们已经聊了会了鸭。\", None),\n (CITY_CHOOSEN, \"city_choose\"): (CITY_CHOOSEN, \"您已经设置过城市了。可以告诉我'切换'来切换城市。\", None),\n (CITY_CHOOSEN, \"goodbye\"): (INIT, \"很高兴为您服务\", None),\n (CITY_CHOOSEN, \"city_switch\"): (AUTHED, \"好的,请选择新的城市\", None),\n (CITY_CHOOSEN, \"pending_forecasts_day\"): (CITY_CHOOSEN, \"请选择你要查询的日期\", None),\n (CITY_CHOOSEN, \"pending_forecasts_function\"): (CITY_CHOOSEN, \"请选择你想查询的功能\", None),\n (CITY_CHOOSEN, \"enquire_forecasts\"): (CITY_CHOOSEN, \"准备给您进行天气预报,请问想要查询什么时候的\", None),\n (INIT, \"affirm\"): (INIT, \"你在开玩笑吧\", None),\n (AUTHED, \"affirm\"): (AUTHED, \"我也没为你查什么呀\", None),\n (CITY_CHOOSEN, \"affirm\"): (CITY_CHOOSEN, \"我很高兴得到您的夸奖,接下去想要查询什么\", None),\n (INIT, \"goodbye\"): (INIT, \"还没登录,舍不得离开\", None),\n (AUTHED, \"goodbye\"): (INIT, \"还没查询新的城市,舍不得离开\", None)\n\n }\n query_function = [\"lives_info\",\n \"lives_weather\",\n \"lives_temperature\",\n \"lives_winddirection\",\n \"lives_windpower\",\n \"lives_humidity\",\n \"lives_reporttime\",\n \"forecasts_dayweather\",\n \"forecasts_nightweather\",\n \"forecasts_daytemp\",\n \"forecasts_nighttemp\",\n \"forecasts_daywind\",\n \"forecasts_nightwind\",\n \"forecasts_daypower\",\n \"forecasts_nightpower\"]\n\n def create_intent(self, intent):\n if self.pending_intent != \"\":\n if self.pending_intent == \"lives\" and intent in self.lives_function:\n return self.pending_intent + \"_\" + intent\n if self.pending_intent in self.forecasts_day and intent in self.forecasts_function:\n return self.pending_intent + \"_\" + intent\n if self.pending_intent in self.forecasts_function and intent in self.forecasts_day:\n return intent + \"_\" + self.pending_intent\n return None\n\n\n def send_message(self, state, pending, message):\n self.res_message = \"\"\n print(\"USER : {}\".format(message))\n new_state, response, pending_state = self.respond(state, message)\n # print(\"BOT : {}\".format(response))\n self.res_message = format(response)\n if self.current_information != \"\":\n self.res_message += self.weather_query.get_city_name() + \" \" + self.current_information\n # print(self.weather_query.get_city_name() + \" \" + self.current_information)\n self.current_information = \"\"\n return new_state, pending\n\n def wether_is_forecasts(self, intent):\n if intent[0:2] in ['zr', 'oe', 'tw']:\n return True\n return False\n\n def extrect_phone_number(self, message):\n pattern = re.compile(\"[0-9]{11}\")\n match = re.search(pattern, message)\n return match\n\n def extrect_city_code(self, message):\n pattern = re.compile(\"[0-9]{6}\")\n match = re.search(pattern, message)\n return match\n\n def respond(self, state, message):\n intent = self.interpret(state, message)\n if message in self.weather_query.get_cities().keys() and intent == \"city_error\":\n intent = \"city_choose\"\n if self.extrect_city_code(message) and intent == \"city_error\":\n intent = \"city_choose\"\n if intent == \"city_choose\":\n if self.extrect_city_code(message):\n self.weather_query.set_local(self.extrect_city_code(message).group(0))\n else:\n self.weather_query.set_local(message)\n if (self.wether_is_forecasts(intent)) or (intent in self.query_function):\n if state != self.INIT:\n 
self.current_information = self.function_mapping[intent] + self.weather_query.get_information(intent)\n intent = \"weather_info\"\n new_state, response, pending = self.policy_rules[(state, intent)]\n if (state == self.INIT and intent == \"number\") or (state == self.AUTHED and intent == \"city_choose\"):\n response += self.weather_query.get_city_name()\n return new_state, response, pending\n\n def interpret(self, state, message):\n if self.weather_query.get_city_name() == \"\":\n return 'city_error'\n data = self.interpreter.parse(message)\n # print(data)\n intent = data[\"intent\"][\"name\"]\n # print(intent + \" \" + self.pending_intent)\n if message in self.weather_query.get_cities().keys():\n self.weather_query.set_local(self.weather_query.get_cities()[message])\n return 'city_choose'\n if self.extrect_phone_number(message):\n return 'number'\n if self.extrect_city_code(message):\n return 'city_choose'\n if \"切换\" in message:\n return 'city_switch'\n if intent == \"lives\" and self.pending_intent not in self.lives_function:\n # print(\"asdasdadsadad\")\n self.pending_intent = intent\n return \"pending_function\"\n if intent in self.forecasts_day and self.pending_intent not in self.forecasts_function:\n self.pending_intent = intent\n return \"pending_forecasts_function\"\n if intent in self.forecasts_function and self.pending_intent not in self.forecasts_day:\n self.pending_intent = intent\n return \"pending_forecasts_day\"\n if intent in self.lives_function:\n rel_intent = self.create_intent(intent)\n if rel_intent is not None:\n self.pending_intent = \"\"\n return rel_intent\n if (intent in self.forecasts_day and self.pending_intent in self.forecasts_function) or (intent in self.forecasts_function and self.pending_intent in self.forecasts_day):\n rel_intent = self.create_intent(intent)\n if rel_intent is not None:\n self.pending_intent = \"\"\n print(rel_intent)\n return rel_intent\n if ((state, intent) not in self.policy_rules.keys()) and (intent not in self.function_mapping.keys()):\n return \"none\"\n return intent\n\n def chat(self, msg):\n self.cur_state, self.cur_pending = self.send_message(self.cur_state, self.cur_pending, msg)\n return self.res_message\n","sub_path":"chatBot.py","file_name":"chatBot.py","file_ext":"py","file_size_in_byte":11580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"255976580","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom register_vars import RegisterVars\n\n\nINVENTORY_FILE = './hosts'\nVARS_FILE = './group_vars/servers.yml'\n\n\nclass RegisterHostsVars(RegisterVars):\n def __init__(self):\n super(RegisterHostsVars, self).__init__()\n 'This class has no propertys.'\n\n def _generate_hostlist(self, resultlist):\n resultlist[1] = map(lambda n:n.strip().split(' mngip='),\n filter(lambda n:'mngip' in n and not re.match('\\A#', n), resultlist[0]))\n return resultlist\n\n def _refine_each_param(self, host_ip_list, param_dict):\n if not host_ip_list:\n return param_dict\n host_ip = host_ip_list.pop()\n param_dict['hosts_params'].append({'name':host_ip[0], 'ipaddr':host_ip[1]})\n return self._refine_each_param(host_ip_list, param_dict)\n \n def _refine_param(self, host_ip_list):\n host_ip_list.reverse()\n param_dict = {'hosts_params': []}\n return self._refine_each_param(host_ip_list, param_dict)\n\nif __name__ == '__main__':\n RegisterHostsVars().main(INVENTORY_FILE, VARS_FILE)\n\n","sub_path":"register_hosts_vars.py","file_name":"register_hosts_vars.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"354102619","text":"import json\nimport copy\n\nfile_with_old_and_new_id = 'd:/_work/_program/_python/prName_hallId_prId_format.json'\nfile_types_acbtype_refactor = 'd:/_work/_program/_python/_AKOM_types_acbtype_refactor.json'\nfile_rezult = 'd:/_work/_program/_python/_AKOM_types_acbtype_refactor_with_true_id.json'\n\ndef rezdic(doc1_element, doc2):\n result_element = []\n doc1_element_copy = copy.deepcopy(doc1_element)\n del doc1_element_copy['_id']\n for charge_hall in doc1_element_copy['chargeHalls']:\n for index, program_id in enumerate(charge_hall['programId']):\n for element in doc2:\n if charge_hall['hallId'] == element['chargeHallId'] and element['programId'] == program_id:\n charge_hall['programId'][index] = element['new_prid']\n break\n\n result_element.append(doc1_element_copy)\n return result_element\n\n\nif __name__ == '__main__':\n result_all_elements = []\n with open(file_types_acbtype_refactor) as json_data:\n doc1 = json.load(json_data)\n with open(file_with_old_and_new_id) as json_data:\n doc2 = json.load(json_data)\n for doc1_element in doc1:\n result_all_elements.extend(rezdic(doc1_element, doc2))\n\n print(result_all_elements)\n file = open(file_rezult, 'w')\n file.write(json.dumps(result_all_elements))\n file.close()","sub_path":"work with mongodb/mongodb_find_and_create5.py","file_name":"mongodb_find_and_create5.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"127104145","text":"import re\n\nimport pandas as pd\n\nimport astropy.units as u\nfrom universal_loader.kb.invalid_symbols import INVALID_SYMBOL_LIST\nfrom universal_loader.unstructured.text2csv import plural_to_single\n\nu.imperial.enable()\n\n\"\"\"\nNotes\n------\n\n\nThis module manages metadata knowledge base for atomic and composite units. based on astropy\n\"\"\"\n\n\ndef get_first_item(collection):\n \"\"\"\n\n helper function to help handle uncertainties\n\n Parameters\n ----------\n collection of units\n\n Returns\n -------\n first item in collection\n\n \"\"\"\n if len(collection) == 0:\n return ''\n else:\n return collection[0]\n\n\ndef emit_units_kb():\n \"\"\"\n\n Instantiate unit db from astropy\n\n Returns\n -------\n list of tuples with 5 fields - name, physical type, alias, long name and unit name\n\n Examples\n --------\n >>> kb=emit_units_kb()\n\n \"\"\"\n unit_list = []\n for att in dir(u):\n unit = getattr(u, att)\n try:\n unit_list.append([unit.name,\n unit.physical_type,\n get_first_item(unit.aliases),\n get_first_item(unit.short_names),\n get_first_item(unit.long_names),\n \"u.\" + unit.name\n ])\n except:\n continue\n\n # imperial units are used in US and UK-commonwealth\n for att in dir(u.imperial):\n unit = getattr(u.imperial, att)\n try:\n unit_list.append([unit.name, unit.physical_type,\n get_first_item(unit.aliases),\n get_first_item(unit.short_names),\n get_first_item(unit.long_names),\n \"u.imperial.\" + unit.name\n ])\n except:\n continue\n\n return unit_list\n\n\ndef get_units_as_df():\n \"\"\"\n\n Returns units as pandas dataframe\n\n Returns\n -------\n\n Examples\n --------\n >>>> get_units_as_df()\n\n \"\"\"\n unit_list = emit_units_kb()\n df = pd.DataFrame(unit_list, columns=[\"name\", \"physical_type\", \"alias\", \"short_name\", \"long_name\", \"object_type\"])\n df.drop_duplicates(inplace=True)\n return df\n\n\ndef is_unit(test_str):\n \"\"\"\n\n Check if string is atomic unit.\n\n Parameters\n ----------\n test_str: string representing unit. 
can be long or short version of it eg meters or m.\n\n Returns\n -------\n True/False\n\n Examples\n -------\n >>> is_unit(\"second\")\n >>> is_unit(\"parsec\")\n\n See Also\n --------\n is_composite_unit\n\n \"\"\"\n units = get_units_as_df()\n if (test_str.upper() in list(map(lambda x: x.upper(), units['short_name'].values.tolist()))) \\\n or (test_str.upper() in list(map(lambda x: x.upper(), units['long_name'].values.tolist()))) \\\n or (test_str.upper() in list(map(lambda x: x.upper(), units['alias'].values.tolist()))):\n return True\n else:\n return False\n\n\ndef is_composite_unit(test_str):\n \"\"\"\n\n Check if string can be expressed as composite units\n\n Parameters\n ----------\n\n :param test_str:\n\n Examples\n --------\n >>> is_composite_unit('miles per hour')\n >>> is_composite_unit(\"kilometers per hour\")\n >>> is_composite_unit(\"feet per second per second\")\n >>> is_composite_unit(\"kilometers per second per second\")\n >>> is_composite_unit(\"DEG/DAY\")\n >>> is_composite_unit(\"meters-per-second\")\n >>> is_composite_unit(\"meters-per-second-per-second\")\n\n Returns\n --------\n :return: True/False\n \"\"\"\n delim_found = detect_delim_for_composite_type(test_str)\n if delim_found is not None:\n unit_components = preprocess_composite_components(delim_found, test_str)\n\n all_units = list(map(lambda c: is_unit(c), unit_components))\n return all(all_units)\n\n\ndef preprocess_composite_components(delim_found, test_str):\n unit_components = test_str.split(delim_found)\n # strip the spaces\n unit_components = list(map(lambda c: c.strip(), unit_components))\n # strip any invalid symbols\n pat = re.compile(INVALID_SYMBOL_LIST)\n unit_components = list(map(lambda c: re.sub(pat, '', c), unit_components))\n # make sure each component is singular form and not plural\n unit_components = list(map(lambda c: plural_to_single(c), unit_components))\n return unit_components\n\n\ndef detect_delim_for_composite_type(test_str):\n \"\"\"\n\n Detect separator in composite type string; only two separators are supported\n / and per\n\n Parameters\n ----------\n test_str\n\n Returns\n -------\n either detected separator or None\n\n Examples\n --------\n >>> detect_delim_for_composite_type(\"miles per hour\")\n >>> \"per\"\n\n \"\"\"\n delim_found = None\n composite_delim_list = [\"per\", \"/\"]\n for delim in composite_delim_list:\n if len(test_str.split(delim)) > 1:\n delim_found = delim\n break\n return delim_found\n\n\ndef get_computable_type(test_str):\n \"\"\"\n\n Search unit database and return equivalent computing type\n\n\n Parameters\n ----------\n test_str - atomic unit\n\n Returns\n -------\n computable object or None\n\n Examples\n -----------\n >>> get_computable_type(\"mile\")\n\n\n \"\"\"\n units = get_units_as_df()\n computable_type = eval(\n units.loc[\n (units[\"name\"] == test_str.lower())\n | (units[\"long_name\"] == test_str.lower())\n | (units[\"short_name\"] == test_str.lower())\n | (units[\"alias\"] == test_str.lower())\n ][\"object_type\"].values[0])\n return computable_type\n\n\ndef get_composite_computable_type(test_str):\n \"\"\"\n\n parse string and attempt to produce composite computable type\n\n Parameters\n ----------\n test_str\n\n Returns\n -------\n computable type or none\n\n Examples\n ---------\n >>> get_composite_computable_type(\"miles per hour\")\n\n \"\"\"\n\n delim = detect_delim_for_composite_type(test_str)\n comps = preprocess_composite_components(delim, test_str)\n comp_list_types = list(map(lambda comp: get_computable_type(comp), comps))\n 
composite_type = comp_list_types[0] / comp_list_types[1]\n return composite_type\n","sub_path":"universal_loader/kb/units_metadata.py","file_name":"units_metadata.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"237570192","text":"from flask import (Blueprint, render_template, redirect, url_for, request\n , flash, abort)\n\nfrom models import User, UserRegistrationForm\nfrom application.utilities.random import random_alphanumeric\nfrom application import bcrypt\n\nusers = Blueprint('users', __name__, template_folder='templates')\n\n@users.route('/confirm-email/')\ndef confirm_email(code):\n users = User.objects(email_confirmation_code=code)\n if not users:\n abort(404)\n user = users[0]\n user.update(set__email_confirmed=True)\n\n flash('Email confirmed, thanks! You may now login.', 'success')\n return redirect(url_for('users.login'))\n\n\n@users.route('/register', methods=['GET', 'POST'])\ndef register():\n user_form = UserRegistrationForm()\n \n if request.method == 'GET':\n return render_template('register.html', user_form=user_form)\n\n if request.method == 'POST':\n if user_form.validate():\n # create the new user\n new_user = User(\n name = user_form.name.data\n , email = user_form.email.data\n , self_reported_org = user_form.organization.data\n , hashed_password = bcrypt.generate_password_hash(\n user_form.password.data, 10)\n , email_confirmation_code = random_alphanumeric(20)\n )\n new_user.save()\n new_user.send_verification_email()\n flash('We have sent you a message to confirm your email address.'\n , 'info')\n return redirect(url_for('public.root'))\n\n # validation failed\n flash('Validation failed.', 'error')\n return redirect(url_for('users.register'))\n\n\n@users.route('/login')\ndef login():\n return render_template('login.html')\n\n@users.route('/logout')\ndef logout():\n # logout ..\n return redirect(url_for('public.root'))\n\n@users.route('/members')\ndef show_all():\n users = User.objects()\n return render_template('all_users.html', users=users)\n","sub_path":"application/users/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"392225814","text":"from os import listdir\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport re\r\nimport pickle\r\nimport math\r\nfrom multiprocessing import Pool\r\nimport time\r\n#import multiprocessing\r\n\r\n\r\ndef gethumantime(sec):\r\n if sec < 60: return str(round(sec,2))+\" seconds\"\r\n else: return str(int(sec/60))+\" minutes \"+str(round(sec%60,2))+\" seconds\"\r\n\r\n\r\ndef gaussianStrength(wordGap,sentenceGap):\r\n strength = (1/wordGap)*math.exp(-sentenceGap)\r\n return(strength)\r\n\r\n\"\"\"\r\ndef gaussianStrength(wordGap):\r\n strength = math.exp(-wordGap)\r\n return(strength)\r\n\"\"\"\r\n\r\ndef stopwords_preprocessing():\r\n fileOpen = open('stopwords/unprocessed_stopwords.txt',encoding='utf-8',errors=\"ignore\")\r\n lines = fileOpen.read()\r\n fileOpen.close()\r\n fileOpen = open('stopwords/stopwords.txt','w+',encoding='utf-8',errors=\"ignore\")\r\n eachLine = lines.split('\\n')\r\n stopwords = list(set(eachLine))\r\n for i in range(len(stopwords)): stopwords[i] = re.sub('[^A-Za-z]+', '', stopwords[i].lower())\r\n stopwords = list(set(stopwords))\r\n stopwords.sort()\r\n for i in range(len(stopwords)): \r\n if i == len(stopwords)-1: fileOpen.write(stopwords[i])\r\n else: fileOpen.write(stopwords[i]+'\\n')\r\n fileOpen.close()\r\n \r\ndef get_stopwords():\r\n fileOpen = open('stopwords/stopwords.txt',encoding='utf-8',errors=\"ignore\")\r\n lines = fileOpen.read()\r\n stopwords = lines.split('\\n')\r\n return(stopwords)\r\n\r\ndef processBookInformation(fileName,count):\r\n stopwords_preprocessing()\r\n stopwords = get_stopwords()\r\n lemmatizer = WordNetLemmatizer()\r\n folder_name = \"inputBooks\"\r\n cut_size = 7\r\n #list of all books containing sentenced token words\r\n book_data = []\r\n getfile = open(folder_name+'/'+fileName,encoding='utf-8',errors=\"ignore\")\r\n lines = getfile.read()\r\n lines = re.sub('[\\\\n\\\\t\\\\r]+', ' ', lines)\r\n eachLine = lines.split('.')\r\n list_sentences =[] #list of all sentences of token words\r\n for index in range(len(eachLine)):\r\n eachLine[index] = re.sub('[^A-Za-z ]+', ' ', eachLine[index].lower())\r\n eachLine = list(filter(None, eachLine))\r\n new_eachLine = []\r\n for data in eachLine:\r\n data_token = data.split(' ')\r\n data_token = list(filter(None, data_token))\r\n chunks = [data_token[x:x+cut_size] for x in range(0, len(data_token), cut_size)]\r\n new_eachLine.extend(chunks)\r\n eachLine = new_eachLine\r\n eachLine = list(filter(None, eachLine))\r\n for index in range(len(eachLine)):\r\n token_words = eachLine[index]\r\n #token_words = list(filter(None, token_words))\r\n new_token_words = [] #list of all token words in a sentences\r\n for each_tokenWords in token_words:\r\n if each_tokenWords not in stopwords:\r\n new_token_words.append(lemmatizer.lemmatize(each_tokenWords))\r\n list_sentences.append(new_token_words)\r\n book_data = list_sentences\r\n getfile.close()\r\n print(\"Read Book \" + str(count+1))\r\n saveString = \"tokenized_books/tokenized_book\" + str(count+1)\r\n with open(saveString, 'wb') as fp: pickle.dump(book_data, fp)\r\n \r\ndef calculate_strength(count):\r\n #with open('tokenized_book', 'rb') as fp: book_data = pickle.load(fp)\r\n disp_count = count+1\r\n #print(count)\r\n edgePairs = {}\r\n max_distance = 2\r\n #strengths = []\r\n loadString = \"tokenized_books/tokenized_book\" + str(disp_count)\r\n with open(loadString, 'rb') as fp: eachBook = pickle.load(fp)\r\n #print(disp_count)\r\n for source_sentence in range(len(eachBook)):\r\n for source_word in 
range(len(eachBook[source_sentence])):\r\n wordGap = 0\r\n index = source_word + 1\r\n for dest_word in range(index,len(eachBook[source_sentence])):\r\n item_pair = eachBook[source_sentence][source_word] + \"\\t\" + eachBook[source_sentence][dest_word]\r\n wordGap = wordGap + 1\r\n sentenceGap = 0\r\n conceptStrength = gaussianStrength(wordGap,sentenceGap)\r\n #conceptStrength = gaussianStrength(wordGap)\r\n if item_pair in edgePairs:\r\n edgePairs[item_pair] = edgePairs[item_pair] + conceptStrength\r\n else:\r\n edgePairs[item_pair] = conceptStrength\r\n sentenceGap = 1\r\n for dest_sentence in range(source_sentence+sentenceGap,len(eachBook)):\r\n if sentenceGap>max_distance:\r\n break\r\n else:\r\n for dest_word in range(len(eachBook[dest_sentence])):\r\n item_pair = eachBook[source_sentence][source_word] + \"\\t\" + eachBook[dest_sentence][dest_word]\r\n wordGap = wordGap + 1\r\n conceptStrength = gaussianStrength(wordGap,sentenceGap)\r\n #conceptStrength = gaussianStrength(wordGap)\r\n if item_pair in edgePairs:\r\n edgePairs[item_pair] = edgePairs[item_pair] + conceptStrength\r\n else:\r\n edgePairs[item_pair] = conceptStrength\r\n sentenceGap += 1\r\n \r\n eps = []\r\n for key, value in edgePairs.items():\r\n eps.append([key,value])\r\n #eps.sort(key=lambda elem: elem[2],reverse=True)\r\n fileNameEP = \"edgepairs/edgepair_strength_\"+str(disp_count)+\".txt\"\r\n print(fileNameEP)\r\n file = open(fileNameEP,'w+')\r\n for i in range(len(edgePairs)):\r\n if i=5000:\r\n break\r\n single_esp = line.rstrip().split('\\t')\r\n value = float(single_esp[2])\r\n item_pair = single_esp[0] + \"\\t\" +single_esp[1]\r\n if item_pair in edgePairs:\r\n edgePairs[item_pair] = edgePairs[item_pair] + value\r\n else:\r\n edgePairs[item_pair] = value\r\n count +=1\r\n eps = []\r\n for key, value in edgePairs.items():\r\n eps.append([key,value])\r\n eps.sort(key=lambda elem: elem[1],reverse=True)\r\n fileNameEP = \"merged_edgeList.txt\" \r\n file = open(fileNameEP,'w+')\r\n for i in range(len(edgePairs)):\r\n if i= 3 and len(rhyme2_list) >= 2: # call function again if not enough rhymes\n break\n return rhyme1_list, rhyme2_list\n\ndef find_rhymes(rhyme_list, num_to_find):\n endings = []\n for i in range(num_to_find):\n ending = random.choice(rhyme_list)\n ending_words = set(x[0] for x in endings)\n while ending[0] in ending_words:\n ending = random.choice(rhyme_list)\n endings.append(ending)\n return endings\n\ndef count_syllables(word_key):\n phone_list = word_dict[word_key]\n stressed_list = [x for x in phone_list if x[-1].isdigit()]\n num_syllables = len(stressed_list)\n return num_syllables\n\ndef fill_line(ending, total_syllables, return_stress=False, stress=None):\n while True:\n line_words = []\n num_syllables = count_syllables(ending)\n while num_syllables < total_syllables:\n new_syllables = total_syllables\n while num_syllables + new_syllables > total_syllables:\n new_word = random.choice(word_dict.keys())\n new_syllables = count_syllables(new_word)\n line_words.append(new_word)\n num_syllables += new_syllables\n line_words.append(ending)\n if return_stress:\n line_stress = ''.join(stress_dict[x] for x in line_words)\n if line_stress[1] == '1':\n return line_words, line_stress\n if stress:\n line_stress = ''.join(stress_dict[x] for x in line_words)\n matching = 0.0\n total = len(stress)\n for i in range(total):\n if stress[i] == line_stress[i]:\n matching += 1\n if (matching / total) >= 0.7 and line_stress[1] == '1':\n break\n return line_words\n\ndef make_limerick(len1, len2):\n rhyme1, rhyme2 = 
choose_rhymes()\n end1, end2, end5 = find_rhymes(rhyme1, 3)\n end3, end4 = find_rhymes(rhyme2, 2)\n filled1, stress1 = fill_line(end1, len1, return_stress=True)\n filled3, stress2 = fill_line(end3, len2, return_stress=True)\n limerick_tuple = (filled1, fill_line(end2, len1, stress=stress1),\n filled3, fill_line(end4, len2, stress=stress2),\n fill_line(end5, len1, stress=stress1))\n for line in limerick_tuple:\n text_line = ' '.join(x[0] for x in line)\n print(text_line)\n\nif __name__ == '__main__':\n for i in range(3):\n make_limerick(9, 6)\n print('')\n","sub_path":"cmu_limericks.py","file_name":"cmu_limericks.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"472023820","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[35]:\n\n\nAsem_dirc = [\": .byte\",\": .half\", \": .word\", \": .dword\", \": .asciiz\"]\nincr = {\": .byte\":\"1\",\": .half\":\"2\",\": .word\":\"4\",\": .dword\":\"8\",\": .asciiz\":\"1\"}\n\n\n\ndef KMPSearch(minor,major): \n\tl = len(minor) \n\tL = len(major) \n\tlcmin = [0]*l \n\tj = 0 \n\tcomputeLPSArray(minor,l,lcmin) \n\ti = 0\n\twhile i < L: \n\t\tif minor[j] == major[i]: \n\t\t\ti += 1\n\t\t\tj += 1\n\t\tif j == l: \n\t\t\treturn 1\n\t\t\tj = lcmin[j-1] \n\n\t\telif i < L and minor[j] != major[i]: \n\t\t\tif j != 0: \n\t\t\t\tj = lcmin[j-1] \n\t\t\telse: \n\t\t\t\ti += 1\n\ndef computeLPSArray(minor,l,lcmin): \n\tlen = 0 \n\tlcmin[0] \n\ti = 1\n\twhile i < l: \n\t\tif minor[i]== minor[len]: \n\t\t\tlen += 1\n\t\t\tlcmin[i] = len\n\t\t\ti += 1\n\t\telse: \n\t\t\tif len != 0: \n\t\t\t\tlen = lcmin[len-1] \n\t\t\telse: \n\t\t\t\tlcmin[i] = 0\n\t\t\t\ti += 1\n\n\n\n\ndef KMPSearch2(minor, major): \n\tll=[]\n\tl = len(minor) \n\tL = len(major) \n\tlcmin = [0]*l\n\tj = 0 \n\tcomputeLPSArray(minor,l,lcmin) \n\ti = 0 \n\twhile i < L: \n\t\tif minor[j] == major[i]: \n\t\t\ti += 1\n\t\t\tj += 1\n\n\t\tif j == l: \n\t\t\tll.append ((i-j) )\n\t\t\tj = lcmin[j-1] \n\n\t\telif i < L and minor[j] != major[i]: \n\t\t\tif j != 0: \n\t\t\t\tj = lcmin[j-1] \n\t\t\telse: \n\t\t\t\ti += 1\n\n\treturn ll\n\ndef computeLPSArray(minor,l,lcmin): \n\tlen = 0 \n\tlcmin[0] \n\ti = 1 \n\twhile iiids):\n \n \n if(i==po2):\n po = po+1\n po1 = po1 +1\n break\n else:\n if(len(p.findall(list1[i]))==0):\n po = po+1\n po1 = po1 +1\n \n flag = 0\n for i in (range(len(aas))):\n if(i>iids):\n \n \n if(KMPSearch(pat, aas[i])):\n flag = 1 \n cnt = cnt+1\n break\n else:\n if(len(p.findall(list1[i]))==0):\n cnt = cnt+1\n \n cnt = cnt - po \n if(flag==0):\n print(\"Error: \"+str(pat)+\" not Defined\")\n return \n #print(pat)\n immd = cnt*2\n if(immd<0):\n immd = -1*immd\n immd_= onescomp(dec2bin2(int(immd)))\n immd_ = array2string2(immd_)\n ll = (int(immd_,2)+1 ) \n immd = ll\n \n d1 = convert((dec2bin2(int(immd))[2:8]))\n d2 = ((dec2bin2(int(immd))[0]))\n d3 = convert((dec2bin2(int(immd))[8:12]))\n d4 = ((dec2bin2(int(immd))[1]))\n reg_1 = array2string(dec2bin(int(reg1)))\n reg_2 = array2string(dec2bin(int(reg2)))\n \n output = str(d2)+str(d1)+str(reg_1)+str(reg_2)+str(funt3)+str(d3)+str(d4)+\"1100011\"\n \n return output \n\n \n\n \n \ndef Uformat(reg1,imm,b):\n reg_1 = array2string(dec2bin(int(reg1)))\n op=U[b]\n immd_ = array2string3(dec2bin3(int(imm)))\n \n if(op==\"auipc\"):\n output = str(immd_)+str(reg_1)+\"0010111\"\n else:\n output = str(immd_)+str(reg_1)+\"0110111\"\n \n return output\ndef UJformat(reg1,aas,string,po2,iids):\n hhf = string.replace(\"\\n\",\"\").split(\",\")\n pat = hhf[1]\n po=0\n pat = pat+':'\n p = re.compile('[\\w]*[:]') \n cnt=0\n cnt1 = 0\n \n po=0\n po1 = 0\n for i in (range(len(aas))):\n if(i>iids):\n \n \n if(i==po2):\n po = po+1\n po1 = po1 +1\n break\n else:\n if(len(p.findall(list1[i]))==0):\n po = po+1\n po1 = po1 +1\n \n flag = 0\n for i in (range(len(aas))):\n if(i>iids):\n \n \n if(KMPSearch(pat, aas[i])):\n flag = 1 \n cnt = cnt+1\n break\n else:\n if(len(p.findall(list1[i]))==0):\n cnt = cnt+1\n if(flag==0):\n print(\"Error: \"+str(pat)+\" not Defined\")\n return\n \n cnt = cnt - po \n \n \n immd = cnt*2\n \n if(immd<0):\n immd = -1*immd\n immd_= onescomp(dec2bin3(int(immd)))\n \n immd_ = array2string3(immd_)\n ll = (int(immd_,2)+1 ) \n immd = ll\n d4 = convert((dec2bin3(int(immd))[1:9]))\n d2 = 
convert((dec2bin3(int(immd))[10:20]))\n d1 = ((dec2bin3(int(immd))[0]))\n d3 = ((dec2bin3(int(immd))[9]))\n reg_1 = array2string(dec2bin(int(reg1)))\n \n \n output = str(d1)+str(d2)+str(d3)+str(d4)+str(reg_1)+\"1101111\"\n return output\n\n\n\n\n# Array of formats\n\ndef regno(string, op,aas,po,iids):\n passorder = []\n p = re.compile('[x]\\d+') \n jfj= p.findall(string)\n for z in range(len(jfj)):\n passorder.append(int(jfj[z].replace(\"x\",\"\")))\n \n funct3pass = str(R_funct3.get(str(op)))\n a,b=formatfound(string)\n var=formats[a]\n \n funct3pass = foundfunct3(a,b)\n \n if(var==\"R\"): #let vari contain the format\n ss = string\n ss = ss.split(\",\")\n ss1 =int( ss[0].split(\" \")[1].replace(\"x\",\"\"))\n ss2 = int( ss[1].replace(\"x\",\"\"))\n ss3 = int(ss[2].replace(\"x\",\"\"))\n return(Rformat(funct3pass,ss3,ss2,ss1,b))\n if(var==\"SB\"):\n if(len(passorder)==3):\n print(\"Error:Expecting one immediate value\")\n return\n if(len(passorder)==1):\n h1=(passorder[0])\n h2=h1\n else:\n h1=(passorder[0])\n h2=(passorder[1])\n \n return( SBformat(funct3pass,h2,h1,aas,string,po,iids))\n if(var==\"S\"):\n if(len(passorder)==3):\n print(\"Error:Expecting one immediate value\")\n return\n if(len(passorder)==1):\n h1=(passorder[0])\n h2=h1\n else:\n h1=(passorder[0])\n h2=(passorder[1])\n imm=immediate_found(string)\n \n return( Sformat(funct3pass,h2,h1,imm)) \n \n \n \n if(var==\"I\"):\n if(len(passorder)==3):\n print(\"Error:Expecting one immediate value\")\n return\n if(len(passorder)==1):\n h1=(passorder[0])\n h2=h1\n else:\n h1=(passorder[0])\n h2=(passorder[1])\n imm=immediate_found(string)\n return(Iformat(funct3pass,h2,h1,imm,string)) #let imm gets the immediate value\n if(var==\"U\"):\n ss = string.split(\",\")\n imm =int(ss[1],16)\n if(imm<0):\n imm = -1*imm\n immd_= onescomp(dec2bin2(int(imm)))\n immd_ = array2string2(immd_)\n ll = (int(immd_,2)+1 ) \n imm = ll\n \n \n return(Uformat((passorder[0]),imm,b))\n if(var==\"UJ\"):\n \n \n return(UJformat((passorder[0]),aas,string,po,iids))\n \nimmq = []\nimport re \ndef immediate_found(string):\n p = re.compile('[,][-/+]?[\\d]+') \n ff = str(p.findall(string))\n ff = ff.replace(\",\",\"\")\n ff = (ff.replace(\"'\",\"\"))\n ff = ff.replace(\"[\",\"\")\n ff = ff.replace(\"]\",\"\")\n \n ff = int(ff)\n return ff\n\n\n \n\n\n\ng = open(\"oo.mc.txt\",\"w+\")\np = re.compile('[+|-]?\\d+') \n\n\ndef assebly_dirc(string,PC,aas,po,iids):\n \n \n import re \n \n\n for i in range(len(Asem_dirc)):\n \n if(KMPSearch(Asem_dirc[i],string)==1):\n \n if(i!=4):\n p = re.compile('[+|-]?\\d+') \n num87 = p.findall(string)\n \n for j in range(len(num87)):\n g.write(str(hex(PC[0]))+\" \"+str(hex(int(num87[j])))+\"\\n\")\n PC[0] = PC[0] + int(incr[str(Asem_dirc[i])])\n elif(i==4):\n saa = string.split(\" \")[2]\n p = re.compile('\\w') \n daa = p.findall(saa)\n for j in range(len(daa)):\n g.write(str(hex(PC[0]))+\" \"+str(hex(ord(daa[i])-ord('a')+10))+\"\\n\")\n PC[0] = PC[0] + int(incr[str(Asem_dirc[i])])\n \n \n return\n \n a,b=formatfound(string)\n op=formats[a]\n \n \n g.write(str(hex(PC[0]))+\" \"+str(hex(int(regno(string, op,aas,po,iids),base = 2)))+\"\\n\")\n #print(regno(string, op,aas,po,iids))\n PC[0] = PC[0] + 4\ndef assebly_dirc1(string,PC,aas,po):\n \n \n import re \n \n\n for i in range(len(Asem_dirc)):\n \n if(KMPSearch(Asem_dirc[i],string)==1):\n \n \n if(i!=4):\n p = re.compile('[+|-]?\\d+')\n string = string.split(\":\")[1]\n num87 = p.findall(string)\n \n for j in range(len(num87)):\n immd = int(num87[j])\n if(immd<0):\n immd = -1*immd\n 
if(str(Asem_dirc[i])==\": .byte\"):\n immd_= onescomp(dec2bin8(int(immd)))\n\n immd_ = array2string8(immd_)\n if(str(Asem_dirc[i])==\": .half\"):\n immd_= onescomp(dec2bin16(int(immd)))\n\n immd_ = array2string16(immd_)\n if(str(Asem_dirc[i])==\": .word\"):\n \n immd_= onescomp(dec2bin32(int(immd)))\n\n immd_ = array2string32(immd_)\n if(str(Asem_dirc[i])==\": .dword\"):\n immd_= onescomp(dec2bin64(int(immd)))\n\n immd_ = array2string64(immd_)\n \n ll = (int(immd_,2)+1 ) \n immd = ll\n \n g.write(str(hex(PC[0]))+\" \"+str(hex(int(immd)))+\"\\n\")\n PC[0] = PC[0] + int(incr[str(Asem_dirc[i])])\n elif(i==4):\n \n saa = string.split(\" \")[2]\n p = re.compile('\\w') \n daa = p.findall(saa)\n for j in range(len(daa)):\n \n g.write(str(hex(PC[0]))+\" \"+str(hex(ord(daa[j])-ord('a')+10))+\"\\n\")\n PC[0] = PC[0] + int(incr[str(Asem_dirc[i])])\n \n \n return\n \n \n \n \nf= open(\"text.mc.txt\",\"r+\")\ng = open(\"oo.mc.txt\",\"w+\")\nlist1=f.readlines()\nPC= []\nPC.append(268435456)\np = re.compile('[\\w]*[:]') \nap = 0\nfor i in range(len(list1)):\n if(i!=0):\n if(KMPSearch(\".text\", list1[i])): \n ap =i\n break\n \n if(len(p.findall(list1[i]))!=0):\n \n \n if(len(list1[i])==1): break\n\n assebly_dirc1(list1[i],PC,list1,i)\nPC[0]=0\nfor i in range(len(list1)):\n if(i>ap):\n if(len(p.findall(list1[i]))==0):\n \n if(len(list1[i])==1): break\n\n assebly_dirc(list1[i],PC,list1,i,ap)\n \n#print(d1,d2,d3,d4,reg_1)\n#output = str(d2)+str(d4)+str(d1)+str(reg_1)+\"1101111\"\n#0000 0000 11111111 000 1 11111111 00001 1101111 \n \n\n\n\n\ng = open(\"oo.mc.txt\",\"w+\")\n\n\n\n\n\n\n\n","sub_path":"PHASE1.py","file_name":"PHASE1.py","file_ext":"py","file_size_in_byte":19939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"646454691","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import exceptions\nfrom horizon import tabs\n\nfrom gbpui import client\nfrom gbpui import column_filters as gfilters\n\nfrom gbpui.panels.application_policy import tables\n\nPolicyRulesTable = tables.PolicyRulesTable\nPolicyClassifiersTable = tables.PolicyClassifiersTable\nPolicyActionsTable = tables.PolicyActionsTable\n\n\nclass PolicyActionsTab(tabs.TableTab):\n table_classes = (PolicyActionsTable,)\n name = _(\"Policy Actions\")\n slug = \"policyactions\"\n template_name = \"horizon/common/_detail_table.html\"\n\n def get_policyactionstable_data(self):\n actions = []\n try:\n actions = client.policyaction_list(self.tab_group.request,\n tenant_id=self.tab_group.request.user.tenant_id)\n a = lambda x, y: gfilters.update_policyaction_attributes(x, y)\n actions = [a(self.request, item) for item in actions]\n except Exception as e:\n msg = _('Unable to retrieve actions list. %s') % (str(e))\n exceptions.handle(self.tab_group.request, msg)\n return actions\n\n\nclass PolicyClassifiersTab(tabs.TableTab):\n table_classes = (PolicyClassifiersTable,)\n name = _(\"Policy Classifiers\")\n slug = \"policyclassifiers\"\n template_name = \"horizon/common/_detail_table.html\"\n\n def get_policyclassifierstable_data(self):\n try:\n classifiers = client.policyclassifier_list(self.tab_group.request,\n tenant_id=self.tab_group.request.user.tenant_id)\n except Exception:\n classifiers = []\n exceptions.handle(self.tab_group.request,\n _('Unable to retrieve classifier list.'))\n else:\n classifiers = gfilters.update_classifier_attributes(classifiers)\n return classifiers\n\n\nclass PolicyRulesTab(tabs.TableTab):\n table_classes = (PolicyRulesTable,)\n name = _(\"Policy Rules\")\n slug = \"policyrules\"\n template_name = \"horizon/common/_detail_table.html\"\n\n def get_policyrulestable_data(self):\n try:\n policy_rules = client.policyrule_list(self.tab_group.request,\n tenant_id=self.tab_group.request.user.tenant_id)\n policy_rules = [gfilters.update_policyrule_attributes(\n self.request, item) for item in policy_rules]\n except Exception:\n policy_rules = []\n exceptions.handle(self.tab_group.request,\n _('Unable to retrieve policy-rule list.'))\n\n for rule in policy_rules:\n rule.set_id_as_name_if_empty()\n\n return policy_rules\n\n\nclass ApplicationPoliciesTab(tabs.TableTab):\n table_classes = (tables.ApplicationPoliciesTable,)\n name = _(\"Policy Rule Set\")\n slug = \"application_policies\"\n template_name = \"horizon/common/_detail_table.html\"\n\n def get_application_policies_table_data(self):\n policy_rule_sets = []\n try:\n policy_rule_sets = client.policy_rule_set_list(\n self.tab_group.request,\n tenant_id=self.tab_group.request.user.tenant_id)\n policy_rule_sets = [gfilters.update_pruleset_attributes(\n self.request, item) for item in policy_rule_sets]\n except Exception:\n exceptions.handle(\n 
self.tab_group.request,\n _('Unable to retrieve policy rule set list.'))\n\n for policy_rule_set in policy_rule_sets:\n policy_rule_set.set_id_as_name_if_empty()\n return policy_rule_sets\n\n\nclass ApplicationPoliciesTabs(tabs.TabGroup):\n slug = \"application_policies_tabs\"\n tabs = (ApplicationPoliciesTab,\n PolicyRulesTab,\n PolicyClassifiersTab,\n PolicyActionsTab)\n sticky = True\n\n\nclass PolicyRuleSetDetailsTab(tabs.Tab):\n name = _(\"Policy Rule Set Details\")\n slug = \"policy_rule_setdetails\"\n template_name = \"project/application_policy/_policy_rule_set_details.html\"\n failure_url = reverse_lazy('horizon:project:policy_rule_set:index')\n\n def get_context_data(self, request):\n cid = self.tab_group.kwargs['policy_rule_set_id']\n try:\n policy_rule_set = client.policy_rule_set_get(request, cid)\n rules = client.policyrule_list(\n request, tenant_id=request.user.tenant_id,\n policy_rule_set_id=policy_rule_set.id)\n rules = [\n item for item in rules if item.id in\n policy_rule_set.policy_rules]\n rules_with_details = []\n for rule in rules:\n r = {}\n r['name'] = rule.name\n r['id'] = rule.id\n action_list = []\n for aid in rule.policy_actions:\n action = client.policyaction_get(request, aid)\n a = {'id': action.id}\n if action.action_value:\n if action.action_type == 'redirect':\n scspec = client.get_servicechain_spec(request,\n action.action_value)\n a['name'] = \"Redirect:%s\" % scspec.name\n else:\n values = (str(action.action_type),\n str(action.action_value))\n name = \"%s:%s\" % values\n a['name'] = name\n else:\n a['name'] = str(action.action_type)\n action_list.append(a)\n r['actions'] = action_list\n r['classifier'] = client.policyclassifier_get(\n request, rule.policy_classifier_id)\n rules_with_details.append(r)\n except Exception as e:\n msg = _('Unable to retrieve policy_rule_set details.') % (str(e))\n exceptions.handle(request, msg, redirect=self.failure_url)\n return {'policy_rule_set': policy_rule_set,\n 'rules_with_details': rules_with_details}\n\n\nclass PolicyRuleSetDetailsTabs(tabs.TabGroup):\n slug = \"policy_rule_settabs\"\n tabs = (PolicyRuleSetDetailsTab,)\n\n\nclass PolicyRulesDetailsTab(tabs.Tab):\n name = _(\"Policy Rule Details\")\n slug = \"policyruledetails\"\n template_name = \"project/application_policy/_policyrules_details.html\"\n failure_url = reverse_lazy('horizon:project:policyrule:index')\n\n def get_context_data(self, request):\n ruleid = self.tab_group.kwargs['policyrule_id']\n actions = []\n classifiers = []\n try:\n policyrule = client.policyrule_get(request, ruleid)\n actions = client.policyaction_list(request,\n tenant_id=request.user.tenant_id, policyrule_id=ruleid)\n actions = [\n item for item in actions if item.id in\n policyrule.policy_actions]\n classifiers = client.policyclassifier_list(\n request, tenant_id=request.user.tenant_id,\n policyrule_id=ruleid)\n classifiers = [\n item for item in classifiers if\n item.id == policyrule.policy_classifier_id]\n except Exception:\n exceptions.handle(request,\n _('Unable to retrieve policyrule details.'),\n redirect=self.failure_url)\n return {'policyrule': policyrule,\n 'classifiers': classifiers,\n 'actions': actions}\n\n\nclass PolicyRuleDetailsTabs(tabs.TabGroup):\n slug = \"policyruletabs\"\n tabs = (PolicyRulesDetailsTab,)\n\n\nclass PolicyClassifierDetailsTab(tabs.Tab):\n name = _(\"Policy Classifier Details\")\n slug = \"policyclassifierdetails\"\n template_name = \"project/application_policy/_policyclassifier_details.html\"\n failure_url = 
reverse_lazy('horizon:project:policy_rule_set:index')\n\n def get_context_data(self, request):\n pcid = self.tab_group.kwargs['policyclassifier_id']\n try:\n policyclassifier = client.policyclassifier_get(request, pcid)\n policyclassifier = gfilters.update_classifier_attributes(\n policyclassifier)\n except Exception:\n exceptions.handle(request,\n _('Unable to retrieve policy_rule_set details.'),\n redirect=self.failure_url)\n return {'policyclassifier': policyclassifier}\n\n\nclass PolicyClassifierDetailsTabs(tabs.TabGroup):\n slug = \"policyclassifiertabs\"\n tabs = (PolicyClassifierDetailsTab,)\n\n\nclass PolicyActionDetailsTab(tabs.Tab):\n name = _(\"Policy Action Details\")\n slug = \"policyactiondetails\"\n template_name = \"project/application_policy/_policyaction_details.html\"\n failure_url = reverse_lazy('horizon:project:policy_rule_set:index')\n\n def get_context_data(self, request):\n paid = self.tab_group.kwargs['policyaction_id']\n try:\n policyaction = client.policyaction_get(request, paid)\n policyaction = gfilters.update_policyaction_attributes(request,\n policyaction)\n except Exception:\n exceptions.handle(request,\n _('Unable to retrieve policyaction details.'),\n redirect=self.failure_url)\n return {'policyaction': policyaction}\n\n\nclass PolicyActionDetailsTabs(tabs.TabGroup):\n slug = \"policyactiontabs\"\n tabs = (PolicyActionDetailsTab,)\n","sub_path":"gbpui/panels/application_policy/tabs.py","file_name":"tabs.py","file_ext":"py","file_size_in_byte":10061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"76765200","text":"from ..Base import Base, db\nfrom sqlalchemy.sql import func\n\nfrom flask import current_app\n\nimport re\nimport os\nimport math\nimport base64\nfrom io import BytesIO\nfrom PIL import Image as PIL_Image\nimport time\n\n\nclass Image(Base):\n __abstract__ = True\n\n file_descriptor = db.Column('file_descriptor', db.String(128), nullable = False)\n file_mime = db.Column('file_mime', db.String(32), nullable = False)\n\n def __repr__(self):\n return \"\" % (self.id, self.file_mime)\n\n def __str__(self):\n return \"%s\" % (self.id, )\n\n def serialize(self):\n obj = super(Image,self).serialize()\n \n obj['file_descriptor'] = self.file_descriptor\n obj['file_mime'] = self.file_mime\n \n return obj\n\n @staticmethod\n def save(image, filename = None, max_width = None, max_height = None):\n \n directory = os.path.join(current_app.config['UPLOADED_FILES_DEST'])\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n \n if filename is None:\n filename = 'image_'+str(math.floor(1000*time.time()))\n\n file_descriptor = os.path.join(directory, filename)\n file_mime = image.format\n\n if file_mime == 'PNG':\n file_descriptor = file_descriptor + '.png'\n elif file_mime in ('JPEG', None,):\n image = image.convert('RGB')\n file_mime = 'JPEG'\n file_descriptor = file_descriptor + '.jpg'\n \n\n #\n # convent to jpg\n #\n\n if file_mime == 'PNG':\n #re-convert to jpeg\n image = image.convert('RGB')\n file_descriptor = re.sub(r'\\.png$', '.jpg', file_descriptor)\n file_mime = 'JPEG'\n \n \n width, height = image.size\n\n \n if max_width is not None and width > max_width:\n if file_mime == 'PNG':\n #re-convert to jpeg\n image = image.convert('RGB')\n file_descriptor = re.sub(r'\\.png$','.jpg',file_descriptor)\n file_mime = 'JPEG'\n\n height = math.floor(height*max_width/width)\n width = max_width\n image = image.resize((width, height), PIL_Image.ANTIALIAS)\n\n if max_height is not None and height > max_height:\n if file_mime == 'PNG':\n #re-convert to jpeg\n image = image.convert('RGB')\n file_descriptor = re.sub(r'\\.png$', '.jpg', file_descriptor)\n file_mime = 'JPEG'\n\n width = math.floor(width*max_height/height)\n height = max_height\n image = image.resize((width, height), PIL_Image.ANTIALIAS)\n\n\n if file_mime == 'PNG':\n image.save(file_descriptor)\n elif file_mime in ('JPEG', None,):\n image.save(file_descriptor, optimize=True, quality=95)\n else:\n print('File mimetype = %s unknown' % (file_mime,))\n return None, None\n\n return file_descriptor, file_mime\n\n\n @staticmethod\n def save_from_urlData(data, filename = None, max_width = None, max_height = None):\n\n r = re.compile('data:(.+);base64,')\n match = re.search(r, data)\n file_mime = match.group(1)\n\n image_data = bytes(re.sub(r, '', data), encoding='ascii')\n\n image = PIL_Image.open(BytesIO(base64.b64decode(image_data)))\n croped_image = image.crop((0,0,min(image.size),min(image.size)))\n\n return Image.save(croped_image,filename, max_width,max_height)\n \n\n \n","sub_path":"models/main/Image.py","file_name":"Image.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"109398403","text":"import inspect\nimport pyspark\nimport sys\nimport subprocess\nimport shutil\nimport tempfile\nimport uuid\nimport warnings\nimport os\n\n__all__ = ['Captain']\n\nhandle_del = False\n\ntry:\n from subprocess import DEVNULL # py3k\nexcept ImportError:\n import os\n DEVNULL = open(os.devnull, 'wb')\n\n\nclass Captain(object):\n \"\"\"\n The captain of the coffee boat is used to setup packages for Spark to use.\n\n To use it run init, call the `add_pip_packages` for whatever you wish to\n add and then `launch_ship` before you create your SparkContext.\n The coffee boat captain currently works by creating a conda env and\n shipping it.\n\n \"\"\"\n def __init__(self,\n use_conda=True,\n install_local=True,\n env_name=None,\n working_dir=None,\n accept_conda_license=False,\n python_version=None,\n conda_path=None):\n \"\"\"Create a captain to captain the coffee boat and install the packages.\n\n Currently only supports conda, TODO:PEX for others.\n\n :param use_conda: Build a conda package rather than a pex package.\n :param install_local: Attempt to install packages locally as well\n :param env_name: Enviroment name to use. May squash existing enviroment\n :param working_dir: Directory for working in.\n :param accept_conda_license: If you accept the conda license. Set it\n to True to work.\n :param conda_path: Path to conda (optional). Otherwise searches system\n or self-installs.\n\n \"\"\"\n self.accept_conda_license = accept_conda_license\n self.working_dir = working_dir\n self.install_local = install_local\n self.env_name = env_name or \"auto{0}\".format(str(uuid.uuid4()))\n # Kind of hackey, but yay shells....\n self.env_name = self.env_name.replace('-', \"_\")\n self.python_version = (python_version or\n '.'.join(map(str, sys.version_info[:3])))\n if not self.working_dir:\n self.working_dir = tempfile.mkdtemp(prefix=\"coffee_boat_tmp_\")\n import atexit\n if handle_del:\n atexit.register(lambda: shutil.rmtree(self.working_dir))\n self.use_conda = use_conda\n self.conda = conda_path\n self.pip_pkgs = []\n return\n\n def add_pip_packages(self, *pkgs):\n \"\"\"Add pip packages\"\"\"\n active_context = pyspark.context.SparkContext._active_spark_context\n if self.install_local:\n args = [\"pip\", \"install\"]\n args.extend(pkgs)\n subprocess.check_call(args, stdout=DEVNULL)\n self.pip_pkgs.extend(pkgs)\n\n def launch_ship(self):\n \"\"\"Creates a relocatable environment and distributes it.\n\n .. note::\n\n This function *should* be called before you init your SparkContext, if it's\n called after we need to do some sketchy things to make it work.\n \"\"\"\n # Doing sketchy things with the gateway if we've already stopped the context\n active_context = pyspark.context.SparkContext._active_spark_context\n gateway = pyspark.context.SparkContext._gateway\n if active_context is None and gateway is not None:\n try:\n pyspark.context.SparkContext._gateway.jvm.java.lang.System.exit(0)\n except Exception:\n pass\n self._cleanup_keys()\n pyspark.context.SparkContext._gateway = None\n elif active_context is not None:\n warnings.warn(\n \"Launching on an existing SparkContext. Packages will only be available to RDDs\"\n \"created from here forward. 
If this makes you sad, stop the Spark context and\"\n \"re-create those RDDs you want to have access to your packages in.\")\n\n\n if self.use_conda:\n self._setup_or_find_conda()\n return self._launch_conda_ship()\n else:\n return self._launch_pex()\n\n def _launch_conda_ship(self):\n \"\"\"Create a conda enviroment, zips it up, and manipulate the environment\n variables.\n\n \"\"\"\n # Create the conda package env spec\n pkgs = [\"\"]\n pkgs.extend(map(str, self.pip_pkgs))\n pip_packages = '\\n - '.join(pkgs)\n\n # Create the package_spec\n base_package_spec = inspect.cleandoc(\"\"\"\n name: {0}\n dependencies:\n - python=={1}\n - anaconda\n - pip\n - pip:\n \"\"\").format(self.env_name, self.python_version)\n package_spec = \"{0}{1}\".format(base_package_spec, pip_packages)\n package_spec_file = tempfile.NamedTemporaryFile(dir=self.working_dir,\n delete=handle_del)\n package_spec_path = package_spec_file.name\n print(\"Writing package spec to {0}.\".format(package_spec_path))\n package_spec_file.write(package_spec)\n package_spec_file.flush()\n\n # Create the conda env\n conda_prefix = os.path.join(self.working_dir, self.env_name)\n print(\"Creating conda env\")\n if os.path.exists(conda_prefix):\n print(\"Cleaining up old prefix {0}\".format(conda_prefix))\n subprocess.check_call([\"rm\", \"-rf\", conda_prefix])\n subprocess.check_call([self.conda, \"env\", \"create\",\n \"-f\", package_spec_path,\n \"--prefix\", conda_prefix],\n stdout=DEVNULL)\n\n # Package it for distro\n zip_name = \"coffee_boat_{0}.zip\".format(self.env_name)\n zip_target = os.path.join(self.working_dir, zip_name)\n print(\"Packaging conda env\")\n subprocess.check_call([\"zip\", zip_target, \"-r\", conda_prefix],\n stdout=DEVNULL)\n relative_python_path = \".\" + conda_prefix + \"/bin/python\"\n\n # Make a self extractor script\n runner_script = inspect.cleandoc(\"\"\"#!/bin/bash\n if [ -f {0} ];\n then\n unzip {0} &>/dev/null && rm {0} &> /dev/null\n fi\n {1} \"$@\" \"\"\".format(zip_name, relative_python_path))\n script_name = \"coffee_boat_runner_{0}.sh\".format(self.env_name)\n runner_script_path = os.path.join(self.working_dir, script_name)\n with open(runner_script_path, 'w') as f:\n f.write(runner_script)\n subprocess.check_call([\"chmod\", \"a+x\", runner_script_path])\n\n # Adjust environment variables so that the env gets distributed.\n old_args = os.environ.get(\"PYSPARK_SUBMIT_ARGS\", \"pyspark-shell\")\n # Backup the old arguments\n if \"coffee_boat\" not in old_args:\n os.environ[\"BACK_PYSPARK_SUBMIT_ARGS\"] = old_args\n else:\n old_args = os.environ.get(\"BACK_PYSPARK_SUBMIT_ARGS\", \"pyspark-shell\")\n new_args = \"--files {0},{1} {2}\".format(zip_target, runner_script_path, old_args)\n print(\"using {0} as python arguments\".format(new_args))\n os.environ[\"PYSPARK_SUBMIT_ARGS\"] = new_args\n # Handle active/already running contexts.\n sc = pyspark.context.SparkContext._active_spark_context\n if sc is not None:\n print(\"Adding {0} & {1} to existing sc\".format(zip_target, runner_script_path))\n sc.addFile(zip_target)\n sc.addFile(runner_script_path)\n print(\"Updating python exec on existing sc\")\n sc.pythonExec = \"./{0}\".format(script_name)\n else:\n print(\"No active context, depending on submit args.\")\n\n if \"PYSPARK_GATEWAY_PORT\" in os.environ:\n print(\"Hey the Java process is already running, this might not work.\")\n os.environ[\"PYSPARK_PYTHON\"] = \"./{0}\".format(script_name)\n\n def _launch_pex(self):\n \"\"\" Create a pex environment.\"\"\"\n pass\n\n def 
_setup_or_find_conda(self):\n \"\"\"Find conda or set up a conda installation\"\"\"\n # Check if we need to setup conda or return if we already have one\n rc = subprocess.call(['which', 'conda'])\n if rc == 0:\n self.conda = \"conda\"\n return\n if self.conda is not None:\n return\n\n # Install conda if we need to\n if not self.accept_conda_license:\n raise Exception(\"Please accept the conda license by setting \"\n \"accept_conda_license\")\n python_version = sys.version_info[0]\n url = \"https://repo.continuum.io/miniconda/Miniconda%d-latest-Linux-x86_64.sh\" % python_version\n print(\"Downloading conda from %s to %s\" % (url, self.working_dir))\n mini_conda_target = \"%s/%s\" % (self.working_dir, \"miniconda.sh\")\n subprocess.check_call([\"wget\", url, \"-O\", mini_conda_target, \"-nv\"],\n shell=False,\n stdout=DEVNULL)\n print(\"Running conda setup....\")\n subprocess.check_call([\"chmod\", \"a+x\", mini_conda_target],\n shell=False,\n stdout=DEVNULL)\n conda_target = \"%s/%s\" % (self.working_dir, \"conda\")\n subprocess.check_call([mini_conda_target, \"-b\", \"-p\", conda_target],\n stdout=DEVNULL,\n stderr=DEVNULL)\n self.conda = \"%s/bin/conda\" % conda_target\n\n def _cleanup_keys(self):\n import os\n def cleanup_key(name):\n if name in os.environ:\n del os.environ[name]\n keys = [\n \"PYSPARK_PYTHON\",\n \"PYSPARK_GATEWAY_PORT\",\n \"_PYSPARK_DRIVER_CALLBACK_HOST\",\n \"_PYSPARK_DRIVER_CALLBACK_PORT\"]\n map(cleanup_key, keys)\n","sub_path":"coffee_boat/captain.py","file_name":"captain.py","file_ext":"py","file_size_in_byte":9667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"257155949","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'app_manager'\n\nurlpatterns = [\n path('dashboard/', views.dashboard, name='dashboard'), # show companies\n\n # path('company/new', views.company_new, name='company_new'), # add new company\n # path('company/', views.company_show, name='company_show'), # show the company\n\n path('group/new', views.group_new, name='group_new'), # add new group\n path('group/modify//', views.group_modify, name='group_modify'), # modify the group\n path('group/del/', views.group_del, name='group_del'), # add new group\n\n path('group//device/new', views.device_new, name='device_new'), # add new device\n path('device/modify/', views.device_modify, name='device_show'), # modify the device\n path('device/del/', views.device_del, name='device_del'), # add new device\n\n path('user/del//', views.user_del, name='user_del'), # del user from company\n path('user/add//', views.user_add, name='user_add'), # add company_id_to_user\n\n path('logo/upload', views.logo_upload, name='logo_upload'), # show the company\n\n path('device//checklist/new', views.checklist_new, name='checklist_new'), # add new checklist\n path('device//checklist/modify/', views.checklist_new, name='checklist_new'), # modify checklist\n path('device//checklist/del/', views.checklist_new, name='checklist_new'), # del checklist\n\n path('generic//', views.list_show, name='list_show'), # add new group\n path('add//', views.list_edit, name='list_edit'), # add new group\n\n]\n","sub_path":"app_manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"560531582","text":"__author__ = 'PyBeaner'\n\n# You need to check the start or end of a string for specific text patterns, such as filename\n# extensions, URL schemes, and so on.\n\nfilename = \"spam.txt\"\nprint(filename.endswith(\".txt\"))\nprint(filename.startswith(\"file:\"))\n\nurl = \"http://www.python.org\"\nprint(url.startswith(\"http:\"))\n\n\nimport os\nfilenames = os.listdir(\"..\")\nprint(filenames)\nr = [name for name in filenames if name.startswith(\"Matching\")]\nprint(r)\nprint(any(name.endswith(\"String\") for name in filenames))\n\nfrom urllib.request import urlopen\n\ndef read_data(name):\n # tuple works(but not list or set)\n if name.startswith(\"http:\",\"https:\",\"ftp:\"):\n return urlopen(name).read()\n with open(name) as f:\n return f.read()\n\nchoices = [\"http\",\"ftp\"]\nurl = \"http://www.python.org\"\n# startswith first arg must be str or a tuple of str, not list\n# url.startswith(choices)\nurl.startswith(tuple(choices))","sub_path":"Chapter 2. Strings and Text/Matching Text at the Start or End of a String/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"434976650","text":"#! /usr/bin/env python\nimport os\nimport sys\nimport unittest\nfrom irods.session import iRODSSession\nfrom irods.exception import NetworkException\nimport irods.test.config as config\n\n\nclass TestConnections(unittest.TestCase):\n\n def setUp(self):\n self.sess = iRODSSession(host=config.IRODS_SERVER_HOST,\n port=config.IRODS_SERVER_PORT, # 4444: why?\n user=config.IRODS_USER_USERNAME,\n password=config.IRODS_USER_PASSWORD,\n zone=config.IRODS_SERVER_ZONE)\n\n def tearDown(self):\n '''Close connections\n '''\n self.sess.cleanup()\n\n def test_connection(self):\n with self.sess.pool.get_connection() as conn:\n self.assertTrue(conn)\n\n def test_connection_destructor(self):\n conn = self.sess.pool.get_connection()\n conn.__del__()\n conn.release(destroy=True)\n\n def test_failed_connection(self):\n # mess with the account's port\n self.sess.pool.account.port = 6666\n\n # try connecting\n with self.assertRaises(NetworkException):\n self.sess.pool.get_connection()\n\n # set port back\n self.sess.pool.account.port = config.IRODS_SERVER_PORT\n\n def test_send_failure(self):\n with self.sess.pool.get_connection() as conn:\n # try to close connection twice, 2nd one should fail\n conn.disconnect()\n with self.assertRaises(NetworkException):\n conn.disconnect()\n\n def test_reply_failure(self):\n with self.sess.pool.get_connection() as conn:\n # close connection\n conn.disconnect()\n\n # try sending reply\n with self.assertRaises(NetworkException):\n conn.reply(0)\n\n\nif __name__ == '__main__':\n # let the tests find the parent irods lib\n sys.path.insert(0, os.path.abspath('../..'))\n unittest.main()\n","sub_path":"irods/test/connection_test.py","file_name":"connection_test.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"492122481","text":"\"\"\"This script increases the minor version of the current project.\"\"\"\n\nimport os\n\ndef increaseVersion(fname):\n assert os.path.exists(fname)\n with open(fname) as f:\n lines = f.read().split(\"\\n\")\n for i, line in enumerate(lines):\n if line.startswith(\"__version__\"):\n oldVersionString = line.split(\"'\")[1]\n newVersion = [int(x) for x in oldVersionString.split(\".\")]\n newVersion[-1] += 1\n newVersionString = \".\".join([str(x) for x in newVersion])\n lines[i] = lines[i].replace(oldVersionString, newVersionString)\n with open(fname, 'w') as f:\n f.write(\"\\n\".join(lines))\n print(f\"Upgraded: {oldVersionString} -> {newVersionString}\")\n return\n\n\nif __name__ == \"__main__\":\n PATH_HERE = os.path.abspath(os.path.dirname(__file__))\n versionFile = os.path.abspath(PATH_HERE+\"/../../src/pyabf/__init__.py\")\n increaseVersion(versionFile)\n","sub_path":"dev/scripts/versionIncrease.py","file_name":"versionIncrease.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"582875373","text":"from OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\nfrom math import pi, cos, sin\nfrom numpy import arctan2\nfrom node import *\n\nclass Player:\n def __init__(s, parent_node, direction = 0):\n s.parent_node = parent_node #the maze node that the player is currently inside of\n s.pos = [0.0, 0.0, 0.0]\n s.keyState = {}\n s.xy_dir = direction\n s.z_dir = 0\n s.height = 0.5\n s.left_mouse_button = False\n s.mouse_position = [0, 0]\n \n def keyboardFunc(s, key, x, y):\n s.keyState[key.lower()] = True\n def keyboardUpFunc(s, key, x, y):\n s.keyState[key.lower()] = False\n def mouseFunc(s, button, state, x, y):\n if state is 0:\n s.left_mouse_button = True\n s.mouse_position = [x, y]\n else:\n s.left_mouse_button = False\n def motionFunc(s, x, y):\n if s.left_mouse_button:\n s.xy_dir += float(s.mouse_position[0] - x)/200.0\n s.z_dir += float(s.mouse_position[1] - y)/200.0\n \n s.mouse_position = [x, y]\n \n s.xy_dir = s.xy_dir%(pi*2)\n s.z_dir = min(pi/2, max(s.z_dir, -pi/2))\n \n def update(s):\n def keyDown(key):\n return key in s.keyState and s.keyState[key]\n movement = 1.0/100.0\n if keyDown('w'):\n s.pos[0] += cos(s.xy_dir)*movement\n s.pos[1] += sin(s.xy_dir)*movement\n if keyDown('s'):\n s.pos[0] -= cos(s.xy_dir)*movement\n s.pos[1] -= sin(s.xy_dir)*movement\n if keyDown('a'):\n s.pos[0] += cos(s.xy_dir + pi/2)*movement\n s.pos[1] += sin(s.xy_dir + pi/2)*movement\n if keyDown('d'):\n s.pos[0] -= cos(s.xy_dir + pi/2)*movement\n s.pos[1] -= sin(s.xy_dir + pi/2)*movement\n \n if s.parent_node is not None:\n next_node = None\n from_dir = to_dir = None\n w = float(s.parent_node.width) /2.0\n h = float(s.parent_node.height)/2.0\n conditions = {DR_LEFT: s.pos[0] < -w, DR_RIGHT: s.pos[0] > w, DR_TOP: s.pos[1] < -h, DR_BOTTOM: s.pos[1] > h}\n for dir in [DR_LEFT, DR_RIGHT, DR_TOP, DR_BOTTOM]:\n if dir in s.parent_node.doors and conditions[dir]:\n next_pair = s.parent_node.doors[dir]\n next_node = next_pair[0]\n to_dir = next_pair[1]\n from_dir = dir\n \n if next_node is not None:\n print(\"from_dir: {} to_dir: {}\".format(from_dir, to_dir))\n mag = (s.pos[0]**2 + s.pos[1]**2)**0.5\n dir = arctan2(s.pos[0], s.pos[1]) #this switches x and y apparently\n \n print('{} {}'.format(s.pos[0], s.pos[1]))\n #print('m = {}'.format([mag*cos(dir - from_dir*pi/2.0), mag*sin(dir - from_dir*pi/2.0) - 0]))\n s.pos[0:2] = [mag*sin(dir - from_dir*pi/2.0), mag*cos(dir - from_dir*pi/2.0) - 0]#3.0]\n print('{} {}'.format(s.pos[0], s.pos[1]))\n s.pos[1]+=3.0\n print('{} {}'.format(s.pos[0], s.pos[1]))\n \n mag = (s.pos[0]**2 + s.pos[1]**2)**0.5\n dir = arctan2(s.pos[0], s.pos[1])\n \n s.pos[0:2] = [mag*sin(dir + (to_dir-2)*pi/2.0), mag*cos(dir + (to_dir-2)*pi/2.0)]\n print('{} {}'.format(s.pos[0], s.pos[1]))\n \n s.xy_dir += (from_dir - to_dir + 2)*pi/2.0\n s.parent_node = next_node\n \n def displayFunc(s):\n zcomp = cos(s.z_dir)\n #print(\" {} {} {}\".format(cos(s.xy_dir)*zcomp, sin(s.xy_dir)*zcomp, cos(s.z_dir)))\n gluLookAt(0, 0, s.height, cos(s.xy_dir)*zcomp, sin(s.xy_dir)*zcomp, sin(s.z_dir) + s.height, 0,0,1)\n glTranslate(-s.pos[0], -s.pos[1], -s.pos[2])\n \n s.parent_node.displayFunc()","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"51386023","text":"# encoding= utf-8\n# Author: HHB\n# Data: 2022/11/09 10:54\n\n\nimport re\nfrom lxml import etree\nimport os\nimport json\nimport datetime\nimport uuid\nimport cv2\nimport time\nimport requests\nimport random\nfrom selenium.webdriver import ActionChains\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\n\n\ndef canny(filepath, cell=7):\n img = cv2.imread(filepath, 0)\n blurred = cv2.GaussianBlur(img, (cell, cell), 0)\n return cv2.Canny(blurred, 240, 250)\n\n\ndef getPosition(img_file1, img_file2):\n img = canny(img_file1)\n img2 = img.copy()\n template = canny(img_file2, cell=5)\n w, h = template.shape[::-1]\n img = img2.copy()\n method = eval(\"cv2.TM_CCOEFF_NORMED\")\n\n res = cv2.matchTemplate(img, template, method)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n top_left = min_loc\n else:\n top_left = max_loc\n bottom_right = (top_left[0] + w, top_left[1] + h)\n\n cv2.rectangle(img, top_left, bottom_right, 255, 2)\n return top_left\n\n\ndef get_track(distance):\n v = 0\n t = 0.4\n tracks = []\n current = 0\n mid = distance * 7 / 8\n distance += 5\n while current < distance:\n if current < mid:\n a = random.randint(2, 4) # 加速运动\n else:\n a = -random.randint(1, 3) # 减速运动\n v0 = v\n s = v0 * t + 0.6 * a * (t ** 2)\n current += s\n tracks.append(round(s))\n v = v0 + a * t\n random.shuffle(tracks)\n return tracks\n\n\ndef checkCode(b, img_file1, img_file2):\n scale = 1.7\n try:\n while 1:\n t = b.find_element_by_xpath('//*[@id=\"captcha-verify-image\"]')\n t = t.get_attribute(\"src\")\n img = requests.get(t)\n f = open(img_file1, \"wb\")\n f.write(img.content)\n f.close()\n t = b.find_element_by_xpath('//*[@id=\"captcha_container\"]/div/div[2]/img[2]').get_attribute(\"src\")\n img = requests.get(t)\n f = open(img_file2, \"wb\")\n f.write(img.content)\n f.close()\n p = int(getPosition(img_file1, img_file2)[0] / scale)\n # print(p)\n button = b.find_element_by_xpath('//*[@id=\"secsdk-captcha-drag-wrapper\"]/div[2]')\n tracks = get_track(p)\n ActionChains(b).click_and_hold(button).perform()\n for x in tracks:\n ActionChains(b).move_by_offset(xoffset=x, yoffset=0).perform()\n ActionChains(b).release(button).perform()\n time.sleep(1)\n except:\n print(\"ok\")\n\n# 此脚本可以跑单个页面 也可以作为跑多个页面的线程\n# 跑单个时 修改下方要爬的url直接运行此文件\n# 作为线程时 调用get_comment(url),并传入参数即可\n\n \ndef get_review_number(b):\n # //*[@id=\"root\"]/div/div[2]/div/div/div[1]/div[1]/div[3]/div/div[2]/div[1]/div[2]/span\n # num=b.find_element_by_xpath('//*[@id=\"root\"]/div/div[2]/div/div/div[1]/div[1]/div[3]/div/div[2]/div[1]/div[2]/span').text\n # print(int(num))\n for x in range(1, 15, 5):\n time.sleep(1)\n j = x * 12\n js = 'document.documentElement.scrollTop=document.documentElement.scrollHeight* %f' % j\n b.execute_script(js)\n\n\ndef get_comment(url, id):\n # chrome_d = \"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe\"\n chrome_d = r\"./chromedriver.exe\"\n option = webdriver.ChromeOptions()\n option.add_argument('headless') # 添加无头模式\n b = webdriver.Chrome(executable_path=chrome_d, options=option)\n b.get(url)\n b.maximize_window()\n time.sleep(2)\n img1 = str(uuid.uuid1()) + '.jpeg'\n img2 = str(uuid.uuid1()) + '.png'\n checkCode(b, img1, img2) # 过验证码\n if os.path.exists(img1):\n os.remove(img1)\n if os.path.exists(img2):\n os.remove(img2)\n time.sleep(2)\n get_review_number(b)\n b.implicitly_wait(3)\n # Review_list = 
b.find_elements_by_xpath('//*[@id=\"root\"]/div/div[2]/div/div/div[1]/div[3]/div/div/div[4]/div/div')\n Review_list = b.find_element_by_xpath('//*[@id=\"root\"]/div/div[2]/div/div/div[1]/div[3]/div/div').get_attribute(\n \"outerHTML\")\n b.close()\n html = etree.HTML(Review_list)\n Review_list = html.xpath('//div[4]/div/div[@class=\"qolG5qEO\"]')\n\n review_infos = [] # 内容列表\n for i in Review_list:\n # print('1',i)\n review_html = etree.HTML(etree.tostring(i).decode())\n # print(review_html)\n review = review_html.xpath('//span[@class=\"mzZanXbP\"]/span/span/span[1]/span/text()') # 用户名和评论内容\n # print(review)\n try:\n if len(review) == 1:\n # print(1111)\n review.append('[表情]')\n if len(review[2]) != 0:\n review[1] = review[1] + review[2]\n except:\n pass\n result_like = review_html.xpath('//div[2]/div[2]/div/p/span/text()') # 点赞数\n content = etree.tostring(i).decode()\n result_time = re.findall(r'(.*?)
', content) # 评论时间\n result = re.findall(r'a href=\"//(.*?)\" class=\"yqT9PfJg\"', content) # 用户主页地址\n if len(result_time) == 0:\n result_time[0] = 0\n # print(review[0], ':', review[1])\n # print(result_time[0],' ',result[0])\n review_info = {\"用户名\": review[0], \"评论内容\": review[1], \"评论时间\": result_time[0], '点赞数': result_like[0],\n \"用户主页链接\": result[0]}\n review_infos.append(review_info)\n this = os.getcwd() # 获取当前路径\n this = this + \"\\\\tiktok_review_info\"\n ti = 'review_info%s-%s.txt' % (str(datetime.datetime.now().date()), id) # 获取时间拼接字符串作为文件名\n path = os.path.join(this, ti) # 吧两段拼成文件存储路径\n fp = open(path, 'w', encoding='utf-8')\n fp.write('[\\n')\n for i in review_infos:\n print(i)\n data = json.dumps(i, ensure_ascii=False)\n fp.write(data + ',\\n')\n fp.write(']')\n fp.close()\n\n\nif __name__ == '__main__':\n url = 'https://www.douyin.com/search/%E8%8B%8F%E5%B7%9E%E6%96%B0%E9%97%BB?aid=322ae664-96f1-430c-8161-86e1bd61ec67&publish_time=0&sort_type=0&source=normal_search&type=video'\n get_comment(url, id)","sub_path":"PoliceProject/tikTok/tilTok_test.py","file_name":"tilTok_test.py","file_ext":"py","file_size_in_byte":6327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
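get_track in the record above produces a human-looking drag path: accelerate for roughly 7/8 of the distance, then brake. A self-contained sketch of that velocity profile, assuming a fixed time step; humanlike_track and its stall guard are assumptions, not the exact code above:

import random

def humanlike_track(distance, dt=0.4):
    v, moved, steps = 0.0, 0.0, []
    turn = distance * 7 / 8  # switch from accelerating to braking here
    while moved < distance:
        a = random.randint(2, 4) if moved < turn else -random.randint(1, 3)
        s = max(v * dt + 0.5 * a * dt * dt, 1.0)  # guard: always inch forward
        moved += s
        steps.append(round(s))
        v += a * dt
    return steps

print(sum(humanlike_track(120)))  # close to 120, give or take rounding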
+{"seq_id":"70402061","text":"\"\"\"\nprob: A string string of lowercase letters is given. We want to partition this string into as many parts as possible so that each letter appears in at most one part, and return a list of integers representing the size of these parts.\n\nFor example:\nInput: string = \"ababfeefhijkh\"\nOutput: [4,4,5]\n\nExplanation:\nThe partition is \"abab\", \"feef\", \"hijkh\". This is a partition so that each letter appears in at most one part.\n\nIdea: We need an array last[char] -> index of S where char occurs last. Then, let anchor and j be the start and end of the current partition. If we are at a label that occurs last at some index after j, we'll extend the partition j = last[c]. If we are at the end of the partition (i == j) then we'll append a partition size to our answer, and set the start of our new partition to i+1\nComp:\n\"\"\"\n\n\ndef partition_labels(string):\n last = {c: i for i, c in enumerate(string)}\n right, left, res = 0, 0, []\n for i, c in enumerate(string):\n right = max(right, last[c])\n if i == right:\n res.append(i - left + 1)\n left = i + 1\n return res\n\n\nstring = \"ababfeefhijkh\"\nexpected = [4, 4, 5]\nactual = partition_labels(string)\nprint(expected == actual)\n\nstring = \"ababcbacadefegdehijhklij\"\nexpected = [9, 7, 8]\nactual = partition_labels(string)\nprint(expected == actual)\n","sub_path":"other/prgcrk/array_string/pointers/partition_labels.py","file_name":"partition_labels.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"579714111","text":"\"\"\"\n q6# Binary-Search-3\n\n## Problem1\nPow(x,n) (https://leetcode.com/problems/powx-n/)\n\nImplement pow(x, n), which calculates x raised to the power n (xn).\n\nExample 1:\n\nInput: 2.00000, 10\nOutput: 1024.00000\nExample 2:\n\nInput: 2.10000, 3\nOutput: 9.26100\nExample 3:\n\nInput: 2.00000, -2\nOutput: 0.25000\nExplanation: 2-2 = 1/22 = 1/4 = 0.25\nNote:\n\n-100.0 < x < 100.0\nn is a 32-bit signed integer, within the range [−231, 231 − 1]\n\n\nTIME - 0(LOG N)\nSPACE - O(LOG N)\n\"\"\"\n\n\nclass Solution:\n def myPow(self, x: float, n: int) -> float:\n if n == 0: # base case\n return 1\n\n y = self.myPow(x, int(n / 2)) #recursive case\n\n if n % 2 == 0: # n is even then 2 recursive calls\n return y * y\n else: # n is odd then 2 recursive calls and product with if n >0 or with 1/x if n <0\n if n > 0:\n return y * y * x\n else:\n return y * y * (1 / x)\n\n\n\n\n\n","sub_path":"powerx-n.py","file_name":"powerx-n.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"566211311","text":"#python\nimport datetime\nimport os\n\n#libs\nfrom django.utils import timezone\nfrom django.test import TestCase, LiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\n\n#local\nfrom .models import Category, Album\n\n\nclass AlbumViewsTestCase(TestCase):\n def test_index(self):\n Category.objects.create(\n name='Blues brothers',\n slug='Blues brothers',\n views=10,\n likes=20\n )\n Category.objects.create(\n name='My new category',\n slug='My new category',\n views=20,\n likes=20\n )\n\n resp = self.client.get('/albums/')\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('categories' in resp.context)\n self.assertEqual([category.slug for category in\n resp.context['categories']],\n ['My new category', 'Blues brothers'])\n\n\nclass AlbumViewsTest(TestCase):\n fixtures = ['albums_views_testdata.json']\n\n def test_index(self):\n resp = self.client.get('/albums/')\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('categories' in resp.context)\n self.assertEqual([category.slug for category in\n resp.context['categories']],\n ['rock', 'blues', 'pop', 'test'])\n\n category_blues = resp.context['categories'][1]\n self.assertEqual(category_blues.name, 'Blues')\n self.assertEqual(category_blues.slug, 'blues')\n self.assertEqual(category_blues.views, 0)\n self.assertEqual(category_blues.likes, 0)\n\n category_pop = resp.context['categories'][2]\n self.assertEqual(category_pop.name, 'Pop')\n self.assertEqual(category_pop.slug, 'pop')\n self.assertEqual(category_pop.views, 0)\n self.assertEqual(category_pop.likes, 0)\n\n category_rock = resp.context['categories'][0]\n self.assertEqual(category_rock.name, 'Rock')\n self.assertEqual(category_rock.slug, 'rock')\n self.assertEqual(category_rock.views, 50)\n self.assertEqual(category_rock.likes, 0)\n\n def test_albums(self):\n resp = self.client.get('/albums/category/blues/')\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('albums' in resp.context)\n self.assertEqual([album.slug for album in resp.context['albums']],\n ['myalbum', 'stevealbum'])\n\n album_1 = resp.context['albums'][0]\n self.assertEqual(album_1.name, 'Myalbum')\n self.assertEqual(album_1.slug, 'myalbum')\n self.assertEqual(album_1.publication_date,\n datetime.date(year=2013, month=11, day=20))\n self.assertEqual(album_1.views, 123)\n self.assertEqual(album_1.likes, 0)\n\n album_2 = resp.context['albums'][1]\n self.assertEqual(album_2.name, 'SteveAlbum')\n self.assertEqual(album_2.slug, 'stevealbum')\n self.assertEqual(album_2.publication_date,\n datetime.date(year=2013, month=11, day=20))\n self.assertEqual(album_2.views, 0)\n self.assertEqual(album_2.likes, 0)\n\n\nclass AlbumCreate(TestCase):\n fixtures = ['albums_views_testdata.json']\n\n def create_category(self, name=\"test category\", slug=\"testcategory\"):\n return Album.objects.create(name=name,\n slug=slug,\n )\n\n def test_category_creation(self):\n cat = self.create_category()\n self.assertTrue(isinstance(cat, Category))\n self.assertEqual(cat.__unicode__(), cat.name)\n\n def create_album(self, name=\"test album\", slug=\"blah\"):\n return Album.objects.create(name=name,\n slug=slug,\n publication_date=timezone.now(),\n views=0,\n likes=0,\n )\n\n def test_album_creation(self):\n albums = self.create_album()\n self.assertTrue(isinstance(albums, Album))\n self.assertEqual(albums.__unicode__(), albums.name)\n\n\nclass CategoryTest(LiveServerTestCase):\n fixtures = 
['admin_user.json']\n\n def setUp(self):\n self.browser = webdriver.Firefox()\n self.browser.implicitly_wait(3)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_for_create_new_category_via_user(self):\n self.browser.get(self.live_server_url + '/albums/')\n\n login_user_link = self.browser.find_element_by_link_text('Login')\n login_user_link.click()\n\n body = self.browser.find_element_by_tag_name('body')\n self.assertIn('Login', body.text)\n\n username_field = self.browser.find_element_by_name('username')\n username_field.send_keys('Squalles')\n\n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys('sq321')\n password_field.send_keys(Keys.RETURN)\n\n body = self.browser.find_element_by_tag_name('body')\n self.assertIn('MyAlbum', body.text)\n\n edit_link = self.browser.find_element(By.CSS_SELECTOR,\n \".dropdown-toggle\")\n edit_link.click()\n edit_link = self.browser.find_element(By.ID, value=\"Create_Category\")\n edit_link.click()\n\n body = self.browser.find_element_by_tag_name('body')\n self.assertIn('Create Category', body.text)\n\n name_field = self.browser.find_element_by_name('name')\n name_field.send_keys(\"My Test Category\")\n\n create_button = self.browser.find_element_by_css_selector(\n \"button[value='Save']\")\n create_button.click()\n\n\nclass CategoryModelTest(TestCase):\n def test_creating_a_new_category(self):\n category = Category()\n category.name = \"TestCategory\"\n category.content = \"Example content test\"\n category.slug = \"testcategory\"\n\n category.save()\n\n all_category_in_database = Category.objects.all()\n self.assertEqual(len(all_category_in_database), 1)\n only_category_in_database = all_category_in_database[0]\n self.assertEqual(only_category_in_database, category)\n\n self.assertEqual(only_category_in_database.name, \"TestCategory\")\n self.assertEqual(only_category_in_database.content,\n \"Example content test\")\n self.assertEqual(only_category_in_database.slug, \"testcategory\")\n\n\nclass ArtistTest(LiveServerTestCase):\n fixtures = ['admin_user.json', 'all_data.json']\n\n def setUp(self):\n self.browser = webdriver.Firefox()\n self.browser.implicitly_wait(3)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_for_create_new_artist_via_user(self):\n self.browser.get(self.live_server_url + '/albums/')\n\n login_user_link = self.browser.find_element_by_link_text('Login')\n login_user_link.click()\n\n body = self.browser.find_element_by_tag_name('body')\n self.assertIn('Login', body.text)\n\n username_field = self.browser.find_element_by_name('username')\n username_field.send_keys('Squalles')\n\n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys('sq321')\n password_field.send_keys(Keys.RETURN)\n\n body = self.browser.find_element_by_tag_name('body')\n self.assertIn('Genres', body.text)\n\n genres = self.browser.find_element_by_link_text('Genres')\n genres.click()\n choose_category = self.browser.find_element_by_link_text('Blues')\n choose_category.click()\n\n body = self.browser.find_element_by_tag_name('body')\n self.assertIn('Edit', body.text)\n\n create_artist = self.browser.find_element_by_link_text('Edit')\n create_artist.click()\n create_artist = self.browser.find_elements_by_link_text(\n 'Create New Artist')\n create_artist[0].click()\n\n body = self.browser.find_element_by_tag_name('body')\n self.assertIn('Create Artist', body.text)\n\n self.browser.find_element_by_name('first_name').send_keys(\"John\")\n 
self.browser.find_element_by_name('last_name').send_keys(\"Doe\")\n self.browser.find_element_by_name('born').send_keys(\"1942-02-03\")\n self.browser.find_element_by_name('known_as').send_keys(\"Test\")\n self.browser.find_element_by_name('start_date').send_keys(\n \"1972-02-01\")\n self.browser.find_element_by_name(\"picture\").send_keys(\n os.getcwd() + \"/image.png/\")\n self.browser.find_element_by_class_name('forminput').send_keys(\"TestTag\")\n\n self.browser.find_element_by_css_selector(\n \"button[value='Add']\").click()\n\n body = self.browser.find_element_by_tag_name('body')\n self.assertIn('John Doe', body.text)\n\n\n\n\n","sub_path":"albums/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"614551800","text":"# -*- coding: utf-8 -*-\n'''\nAnalyzer Object\n===============\n'''\n\nfrom __future__ import annotations\n\n__all__ = ('Analyzer',)\n\nimport os\nfrom analyzer.core.frequency_analyzer import FrequencyAnalyzer\nfrom analyzer.core.percent_analyzer import PercentAnalyzer\nfrom analyzer.core.tokenizer import Tokenizer\nfrom analyzer.core.word_analyzer import WordAnalyzer\nfrom analyzer.datatypes.analyzerexception import AnalyzerError\nfrom analyzer.datatypes.tokenlist import TokenList\nfrom builder.core.executer import Executer\nfrom builder.core.outputter import Outputter\nfrom builder.datatypes.outputmode import OutputMode\nfrom builder.datatypes.rawdata import RawData\nfrom builder.datatypes.resultdata import ResultData\nfrom builder.datatypes.textlist import TextList\nfrom builder.utils import assertion\nfrom builder.utils.util_file import get_content_from_text_file\nfrom builder.utils.logger import MyLogger\n\n\n# logger\nLOG = MyLogger.get_logger(__name__)\nLOG.set_file_handler()\n\n\nclass Analyzer(Executer):\n ''' Analyzer object.\n '''\n def __init__(self):\n super().__init__()\n LOG.info('ANALYZER: initialize')\n\n def execute(self, src: (str, list, TextList),\n person_names: list,\n is_debug: bool=False) -> ResultData: # pragma: no cover\n LOG.info('ANALYZER: start exec')\n is_succeeded = True\n error = None\n basesrc = None\n result = ResultData([], is_succeeded, error)\n\n if isinstance(src, str):\n basesrc = TextList(*get_content_from_text_file(src))\n elif isinstance(src, TextList):\n basesrc = src\n elif isinstance(src, (list, tuple)):\n basesrc = TextList(*src)\n else:\n msg = f'Invalid analyze source!: {src}'\n LOG.critical(msg)\n return ResultData(result, False, AnalyzerError(msg))\n\n tmp = self._rid_tag(basesrc)\n\n LOG.info('TOKENIZER: call')\n result = assertion.is_instance(Tokenizer().execute(tmp, person_names), ResultData)\n if not result.is_succeeded:\n return result\n tokens = assertion.is_instance(result.data, TokenList)\n\n LOG.info('WORD_ANALYZER: call')\n result = assertion.is_instance(WordAnalyzer().execute(tokens), ResultData)\n if not result.is_succeeded:\n return result\n word_data = assertion.is_listlike(result.data)\n\n LOG.info('PERCENT_ANALYZER: call')\n result = assertion.is_instance(PercentAnalyzer().execute(tmp), ResultData)\n if not result.is_succeeded:\n return result\n percent_data = assertion.is_listlike(result.data)\n\n LOG.info('FREQUENCY_ANALYZER: call')\n result = assertion.is_instance(FrequencyAnalyzer().execute(tokens), ResultData)\n if not result.is_succeeded:\n return result\n freq_data = assertion.is_listlike(result.data)\n\n LOG.info('Analyzer result output')\n result_data = percent_data + ['\\n---\\n'] \\\n + word_data + ['\\n---\\n'] \\\n + freq_data\n fname = 'result'\n suffix = ''\n extention = 'md'\n builddir = 'build/results'\n mode = OutputMode.CONSOLE if is_debug else OutputMode.FILE\n data = TextList(*[f'{line}\\n' for line in result_data])\n Outputter().execute(data, mode, fname, suffix, extention, builddir)\n return result\n\n #\n # private\n #\n\n def _rid_tag(self, src: TextList) -> TextList:\n LOG.info('ANALYZER: rid tags start')\n tmp = []\n for line in assertion.is_instance(src, TextList).data:\n assertion.is_str(line)\n if line.startswith('#') or line.startswith('\\n#'):\n continue\n elif line.startswith('---') or line.startswith('\\n---'):\n continue\n elif line in ('\\n', '\\n\\n'):\n continue\n else:\n tmp.append(line)\n return 
TextList(*tmp)\n","sub_path":"analyzer/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
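_rid_tag above filters heading lines, horizontal rules, and blank lines before tokenizing. The same filter in isolation, assuming plain strings instead of the TextList wrapper; strip_tag_lines is a hypothetical stand-in:

def strip_tag_lines(lines):
    kept = []
    for line in lines:
        if line.startswith('#') or line.startswith('\n#'):
            continue                     # heading
        if line.startswith('---') or line.startswith('\n---'):
            continue                     # horizontal rule
        if line in ('\n', '\n\n'):
            continue                     # blank line
        kept.append(line)
    return kept

assert strip_tag_lines(['# title\n', 'body\n', '---\n', '\n']) == ['body\n']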
+{"seq_id":"156510914","text":"\"\"\"\n\n metadata.py\n\n\n Lukas Puehringer \n Santiago Torres \n\n\n Oct 23, 2017\n\n\n See LICENSE for licensing information.\n\n\n Provides a container class `Metablock` for signed metadata and\n functions for signing, signature verification, de-serialization and\n serialization from and to JSON.\n\n\"\"\"\n\nimport attr\nimport json\nimport canonicaljson\n\nimport securesystemslib.keys\nimport securesystemslib.formats\nimport securesystemslib.exceptions\n\nfrom in_toto.models.link import Link\nfrom in_toto.models.layout import Layout\nfrom in_toto.exceptions import SignatureVerificationError\n\n@attr.s(repr=False, init=False)\nclass Metablock(object):\n \"\"\" This object holds the in-toto metablock data structure. This includes\n the fields \"signed\" and \"signatures\", i.e., what was signed and the\n signatures. \"\"\"\n signatures = attr.ib()\n signed = attr.ib()\n\n\n def __init__(self, **kwargs):\n self.signatures = kwargs.get(\"signatures\", [])\n self.signed = kwargs.get(\"signed\")\n\n\n def __repr__(self):\n \"\"\"Returns a JSON string representation of the object.\"\"\"\n # the double {{'s is the escape sequence for an individual {. We wrap this\n # under a format string to avoid encoding to json twice (which turns a json\n # string into a string and so on...\n # FIXME:\n # We are mixing 3 JSON string formats here: The value of \"signed\" is\n # \"pretty printed canonical json\", the value of \"signatures\" is\n # \"canonical json\" and the container is just \"json\".\n # Is this really what we want?\n return '{{\"signed\": {}, \"signatures\": {}}}'.format(self.signed,\n canonicaljson.encode_canonical_json(self.signatures))\n\n\n def dump(self, filename):\n \"\"\"\n \n Write the JSON string representation of the Metablock object\n to disk.\n\n \n filename:\n The path to write the file to.\n\n \n Writing metadata file to disk\n\n \n None.\n\n \"\"\"\n with open(filename, \"wt\") as fp:\n fp.write(\"{}\".format(self))\n\n\n @staticmethod\n def load(path):\n \"\"\"\n \n Loads the JSON string representation of signed metadata from disk\n and creates a Metablock object.\n The `signed` attribute of the Metablock object is assigned a Link\n or Layout object, depending on the `_type` field in the loaded\n metadata file.\n\n \n path:\n The path to write the file to.\n\n \n Reading metadata file from disk\n\n \n None.\n\n \"\"\"\n\n with open(path, \"r\") as fp:\n data = json.load(fp)\n\n signatures = data.get(\"signatures\", [])\n signed_data = data.get(\"signed\", {})\n signed_type = signed_data.get(\"_type\")\n\n if signed_type == \"link\":\n signed = Link.read(signed_data)\n\n elif signed_type == \"layout\":\n signed = Layout.read(signed_data)\n\n else:\n raise securesystemslib.exceptions.FormatError(\"Invalid Metadata format\")\n\n return Metablock(signatures=signatures, signed=signed)\n\n\n @property\n def _type(self):\n \"\"\" Shortcut to the _type property of the contained Link or Layout object,\n should be one of \"link\" or \"layout\". 
\"\"\"\n return self.signed._type\n\n\n def sign(self, key):\n \"\"\"\n \n Signs the pretty printed canonical JSON representation\n (see models.common.Signable.__repr__) of the Link or Layout object\n contained in the `signed` property with the passed key and appends the\n created signature to `signatures`.\n\n \n key:\n A signing key in the format securesystemslib.formats.KEY_SCHEMA\n\n \n None.\n\n \"\"\"\n securesystemslib.formats.KEY_SCHEMA.check_match(key)\n\n signature = securesystemslib.keys.create_signature(key, repr(self.signed))\n self.signatures.append(signature)\n\n\n def verify_signatures(self, keys_dict):\n \"\"\"\n \n Verifies all signatures found in the `signatures` property using the keys\n from the passed dictionary of keys and the pretty printed canonical JSON\n representation (see models.common.Signable.__repr__) of the Link or\n Layout object contained in `signed`.\n\n Verification fails if,\n - the passed keys don't have the right format,\n - the object is not signed,\n - there is a signature for which no key was passed,\n - if any of the signatures is actually broken.\n\n Note:\n This will be revised with in-toto/in-toto#135\n\n \n keys_dict:\n Verifying keys in the format:\n securesystemslib.formats.KEYDICT_SCHEMA\n\n \n FormatError\n if the passed key dictionary is not conformant with\n securesystemslib.formats.KEYDICT_SCHEMA\n\n SignatureVerificationError\n if the Metablock is not signed\n\n if the Metablock carries a signature for which no key is found in\n the passed key dictionary, which means that multiple signatures\n have to be verified at once\n\n if any of the verified signatures is actually broken\n\n \n None.\n\n \"\"\"\n securesystemslib.formats.KEYDICT_SCHEMA.check_match(keys_dict)\n\n if not self.signatures or len(self.signatures) <= 0:\n raise SignatureVerificationError(\"No signatures found\")\n\n for signature in self.signatures:\n keyid = signature[\"keyid\"]\n try:\n key = keys_dict[keyid]\n except KeyError:\n raise SignatureVerificationError(\n \"Signature key not found, key id is '{0}'\".format(keyid))\n if not securesystemslib.keys.verify_signature(\n key, signature, repr(self.signed)):\n raise SignatureVerificationError(\"Invalid signature\")","sub_path":"in_toto/models/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"160360289","text":"from . import stock_prediction\r\nfrom flask import request, jsonify\r\nfrom webapp.models import *\r\nfrom library.datetime_function import get_offset_date\r\nfrom library.sql_function import sql_to_object,object_to_sql\r\n\r\n\r\n@stock_prediction.route('api_comprehensive_analysis', methods=['GET'])\r\ndef api_comprehensive_analysis():\r\n code = request.args.get('code')\r\n date = request.args.get('date')\r\n bar_data = Stock_Daily_Bar.query.filter(Stock_Daily_Bar.ts_code == code, Stock_Daily_Bar.trade_date <= date,\r\n Stock_Daily_Bar.trade_date >= get_offset_date(date, -730)).order_by(\r\n Stock_Daily_Bar.trade_date.asc()).all()\r\n basic_data = Stock_Daily_Basic.query.filter_by(ts_code=code, trade_date=date).first()\r\n # 要判断上面两个查询结果非空\r\n today_bar_data = {'open': bar_data[-1].open, 'close': bar_data[-1].close, 'high': bar_data[-1].high,\r\n 'low': bar_data[-1].low, 'change': bar_data[-1].change, 'pct_chg': bar_data[-1].pct_chg,\r\n 'pre_close': bar_data[-1].pre_close, 'vol': bar_data[-1].vol, 'amount': bar_data[-1].amount}\r\n today_basic_data = {'turnover_rate': basic_data.turnover_rate, 'pe': basic_data.pe, 'pb': basic_data.pb,\r\n 'circ_mv': basic_data.circ_mv}\r\n short_trend_data = []\r\n k_line_data = []\r\n one_year_data = []\r\n for i in bar_data[-60:]:\r\n short_trend_data.append(i.close)\r\n for i in bar_data[-250:]:\r\n one_year_data.append(i.close)\r\n for i in bar_data:\r\n trade_date = i.trade_date\r\n k_line_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n one_year_min = min(one_year_data)\r\n one_year_max = max(one_year_data)\r\n concepts = []\r\n company_data = Stock_Company_Extend.query.join(Stock_Company,\r\n Stock_Company_Extend.ts_code == Stock_Company.ts_code).add_columns(\r\n Stock_Company.province, Stock_Company.city).join(Stock_Industry_SW_3,\r\n Stock_Company_Extend.industry_sw_code == Stock_Industry_SW_3.industry_sw_3_code).add_columns(\r\n Stock_Industry_SW_3.industry_sw_3_name).join(\r\n Stock_Industry_SW_2, Stock_Industry_SW_3.belong_to == Stock_Industry_SW_2.industry_sw_2_code).add_columns(\r\n Stock_Industry_SW_2.industry_sw_2_name).join(\r\n Stock_Industry_SW_1, Stock_Industry_SW_2.belong_to == Stock_Industry_SW_1.industry_sw_1_code).add_columns(\r\n Stock_Industry_SW_1.industry_sw_1_name).filter(\r\n Stock_Company_Extend.ts_code == code).first()\r\n industry = [company_data.industry_sw_1_name, company_data.industry_sw_2_name, company_data.industry_sw_3_name]\r\n area = [company_data.province, company_data.city]\r\n concept_data = Stock_Concept_Detail.query.join(Stock_Concept_List,\r\n Stock_Concept_Detail.concept_code == Stock_Concept_List.concept_code).add_columns(\r\n Stock_Concept_List.concept_name).filter(Stock_Concept_Detail.ts_code == code,\r\n Stock_Concept_List.src == 'wind').all()\r\n for i in concept_data:\r\n concepts.append(i.concept_name)\r\n return jsonify(\r\n {'today_bar_data': today_bar_data, 'today_basic_data': today_basic_data, 'short_trend_data': short_trend_data,\r\n 'k_line_data': k_line_data, 'one_year_data': {'min': one_year_min, 'max': one_year_max},\r\n 'company_and_concept': {'area': area, 'industry': industry, 'concept': concepts}})\r\n\r\n\r\n@stock_prediction.route('api_trend_prediction', methods=['GET'])\r\ndef api_trend_prediction():\r\n code = request.args.get('code')\r\n date = request.args.get('date')\r\n bar_data = Stock_Daily_Bar.query.filter(Stock_Daily_Bar.ts_code == code, 
Stock_Daily_Bar.trade_date <= date,\r\n Stock_Daily_Bar.trade_date >= get_offset_date(date, -120)).order_by(\r\n Stock_Daily_Bar.trade_date.asc()).all()\r\n basic_data = Stock_Daily_Basic.query.filter_by(ts_code=code, trade_date=date).first()\r\n today_bar_data = {'open': bar_data[-1].open, 'close': bar_data[-1].close, 'high': bar_data[-1].high,\r\n 'low': bar_data[-1].low, 'change': bar_data[-1].change, 'pct_chg': bar_data[-1].pct_chg,\r\n 'pre_close': bar_data[-1].pre_close, 'vol': bar_data[-1].vol, 'amount': bar_data[-1].amount}\r\n if basic_data is None:\r\n today_basic_data = {'turnover_rate': 0, 'pe': 0, 'pb': 0,\r\n 'circ_mv': 0}\r\n else:\r\n today_basic_data = {'turnover_rate': basic_data.turnover_rate, 'pe': basic_data.pe, 'pb': basic_data.pb,\r\n 'circ_mv': basic_data.circ_mv}\r\n trade_point_data = Model_Trading_Point.query.filter(Model_Trading_Point.ts_code == code,\r\n Model_Trading_Point.trade_date <= date,\r\n Model_Trading_Point.trade_date >= get_offset_date(date,\r\n -120)).order_by(\r\n Model_Trading_Point.trade_date.asc()).all()\r\n aggressive_requirement = '维持现状'\r\n steady_requirement = '维持现状'\r\n trade_point_line_data = []\r\n aggressive_trade_point = []\r\n steady_trade_point = []\r\n for i in bar_data:\r\n trade_date = i.trade_date\r\n trade_point_line_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n for i in trade_point_data:\r\n trade_date = i.trade_date\r\n if i.aggressive_buy_point is True:\r\n close = (Stock_Daily_Bar.query.filter_by(ts_code=code, trade_date=trade_date).first()).close\r\n aggressive_trade_point.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), close, '买点'])\r\n if trade_date == date:\r\n aggressive_requirement = i.aggressive_buy_requirement\r\n if i.aggressive_sell_point is True:\r\n close = (Stock_Daily_Bar.query.filter_by(ts_code=code, trade_date=trade_date).first()).close\r\n aggressive_trade_point.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), close, '卖点'])\r\n if trade_date == date:\r\n aggressive_requirement = i.aggressive_sell_requirement\r\n if i.steady_buy_point is True:\r\n close = (Stock_Daily_Bar.query.filter_by(ts_code=code, trade_date=trade_date).first()).close\r\n steady_trade_point.append([(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), close, '买点'])\r\n if trade_date == date:\r\n steady_requirement = i.steady_buy_requirement\r\n if i.steady_sell_point is True:\r\n close = (Stock_Daily_Bar.query.filter_by(ts_code=code, trade_date=trade_date).first()).close\r\n steady_trade_point.append([(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), close, '卖点'])\r\n if trade_date == date:\r\n steady_requirement = i.steady_sell_requirement\r\n trend_forecast_result = Model_Trend_Forecast.query.filter_by(ts_code=code, trade_date=date).first()\r\n first_vote = [trend_forecast_result.first_fall_vote, trend_forecast_result.first_maintain_vote,\r\n trend_forecast_result.first_rise_vote]\r\n second_vote = [trend_forecast_result.second_fall_vote, trend_forecast_result.second_maintain_vote,\r\n trend_forecast_result.second_rise_vote]\r\n third_vote = [trend_forecast_result.third_fall_vote, trend_forecast_result.third_maintain_vote,\r\n trend_forecast_result.third_rise_vote]\r\n\r\n similarity_data = []\r\n similarity_1_data = []\r\n similarity_2_data = []\r\n similarity_3_data = []\r\n similarity_match_1_data = []\r\n similarity_match_2_data = []\r\n 
similarity_match_3_data = []\r\n mark_line = [\r\n (bar_data[-20].trade_date[0:4] + '/' + bar_data[-20].trade_date[4:6] + '/' + bar_data[-20].trade_date[6:8]),\r\n (bar_data[-1].trade_date[0:4] + '/' + bar_data[-1].trade_date[4:6] + '/' + bar_data[-1].trade_date[6:8])]\r\n similarity_short_term_result = Model_Similarity_Short_Term.query.filter_by(ts_code=code, trade_date=date).first()\r\n similarity_1_stock = Stock_Basic.query.filter_by(ts_code=similarity_short_term_result.similarity_1_code).first()\r\n similarity_2_stock = Stock_Basic.query.filter_by(ts_code=similarity_short_term_result.similarity_2_code).first()\r\n similarity_3_stock = Stock_Basic.query.filter_by(ts_code=similarity_short_term_result.similarity_3_code).first()\r\n similarity_1_result = Stock_Daily_Bar.query.filter(\r\n Stock_Daily_Bar.ts_code == similarity_short_term_result.similarity_1_code,\r\n Stock_Daily_Bar.trade_date <= similarity_short_term_result.similarity_1_prediction_end_time, ).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(90).all()\r\n similarity_2_result = Stock_Daily_Bar.query.filter(\r\n Stock_Daily_Bar.ts_code == similarity_short_term_result.similarity_2_code,\r\n Stock_Daily_Bar.trade_date <= similarity_short_term_result.similarity_2_prediction_end_time, ).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(90).all()\r\n similarity_3_result = Stock_Daily_Bar.query.filter(\r\n Stock_Daily_Bar.ts_code == similarity_short_term_result.similarity_3_code,\r\n Stock_Daily_Bar.trade_date <= similarity_short_term_result.similarity_3_prediction_end_time, ).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(90).all()\r\n similarity_1_result.reverse()\r\n similarity_2_result.reverse()\r\n similarity_3_result.reverse()\r\n for i in bar_data[-60:]:\r\n trade_date = i.trade_date\r\n similarity_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n for i in similarity_1_result[-30:]:\r\n factor = bar_data[-1].close / similarity_1_result[-31].close\r\n trade_date = i.trade_date\r\n similarity_1_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open * factor, i.close * factor,\r\n i.low * factor, i.high * factor,\r\n i.amount])\r\n for i in similarity_2_result[-30:]:\r\n factor = bar_data[-1].close / similarity_2_result[-31].close\r\n trade_date = i.trade_date\r\n similarity_2_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open * factor, i.close * factor,\r\n i.low * factor, i.high * factor,\r\n i.amount])\r\n for i in similarity_3_result[-30:]:\r\n factor = bar_data[-1].close / similarity_3_result[-31].close\r\n trade_date = i.trade_date\r\n similarity_3_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open * factor, i.close * factor,\r\n i.low * factor, i.high * factor,\r\n i.amount])\r\n similarity_1_data = similarity_data + similarity_1_data\r\n similarity_2_data = similarity_data + similarity_2_data\r\n similarity_3_data = similarity_data + similarity_3_data\r\n for i in similarity_1_result:\r\n trade_date = i.trade_date\r\n similarity_match_1_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n for i in similarity_2_result:\r\n trade_date = i.trade_date\r\n similarity_match_2_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n for i in 
similarity_3_result:\r\n trade_date = i.trade_date\r\n similarity_match_3_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n similarity_match_mark_line_1 = [(similarity_short_term_result.similarity_1_matching_start_time[\r\n 0:4] + '/' + similarity_short_term_result.similarity_1_matching_start_time[\r\n 4:6] + '/' + similarity_short_term_result.similarity_1_matching_start_time[\r\n 6:8]),\r\n (similarity_short_term_result.similarity_1_matching_end_time[\r\n 0:4] + '/' + similarity_short_term_result.similarity_1_matching_end_time[\r\n 4:6] + '/' + similarity_short_term_result.similarity_1_matching_end_time[\r\n 6:8])]\r\n similarity_match_mark_line_2 = [(similarity_short_term_result.similarity_2_matching_start_time[\r\n 0:4] + '/' + similarity_short_term_result.similarity_2_matching_start_time[\r\n 4:6] + '/' + similarity_short_term_result.similarity_2_matching_start_time[\r\n 6:8]),\r\n (similarity_short_term_result.similarity_2_matching_end_time[\r\n 0:4] + '/' + similarity_short_term_result.similarity_2_matching_end_time[\r\n 4:6] + '/' + similarity_short_term_result.similarity_2_matching_end_time[\r\n 6:8])]\r\n similarity_match_mark_line_3 = [(similarity_short_term_result.similarity_3_matching_start_time[\r\n 0:4] + '/' + similarity_short_term_result.similarity_3_matching_start_time[\r\n 4:6] + '/' + similarity_short_term_result.similarity_3_matching_start_time[\r\n 6:8]),\r\n (similarity_short_term_result.similarity_3_matching_end_time[\r\n 0:4] + '/' + similarity_short_term_result.similarity_3_matching_end_time[\r\n 4:6] + '/' + similarity_short_term_result.similarity_3_matching_end_time[\r\n 6:8])]\r\n sparkline_data = []\r\n similarity_1_sparkline_data = []\r\n similarity_2_sparkline_data = []\r\n similarity_3_sparkline_data = []\r\n for i in bar_data[-20:]:\r\n sparkline_data.append(i.pct_chg)\r\n sparkline_data.extend([None] * 30)\r\n for i in similarity_1_result[-50:]:\r\n similarity_1_sparkline_data.append(i.pct_chg)\r\n for i in similarity_2_result[-50:]:\r\n similarity_2_sparkline_data.append(i.pct_chg)\r\n for i in similarity_3_result[-50:]:\r\n similarity_3_sparkline_data.append(i.pct_chg)\r\n similarity_1_table_data = []\r\n similarity_2_table_data = []\r\n similarity_3_table_data = []\r\n for i in similarity_1_result[-30:]:\r\n similarity_1_table_data.append([i.pct_chg, i.open, i.close, i.high, i.low, i.vol])\r\n for i in similarity_2_result[-30:]:\r\n similarity_2_table_data.append([i.pct_chg, i.open, i.close, i.high, i.low, i.vol])\r\n for i in similarity_3_result[-30:]:\r\n similarity_3_table_data.append([i.pct_chg, i.open, i.close, i.high, i.low, i.vol])\r\n similarity_short_term_data = {'similarity_1_data': similarity_1_data, 'similarity_2_data': similarity_2_data,\r\n 'similarity_3_data': similarity_3_data,\r\n 'similarity_match_1_data': similarity_match_1_data,\r\n 'similarity_match_2_data': similarity_match_2_data,\r\n 'similarity_match_3_data': similarity_match_3_data,\r\n 'mark_line': mark_line, 'similarity_match_mark_line_1': similarity_match_mark_line_1,\r\n 'similarity_match_mark_line_2': similarity_match_mark_line_2,\r\n 'similarity_match_mark_line_3': similarity_match_mark_line_3,\r\n 'sparkline_data': sparkline_data,\r\n 'similarity_1_sparkline_data': similarity_1_sparkline_data,\r\n 'similarity_2_sparkline_data': similarity_2_sparkline_data,\r\n 'similarity_3_sparkline_data': similarity_3_sparkline_data,\r\n 'similarity_1_table_data': similarity_1_table_data,\r\n 
'similarity_2_table_data': similarity_2_table_data,\r\n 'similarity_3_table_data': similarity_3_table_data,\r\n 'similarity_stock': [\r\n {'code': similarity_1_stock.ts_code, 'name': similarity_1_stock.name,\r\n 'distance': similarity_short_term_result.similarity_1_distance},\r\n {'code': similarity_2_stock.ts_code, 'name': similarity_2_stock.name,\r\n 'distance': similarity_short_term_result.similarity_2_distance},\r\n {'code': similarity_3_stock.ts_code, 'name': similarity_3_stock.name,\r\n 'distance': similarity_short_term_result.similarity_3_distance}],\r\n 'trend_description': similarity_short_term_result.similarity_1_trend_description}\r\n correlation_data = []\r\n correlation_1_data = []\r\n correlation_2_data = []\r\n correlation_3_data = []\r\n correlation_match_1_data = []\r\n correlation_match_2_data = []\r\n correlation_match_3_data = []\r\n correlation_short_term_result = Model_Correlation_Short_Term.query.filter_by(ts_code=code, trade_date=date).first()\r\n correlation_1_stock = Stock_Basic.query.filter_by(ts_code=correlation_short_term_result.correlation_1_code).first()\r\n correlation_2_stock = Stock_Basic.query.filter_by(ts_code=correlation_short_term_result.correlation_2_code).first()\r\n correlation_3_stock = Stock_Basic.query.filter_by(ts_code=correlation_short_term_result.correlation_3_code).first()\r\n correlation_1_result = Stock_Daily_Bar.query.filter(\r\n Stock_Daily_Bar.ts_code == correlation_short_term_result.correlation_1_code,\r\n Stock_Daily_Bar.trade_date <= correlation_short_term_result.correlation_1_prediction_end_time, ).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(90).all()\r\n correlation_2_result = Stock_Daily_Bar.query.filter(\r\n Stock_Daily_Bar.ts_code == correlation_short_term_result.correlation_2_code,\r\n Stock_Daily_Bar.trade_date <= correlation_short_term_result.correlation_2_prediction_end_time, ).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(90).all()\r\n correlation_3_result = Stock_Daily_Bar.query.filter(\r\n Stock_Daily_Bar.ts_code == correlation_short_term_result.correlation_3_code,\r\n Stock_Daily_Bar.trade_date <= correlation_short_term_result.correlation_3_prediction_end_time, ).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(90).all()\r\n correlation_1_result.reverse()\r\n correlation_2_result.reverse()\r\n correlation_3_result.reverse()\r\n for i in bar_data[-60:]:\r\n trade_date = i.trade_date\r\n correlation_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n for i in correlation_1_result[-30:]:\r\n factor = bar_data[-1].close / correlation_1_result[-31].close\r\n trade_date = i.trade_date\r\n correlation_1_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open * factor, i.close * factor,\r\n i.low * factor, i.high * factor,\r\n i.amount])\r\n for i in correlation_2_result[-30:]:\r\n factor = bar_data[-1].close / correlation_2_result[-31].close\r\n trade_date = i.trade_date\r\n correlation_2_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open * factor, i.close * factor,\r\n i.low * factor, i.high * factor,\r\n i.amount])\r\n for i in correlation_3_result[-30:]:\r\n factor = bar_data[-1].close / correlation_3_result[-31].close\r\n trade_date = i.trade_date\r\n correlation_3_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open * factor, i.close * factor,\r\n i.low * factor, i.high * factor,\r\n i.amount])\r\n 
correlation_1_data = correlation_data + correlation_1_data\r\n correlation_2_data = correlation_data + correlation_2_data\r\n correlation_3_data = correlation_data + correlation_3_data\r\n for i in correlation_1_result:\r\n trade_date = i.trade_date\r\n correlation_match_1_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n for i in correlation_2_result:\r\n trade_date = i.trade_date\r\n correlation_match_2_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n for i in correlation_3_result:\r\n trade_date = i.trade_date\r\n correlation_match_3_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n correlation_match_mark_line_1 = [(correlation_short_term_result.correlation_1_matching_start_time[\r\n 0:4] + '/' + correlation_short_term_result.correlation_1_matching_start_time[\r\n 4:6] + '/' + correlation_short_term_result.correlation_1_matching_start_time[\r\n 6:8]),\r\n (correlation_short_term_result.correlation_1_matching_end_time[\r\n 0:4] + '/' + correlation_short_term_result.correlation_1_matching_end_time[\r\n 4:6] + '/' + correlation_short_term_result.correlation_1_matching_end_time[\r\n 6:8])]\r\n correlation_match_mark_line_2 = [(correlation_short_term_result.correlation_2_matching_start_time[\r\n 0:4] + '/' + correlation_short_term_result.correlation_2_matching_start_time[\r\n 4:6] + '/' + correlation_short_term_result.correlation_2_matching_start_time[\r\n 6:8]),\r\n (correlation_short_term_result.correlation_2_matching_end_time[\r\n 0:4] + '/' + correlation_short_term_result.correlation_2_matching_end_time[\r\n 4:6] + '/' + correlation_short_term_result.correlation_2_matching_end_time[\r\n 6:8])]\r\n correlation_match_mark_line_3 = [(correlation_short_term_result.correlation_3_matching_start_time[\r\n 0:4] + '/' + correlation_short_term_result.correlation_3_matching_start_time[\r\n 4:6] + '/' + correlation_short_term_result.correlation_3_matching_start_time[\r\n 6:8]),\r\n (correlation_short_term_result.correlation_3_matching_end_time[\r\n 0:4] + '/' + correlation_short_term_result.correlation_3_matching_end_time[\r\n 4:6] + '/' + correlation_short_term_result.correlation_3_matching_end_time[\r\n 6:8])]\r\n correlation_1_sparkline_data = []\r\n correlation_2_sparkline_data = []\r\n correlation_3_sparkline_data = []\r\n for i in correlation_1_result[-50:]:\r\n correlation_1_sparkline_data.append(i.pct_chg)\r\n for i in correlation_2_result[-50:]:\r\n correlation_2_sparkline_data.append(i.pct_chg)\r\n for i in correlation_3_result[-50:]:\r\n correlation_3_sparkline_data.append(i.pct_chg)\r\n correlation_1_table_data = []\r\n correlation_2_table_data = []\r\n correlation_3_table_data = []\r\n for i in correlation_1_result[-30:]:\r\n correlation_1_table_data.append([i.pct_chg, i.open, i.close, i.high, i.low, i.vol])\r\n for i in correlation_2_result[-30:]:\r\n correlation_2_table_data.append([i.pct_chg, i.open, i.close, i.high, i.low, i.vol])\r\n for i in correlation_3_result[-30:]:\r\n correlation_3_table_data.append([i.pct_chg, i.open, i.close, i.high, i.low, i.vol])\r\n correlation_short_term_data = {'correlation_1_data': correlation_1_data, 'correlation_2_data': correlation_2_data,\r\n 'correlation_3_data': correlation_3_data,\r\n 'correlation_match_1_data': correlation_match_1_data,\r\n 'correlation_match_2_data': 
correlation_match_2_data,\r\n 'correlation_match_3_data': correlation_match_3_data,\r\n 'mark_line': mark_line,\r\n 'correlation_match_mark_line_1': correlation_match_mark_line_1,\r\n 'correlation_match_mark_line_2': correlation_match_mark_line_2,\r\n 'correlation_match_mark_line_3': correlation_match_mark_line_3,\r\n 'sparkline_data': sparkline_data,\r\n 'correlation_1_sparkline_data': correlation_1_sparkline_data,\r\n 'correlation_2_sparkline_data': correlation_2_sparkline_data,\r\n 'correlation_3_sparkline_data': correlation_3_sparkline_data,\r\n 'correlation_1_table_data': correlation_1_table_data,\r\n 'correlation_2_table_data': correlation_2_table_data,\r\n 'correlation_3_table_data': correlation_3_table_data,\r\n 'correlation_stock': [\r\n {'code': correlation_1_stock.ts_code, 'name': correlation_1_stock.name,\r\n 'r': correlation_short_term_result.correlation_1_r},\r\n {'code': correlation_2_stock.ts_code, 'name': correlation_2_stock.name,\r\n 'r': correlation_short_term_result.correlation_2_r},\r\n {'code': correlation_3_stock.ts_code, 'name': correlation_3_stock.name,\r\n 'r': correlation_short_term_result.correlation_3_r}],\r\n 'trend_description': correlation_short_term_result.correlation_1_trend_description}\r\n\r\n similarity_history_result = Model_Similarity_History.query.filter_by(ts_code=code, trade_date=date).first()\r\n similarity_history_match_result = Stock_Daily_Bar.query.filter(\r\n Stock_Daily_Bar.ts_code == code,\r\n Stock_Daily_Bar.trade_date <= similarity_history_result.prediction_end_time, ).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(90).all()\r\n similarity_history_match_result.reverse()\r\n similarity_history_match_data = []\r\n similarity_history_match_sparkline_data = []\r\n for i in similarity_history_match_result:\r\n trade_date = i.trade_date\r\n similarity_history_match_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount])\r\n similarity_history_match_sparkline_data.append(i.pct_chg)\r\n similarity_history_mark_line = [(similarity_history_result.matching_start_time[\r\n 0:4] + '/' + similarity_history_result.matching_start_time[\r\n 4:6] + '/' + similarity_history_result.matching_start_time[6:8]),\r\n (similarity_history_result.matching_end_time[\r\n 0:4] + '/' + similarity_history_result.matching_end_time[\r\n 4:6] + '/' + similarity_history_result.matching_end_time[6:8])]\r\n similarity_history_match_date = [(date[0:4] + '/' + date[4:6] + '/' + date[6:8]),\r\n (similarity_history_result.matching_end_time[\r\n 0:4] + '/' + similarity_history_result.matching_end_time[\r\n 4:6] + '/' + similarity_history_result.matching_end_time[6:8])]\r\n similarity_history_sparkline_data = []\r\n for i in bar_data[-60:]:\r\n similarity_history_sparkline_data.append(i.pct_chg)\r\n similarity_history_sparkline_data.extend([None] * 30)\r\n similarity_history_table_data = []\r\n for i in similarity_history_match_result[-30:]:\r\n similarity_history_table_data.append([i.pct_chg, i.open, i.close, i.high, i.low, i.vol])\r\n similarity_history_data = {'similarity_history_match_data': similarity_history_match_data,\r\n 'mark_line': similarity_history_mark_line, 'match_date': similarity_history_match_date,\r\n 'similarity_history_match_sparkline_data': similarity_history_match_sparkline_data,\r\n 'sparkline_data': similarity_history_sparkline_data,\r\n 'similarity_history_table_data': similarity_history_table_data}\r\n\r\n bar_data_for_state_transition_result = 
Stock_Daily_Bar.query.filter(Stock_Daily_Bar.ts_code == code,\r\n Stock_Daily_Bar.trade_date <= date,\r\n ).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(200).all()\r\n state_transition_result = Model_State_Transition.query.filter(Model_State_Transition.ts_code == code,\r\n Model_State_Transition.trade_date <= date).order_by(\r\n Model_State_Transition.trade_date.desc()).limit(200).all()\r\n bar_data_for_state_transition_result.reverse()\r\n state_transition_result.reverse()\r\n today_state_transition_data = {'s_rise_rate': state_transition_result[-1].s_rise_rate,\r\n 's_maintain_rate': state_transition_result[-1].s_maintain_rate,\r\n 's_fall_rate': state_transition_result[-1].s_fall_rate,\r\n 'l_rise_rate': state_transition_result[-1].l_rise_rate,\r\n 'l_maintain_rate': state_transition_result[-1].l_maintain_rate,\r\n 'l_fall_rate': state_transition_result[-1].l_fall_rate}\r\n s_rise_sparkline_data = []\r\n s_fall_sparkline_data = []\r\n l_rise_sparkline_data = []\r\n l_fall_sparkline_data = []\r\n state_transition_line_data = []\r\n for i in state_transition_result[-60:]:\r\n s_rise_sparkline_data.append(i.s_rise_rate)\r\n s_fall_sparkline_data.append(i.s_fall_rate)\r\n l_rise_sparkline_data.append(i.l_rise_rate)\r\n l_fall_sparkline_data.append(i.l_fall_rate)\r\n for i, j in zip(bar_data_for_state_transition_result, state_transition_result):\r\n trade_date = i.trade_date\r\n state_transition_line_data.append(\r\n [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.open, i.close, i.low, i.high,\r\n i.amount, j.s_rise_rate, j.s_fall_rate, j.l_rise_rate, j.l_fall_rate])\r\n state_transition_data = {'state_transition_line_data': state_transition_line_data,\r\n 'today_state_transition_data': today_state_transition_data,\r\n 's_rise_sparkline_data': s_rise_sparkline_data,\r\n 's_fall_sparkline_data': s_fall_sparkline_data,\r\n 'l_rise_sparkline_data': l_rise_sparkline_data,\r\n 'l_fall_sparkline_data': l_fall_sparkline_data}\r\n\r\n return jsonify({'today_bar_data': today_bar_data, 'today_basic_data': today_basic_data,\r\n 'trade_point_data': {'line_data': trade_point_line_data, 'aggressive': aggressive_trade_point,\r\n 'steady': steady_trade_point, 'aggressive_requirement': aggressive_requirement,\r\n 'steady_requirement': steady_requirement},\r\n 'trend_forecast_data': {'first_vote': first_vote, 'second_vote': second_vote,\r\n 'third_vote': third_vote,\r\n 'first_change': trend_forecast_result.first_change,\r\n 'second_change': trend_forecast_result.second_change,\r\n 'third_change': trend_forecast_result.third_change},\r\n 'similarity_short_term_data': similarity_short_term_data,\r\n 'correlation_short_term_data': correlation_short_term_data,\r\n 'similarity_history_data': similarity_history_data,\r\n 'state_transition_data': state_transition_data})\r\n\r\n\r\n@stock_prediction.route('api_stock_assessment', methods=['GET'])\r\ndef api_stock_assessment():\r\n code = request.args.get('code')\r\n date = request.args.get('date')\r\n stock_daily_bar_result = Stock_Daily_Bar.query.filter(Stock_Daily_Bar.ts_code == code,\r\n Stock_Daily_Bar.trade_date <= date).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(500).all()\r\n stock_daily_basic_result = Stock_Daily_Basic.query.filter(Stock_Daily_Basic.ts_code == code,\r\n Stock_Daily_Basic.trade_date <= date).order_by(\r\n Stock_Daily_Basic.trade_date.desc()).limit(500).all()\r\n stock_assessment_result = Model_Stock_Assessment.query.filter(Model_Stock_Assessment.ts_code == code,\r\n Model_Stock_Assessment.trade_date 
<= date).order_by(\r\n Model_Stock_Assessment.trade_date.desc()).limit(500).all()\r\n stock_daily_bar_result.reverse()\r\n stock_daily_basic_result.reverse()\r\n stock_assessment_result.reverse()\r\n company_data = Stock_Company_Extend.query.join(Stock_Industry_SW_3,\r\n Stock_Company_Extend.industry_sw_code == Stock_Industry_SW_3.industry_sw_3_code).add_columns(\r\n Stock_Industry_SW_3.industry_sw_3_name).join(\r\n Stock_Industry_SW_2, Stock_Industry_SW_3.belong_to == Stock_Industry_SW_2.industry_sw_2_code).add_columns(\r\n Stock_Industry_SW_2.industry_sw_2_name).join(\r\n Stock_Industry_SW_1, Stock_Industry_SW_2.belong_to == Stock_Industry_SW_1.industry_sw_1_code).add_columns(\r\n Stock_Industry_SW_1.industry_sw_1_code, Stock_Industry_SW_1.industry_sw_1_name).join(Stock_Industry_CSRC_2,\r\n Stock_Company_Extend.industry_csrc_code== Stock_Industry_CSRC_2.industry_csrc_2_code).join(\r\n Stock_Industry_CSRC_1,\r\n Stock_Industry_CSRC_2.belong_to == Stock_Industry_CSRC_1.industry_csrc_1_code).add_columns(\r\n Stock_Industry_CSRC_1.industry_csrc_1_code, Stock_Industry_CSRC_1.industry_csrc_1_name).filter(\r\n Stock_Company_Extend.ts_code == code).first()\r\n sw_industry, sw_industry_name = company_data.industry_sw_1_code, company_data.industry_sw_1_name\r\n csrc_industry, csrc_industry_name = company_data.industry_csrc_1_code, company_data.industry_csrc_1_name\r\n sw_industry_basic_result = Stock_Industry_Basic.query.filter(Stock_Industry_Basic.industry_code == sw_industry,\r\n Stock_Industry_Basic.trade_date <= date).order_by(\r\n Stock_Industry_Basic.trade_date.desc()).limit(500).all()\r\n csrc_industry_basic_result = Stock_Industry_Basic.query.filter(Stock_Industry_Basic.industry_code == csrc_industry,\r\n Stock_Industry_Basic.trade_date <= date).order_by(\r\n Stock_Industry_Basic.trade_date.desc()).limit(500).all()\r\n sw_industry_basic_result.reverse()\r\n csrc_industry_basic_result.reverse()\r\n csrc_order_result = Stock_Daily_Basic.query.join(Stock_Basic,\r\n Stock_Daily_Basic.ts_code == Stock_Basic.ts_code).add_columns(\r\n Stock_Basic.name).join(Stock_Company_Extend, Stock_Daily_Basic.ts_code == Stock_Company_Extend.ts_code).join(\r\n Stock_Industry_CSRC_2,\r\n Stock_Company_Extend.industry_csrc_code == Stock_Industry_CSRC_2.industry_csrc_2_code).join(\r\n Stock_Industry_CSRC_1,\r\n Stock_Industry_CSRC_2.belong_to == Stock_Industry_CSRC_1.industry_csrc_1_code).filter(\r\n Stock_Daily_Basic.trade_date == date,\r\n Stock_Industry_CSRC_1.industry_csrc_1_code == csrc_industry,Stock_Daily_Basic.pe_ttm!=None).order_by(Stock_Daily_Basic.pe_ttm.asc()).all()\r\n sw_order_result = Stock_Daily_Basic.query.join(Stock_Basic,\r\n Stock_Daily_Basic.ts_code == Stock_Basic.ts_code).add_columns(\r\n Stock_Basic.name).join(Stock_Company_Extend,\r\n Stock_Daily_Basic.ts_code == Stock_Company_Extend.ts_code).join(\r\n Stock_Industry_SW_3,\r\n Stock_Company_Extend.industry_sw_code == Stock_Industry_SW_3.industry_sw_3_code).join(\r\n Stock_Industry_SW_2, Stock_Industry_SW_3.belong_to == Stock_Industry_SW_2.industry_sw_2_code).join(\r\n Stock_Industry_SW_1, Stock_Industry_SW_2.belong_to == Stock_Industry_SW_1.industry_sw_1_code).filter(\r\n Stock_Daily_Basic.trade_date == date,\r\n Stock_Industry_SW_1.industry_sw_1_code == sw_industry,Stock_Daily_Basic.pe_ttm!=None).order_by(Stock_Daily_Basic.pe_ttm.asc()).all()\r\n csrc_order_number = 0\r\n sw_order_number = 0\r\n\r\n for i in range(0, len(csrc_order_result)):\r\n if csrc_order_result[i].Stock_Daily_Basic.ts_code == code:\r\n csrc_order_number = i + 1\r\n 
break\r\n for i in range(0, len(sw_order_result)):\r\n if sw_order_result[i].Stock_Daily_Basic.ts_code == code:\r\n sw_order_number = i + 1\r\n break\r\n csrc_order_data = []\r\n sw_order_data = []\r\n for i in csrc_order_result[0:5]:\r\n csrc_order_data.append([i.Stock_Daily_Basic.ts_code,i.name, i.Stock_Daily_Basic.pe_ttm])\r\n for i in sw_order_result[0:5]:\r\n sw_order_data.append([i.Stock_Daily_Basic.ts_code,i.name, i.Stock_Daily_Basic.pe_ttm])\r\n close_list = []\r\n for i in stock_daily_bar_result[-15:]:\r\n trade_date = i.trade_date\r\n close_list.append([(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.close])\r\n assessment_list = []\r\n for i in stock_assessment_result:\r\n assessment_list.append([i.estimated_value, i.estimated_value_std])\r\n pe_list = []\r\n for i in stock_daily_basic_result[-20:]:\r\n trade_date = i.trade_date\r\n pe_list.append([(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.pe_ttm])\r\n csrc_pe_list = []\r\n sw_pe_list = []\r\n for i in csrc_industry_basic_result[-20:]:\r\n csrc_pe_list.append(i.pe_ttm_overall)\r\n for i in sw_industry_basic_result[-20:]:\r\n sw_pe_list.append(i.pe_ttm_overall)\r\n\r\n today_data = {'close': stock_daily_bar_result[-1].close,\r\n 'pe_ttm': stock_daily_basic_result[-1].pe_ttm,\r\n 'csrc_pe': csrc_industry_basic_result[-1].pe_ttm_overall,\r\n 'sw_pe': sw_industry_basic_result[-1].pe_ttm_overall,\r\n 'estimated_value': stock_assessment_result[-1].estimated_value,\r\n 'estimated_value_std': stock_assessment_result[-1].estimated_value_std,\r\n 'similar_pe_count': stock_assessment_result[-1].similar_pe_count,\r\n 'pe_mean': stock_assessment_result[-1].pe_mean,\r\n 'pe_std': stock_assessment_result[-1].pe_std,\r\n 'pe_min': stock_assessment_result[-1].pe_min,\r\n 'pe_max': stock_assessment_result[-1].pe_max}\r\n\r\n assessment_data = {'close': close_list, 'assessment': assessment_list}\r\n\r\n industry_compare_data = {'pe': pe_list, 'csrc_pe': csrc_pe_list, 'sw_pe': sw_pe_list}\r\n\r\n order_data = {'sw_industry': sw_industry_name, 'csrc_industry': csrc_industry_name, 'csrc_order': csrc_order_number,\r\n 'csrc_order_data': csrc_order_data, 'sw_order': sw_order_number, 'sw_order_data': sw_order_data}\r\n return jsonify(\r\n {'today_data': today_data, 'assessment_data': assessment_data, 'industry_compare_data': industry_compare_data,\r\n 'order_data': order_data})\r\n\r\n\r\n@stock_prediction.route('api_money_flow', methods=['GET'])\r\ndef api_money_flow():\r\n code = request.args.get('code')\r\n date = request.args.get('date')\r\n stock_bar_result = Stock_Daily_Bar.query.filter(Stock_Daily_Bar.ts_code == code,\r\n Stock_Daily_Bar.trade_date <= date).order_by(\r\n Stock_Daily_Bar.trade_date.desc()).limit(20).all()\r\n stock_daily_basic_result = Stock_Daily_Basic.query.filter(Stock_Daily_Basic.ts_code == code,\r\n Stock_Daily_Basic.trade_date <= date).order_by(\r\n Stock_Daily_Basic.trade_date.desc()).limit(20).all()\r\n money_flow_result = Market_Money_Flow.query.filter(Market_Money_Flow.ts_code == code,\r\n Market_Money_Flow.trade_date <= date).order_by(\r\n Market_Money_Flow.trade_date.desc()).limit(20).all()\r\n stock_bar_result.reverse()\r\n stock_daily_basic_result.reverse()\r\n money_flow_result.reverse()\r\n today_data = {'buy_sm': money_flow_result[-1].buy_sm_amount,\r\n 'sell_sm': money_flow_result[-1].sell_sm_amount,\r\n 'buy_md': money_flow_result[-1].buy_md_amount,\r\n 'sell_md': money_flow_result[-1].sell_md_amount,\r\n 'buy_lg': money_flow_result[-1].buy_lg_amount,\r\n 
'sell_lg': money_flow_result[-1].sell_lg_amount,\r\n                  'buy_elg': money_flow_result[-1].buy_elg_amount,\r\n                  'sell_elg': money_flow_result[-1].sell_elg_amount,\r\n                  'amount': stock_bar_result[-1].amount / 10}\r\n    pct_chg_list = []\r\n    trade_date_list = []\r\n    for i in stock_bar_result:\r\n        trade_date = i.trade_date\r\n        trade_date_list.append((trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]))\r\n        pct_chg_list.append(i.pct_chg)\r\n    main_buy_list = []\r\n    main_sell_list = []\r\n    retail_buy_list = []\r\n    retail_sell_list = []\r\n    money_dist_data = []\r\n    net_list = []\r\n    for i in money_flow_result:\r\n        trade_date = i.trade_date\r\n        net_list.append(i.net_mf_amount)\r\n        main_buy_list.append(i.buy_lg_amount + i.buy_elg_amount)\r\n        main_sell_list.append(i.sell_lg_amount + i.sell_elg_amount)\r\n        retail_buy_list.append(i.buy_sm_amount + i.buy_md_amount)\r\n        retail_sell_list.append(i.sell_sm_amount + i.sell_md_amount)\r\n        money_dist_data.append(\r\n            [(trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]), i.buy_sm_amount + i.sell_sm_amount,\r\n             i.buy_md_amount + i.sell_md_amount, i.buy_lg_amount + i.sell_lg_amount,\r\n             i.buy_elg_amount + i.sell_elg_amount])\r\n    corr_list = []\r\n    scale_list = []\r\n    for x, y, z in zip(stock_daily_basic_result, pct_chg_list, net_list):\r\n        try:\r\n            corr_list.append(z / (x.circ_mv / (1 + y/100) * y/100))\r\n        except (TypeError, ZeroDivisionError):\r\n            corr_list.append(0)\r\n    for i in money_dist_data:\r\n        scale_list.append((i[3] + i[4]) / (i[1] + i[2] + i[3] + i[4]))\r\n    money_flow_data = {'trade_date': trade_date_list, 'pct_chg': pct_chg_list, 'main_buy': main_buy_list,\r\n                       'main_sell': main_sell_list, 'retail_buy': retail_buy_list, 'retail_sell': retail_sell_list}\r\n    main_ability = {'scale': scale_list[-1],\r\n                    'corr': corr_list[-1],\r\n                    'scale_list': scale_list,\r\n                    'corr_list': corr_list}\r\n    return jsonify({'today_data': today_data, 'money_flow_data': money_flow_data, 'money_dist_data': money_dist_data,\r\n                    'main_ability': main_ability})\r\n\r\n\r\n@stock_prediction.route('api_rule_statistics', methods=['GET'])\r\ndef api_rule_statistics():\r\n    code = request.args.get('code')\r\n    date = request.args.get('date')\r\n    bar_data = Stock_Daily_Bar.query.filter_by(ts_code=code, trade_date=date).first()\r\n    basic_data = Stock_Daily_Basic.query.filter_by(ts_code=code, trade_date=date).first()\r\n    today_bar_data = {'open': bar_data.open, 'close': bar_data.close, 'high': bar_data.high,\r\n                      'low': bar_data.low, 'change': bar_data.change, 'pct_chg': bar_data.pct_chg,\r\n                      'pre_close': bar_data.pre_close, 'vol': bar_data.vol, 'amount': bar_data.amount}\r\n    if basic_data is None:\r\n        today_basic_data = {'turnover_rate': 0, 'pe': 0, 'pb': 0,\r\n                            'circ_mv': 0}\r\n    else:\r\n        today_basic_data = {'turnover_rate': basic_data.turnover_rate, 'pe': basic_data.pe, 'pb': basic_data.pb,\r\n                            'circ_mv': basic_data.circ_mv}\r\n    rise_rise_data=[]\r\n    rise_rise_result=Model_Associate_Rule.query.join(Stock_Basic,Model_Associate_Rule.associate_code==Stock_Basic.ts_code).add_columns(Stock_Basic.name).filter(Model_Associate_Rule.ts_code==code,Model_Associate_Rule.trade_date==date,Model_Associate_Rule.associate_type=='rise_rise').order_by(Model_Associate_Rule.probability.desc()).all()\r\n    for i in rise_rise_result:\r\n        rise_rise_data.append([i.name,i.Model_Associate_Rule.approval_rating,i.Model_Associate_Rule.probability,i.Model_Associate_Rule.trading_day_count,i.Model_Associate_Rule.matching_day_count,i.Model_Associate_Rule.effect_day_count])\r\n    rise_fall_data=[]\r\n    
rise_fall_result=Model_Associate_Rule.query.join(Stock_Basic,Model_Associate_Rule.associate_code==Stock_Basic.ts_code).add_columns(Stock_Basic.name).filter(Model_Associate_Rule.ts_code==code,Model_Associate_Rule.trade_date==date,Model_Associate_Rule.associate_type=='rise_fall').order_by(Model_Associate_Rule.probability.desc()).all()\r\n for i in rise_fall_result:\r\n rise_fall_data.append([i.name,i.Model_Associate_Rule.approval_rating,i.Model_Associate_Rule.probability,i.Model_Associate_Rule.trading_day_count,i.Model_Associate_Rule.matching_day_count,i.Model_Associate_Rule.effect_day_count])\r\n rise_not_rise_data=[]\r\n rise_not_rise_result=Model_Associate_Rule.query.join(Stock_Basic,Model_Associate_Rule.associate_code==Stock_Basic.ts_code).add_columns(Stock_Basic.name).filter(Model_Associate_Rule.ts_code==code,Model_Associate_Rule.trade_date==date,Model_Associate_Rule.associate_type=='rise_not_rise').order_by(Model_Associate_Rule.probability.desc()).all()\r\n for i in rise_not_rise_result:\r\n rise_not_rise_data.append([i.name,i.Model_Associate_Rule.approval_rating,i.Model_Associate_Rule.probability,i.Model_Associate_Rule.trading_day_count,i.Model_Associate_Rule.matching_day_count,i.Model_Associate_Rule.effect_day_count])\r\n rise_not_fall_data=[]\r\n rise_not_fall_result=Model_Associate_Rule.query.join(Stock_Basic,Model_Associate_Rule.associate_code==Stock_Basic.ts_code).add_columns(Stock_Basic.name).filter(Model_Associate_Rule.ts_code==code,Model_Associate_Rule.trade_date==date,Model_Associate_Rule.associate_type=='rise_not_fall').order_by(Model_Associate_Rule.probability.desc()).all()\r\n for i in rise_not_fall_result:\r\n rise_not_fall_data.append([i.name,i.Model_Associate_Rule.approval_rating,i.Model_Associate_Rule.probability,i.Model_Associate_Rule.trading_day_count,i.Model_Associate_Rule.matching_day_count,i.Model_Associate_Rule.effect_day_count])\r\n fall_rise_data=[]\r\n fall_rise_result=Model_Associate_Rule.query.join(Stock_Basic,Model_Associate_Rule.associate_code==Stock_Basic.ts_code).add_columns(Stock_Basic.name).filter(Model_Associate_Rule.ts_code==code,Model_Associate_Rule.trade_date==date,Model_Associate_Rule.associate_type=='fall_rise').order_by(Model_Associate_Rule.probability.desc()).all()\r\n for i in fall_rise_result:\r\n fall_rise_data.append([i.name,i.Model_Associate_Rule.approval_rating,i.Model_Associate_Rule.probability,i.Model_Associate_Rule.trading_day_count,i.Model_Associate_Rule.matching_day_count,i.Model_Associate_Rule.effect_day_count])\r\n fall_fall_data=[]\r\n fall_fall_result=Model_Associate_Rule.query.join(Stock_Basic,Model_Associate_Rule.associate_code==Stock_Basic.ts_code).add_columns(Stock_Basic.name).filter(Model_Associate_Rule.ts_code==code,Model_Associate_Rule.trade_date==date,Model_Associate_Rule.associate_type=='fall_fall').order_by(Model_Associate_Rule.probability.desc()).all()\r\n for i in fall_fall_result:\r\n fall_fall_data.append([i.name,i.Model_Associate_Rule.approval_rating,i.Model_Associate_Rule.probability,i.Model_Associate_Rule.trading_day_count,i.Model_Associate_Rule.matching_day_count,i.Model_Associate_Rule.effect_day_count])\r\n fall_not_rise_data=[]\r\n fall_not_rise_result=Model_Associate_Rule.query.join(Stock_Basic,Model_Associate_Rule.associate_code==Stock_Basic.ts_code).add_columns(Stock_Basic.name).filter(Model_Associate_Rule.ts_code==code,Model_Associate_Rule.trade_date==date,Model_Associate_Rule.associate_type=='fall_not_rise').order_by(Model_Associate_Rule.probability.desc()).all()\r\n for i in fall_not_rise_result:\r\n 
fall_not_rise_data.append([i.name,i.Model_Associate_Rule.approval_rating,i.Model_Associate_Rule.probability,i.Model_Associate_Rule.trading_day_count,i.Model_Associate_Rule.matching_day_count,i.Model_Associate_Rule.effect_day_count])\r\n fall_not_fall_data=[]\r\n fall_not_fall_result=Model_Associate_Rule.query.join(Stock_Basic,Model_Associate_Rule.associate_code==Stock_Basic.ts_code).add_columns(Stock_Basic.name).filter(Model_Associate_Rule.ts_code==code,Model_Associate_Rule.trade_date==date,Model_Associate_Rule.associate_type=='fall_not_fall').order_by(Model_Associate_Rule.probability.desc()).all()\r\n for i in fall_not_fall_result:\r\n fall_not_fall_data.append([i.name,i.Model_Associate_Rule.approval_rating,i.Model_Associate_Rule.probability,i.Model_Associate_Rule.trading_day_count,i.Model_Associate_Rule.matching_day_count,i.Model_Associate_Rule.effect_day_count])\r\n associate_rule_data={'rise_rise':rise_rise_data,'rise_fall':rise_fall_data,'rise_not_rise':rise_not_rise_data,'rise_not_fall':rise_not_fall_data,\r\n 'fall_rise':fall_rise_data,'fall_fall':fall_fall_data,'fall_not_rise':fall_not_rise_data,'fall_not_fall':fall_not_fall_data}\r\n\r\n fluctuation_statistics_result=Model_Fluctuation_Statistics.query.filter_by(ts_code=code,trade_date=date).first()\r\n fluctuation_statistics_data={'D4':fluctuation_statistics_result.D4,\r\n 'D3':fluctuation_statistics_result.D3,\r\n 'D2':fluctuation_statistics_result.D2,\r\n 'D1':fluctuation_statistics_result.D1,\r\n 'five_total_count':fluctuation_statistics_result.five_total_count,\r\n 'five_D0_list':sql_to_object(fluctuation_statistics_result.five_D0_list),\r\n 'five_appearance_count_list':sql_to_object(fluctuation_statistics_result.five_appearance_count_list),\r\n 'five_proportion_list':sql_to_object(fluctuation_statistics_result.five_proportion_list),\r\n 'four_total_count':fluctuation_statistics_result.four_total_count,\r\n 'four_D0_list':sql_to_object(fluctuation_statistics_result.four_D0_list),\r\n 'four_appearance_count_list':sql_to_object(fluctuation_statistics_result.four_appearance_count_list),\r\n 'four_proportion_list':sql_to_object(fluctuation_statistics_result.four_proportion_list),\r\n 'three_total_count':fluctuation_statistics_result.three_total_count,\r\n 'three_D0_list':sql_to_object(fluctuation_statistics_result.three_D0_list),\r\n 'three_appearance_count_list':sql_to_object(fluctuation_statistics_result.three_appearance_count_list),\r\n 'three_proportion_list':sql_to_object(fluctuation_statistics_result.three_proportion_list),\r\n 'two_total_count':fluctuation_statistics_result.two_total_count,\r\n 'two_D0_list':sql_to_object(fluctuation_statistics_result.two_D0_list),\r\n 'two_appearance_count_list':sql_to_object(fluctuation_statistics_result.two_appearance_count_list),\r\n 'two_proportion_list':sql_to_object(fluctuation_statistics_result.two_proportion_list),\r\n 'one_total_count':fluctuation_statistics_result.one_total_count,\r\n 'one_D0_list':sql_to_object(fluctuation_statistics_result.one_D0_list),\r\n 'one_appearance_count_list':sql_to_object(fluctuation_statistics_result.one_appearance_count_list),\r\n 'one_proportion_list':sql_to_object(fluctuation_statistics_result.one_proportion_list)}\r\n\r\n fluctuation_correlation_result=Model_Fluctuation_Correlation.query.filter_by(ts_code=code,trade_date=date).first()\r\n fluctuation_correlation_data={'D4':fluctuation_correlation_result.D4,\r\n 'D3':fluctuation_correlation_result.D3,\r\n 'D2':fluctuation_correlation_result.D2,\r\n 'D1':fluctuation_correlation_result.D1,\r\n 
'five_sample_count':fluctuation_correlation_result.five_sample_count,\r\n 'five_total_count':fluctuation_correlation_result.five_total_count,\r\n 'five_approval_rating':fluctuation_correlation_result.five_approval_rating,\r\n 'five_D0_list':sql_to_object(fluctuation_correlation_result.five_D0_list),\r\n 'five_appearance_count_list':sql_to_object(fluctuation_correlation_result.five_appearance_count_list),\r\n 'five_proportion_list':sql_to_object(fluctuation_correlation_result.five_proportion_list),\r\n 'four_sample_count': fluctuation_correlation_result.four_sample_count,\r\n 'four_total_count': fluctuation_correlation_result.four_total_count,\r\n 'four_approval_rating': fluctuation_correlation_result.four_approval_rating,\r\n 'four_D0_list': sql_to_object(fluctuation_correlation_result.four_D0_list),\r\n 'four_appearance_count_list': sql_to_object(fluctuation_correlation_result.four_appearance_count_list),\r\n 'four_proportion_list': sql_to_object(fluctuation_correlation_result.four_proportion_list),\r\n 'three_sample_count': fluctuation_correlation_result.three_sample_count,\r\n 'three_total_count': fluctuation_correlation_result.three_total_count,\r\n 'three_approval_rating': fluctuation_correlation_result.three_approval_rating,\r\n 'three_D0_list': sql_to_object(fluctuation_correlation_result.three_D0_list),\r\n 'three_appearance_count_list': sql_to_object(fluctuation_correlation_result.three_appearance_count_list),\r\n 'three_proportion_list': sql_to_object(fluctuation_correlation_result.three_proportion_list),\r\n 'two_sample_count': fluctuation_correlation_result.two_sample_count,\r\n 'two_total_count': fluctuation_correlation_result.two_total_count,\r\n 'two_approval_rating': fluctuation_correlation_result.two_approval_rating,\r\n 'two_D0_list': sql_to_object(fluctuation_correlation_result.two_D0_list),\r\n 'two_appearance_count_list': sql_to_object(fluctuation_correlation_result.two_appearance_count_list),\r\n 'two_proportion_list': sql_to_object(fluctuation_correlation_result.two_proportion_list),\r\n }\r\n\r\n fluctuation_sequencing_result=Model_Fluctuation_Sequencing.query.filter_by(ts_code=code,trade_date='20190430').first()\r\n fluctuation_sequencing_data={'close':fluctuation_sequencing_result.close,\r\n 'high_change':fluctuation_sequencing_result.high_change,\r\n 'high_order':fluctuation_sequencing_result.high_order,\r\n 'low_change':fluctuation_sequencing_result.low_change,\r\n 'low_order':fluctuation_sequencing_result.low_order,\r\n 'order_sum':fluctuation_sequencing_result.order_sum}\r\n\r\n high_change_list=[]\r\n high_change_result=Model_Fluctuation_Sequencing.query.filter_by(trade_date='20190430').all()\r\n for i in high_change_result:\r\n high_change_list.append(int(i.high_change*100+0.5))\r\n high_change_statistics=[0]*101\r\n for i in high_change_list:\r\n high_change_statistics[100+i]+=1\r\n similarity_fluctuation_result = Model_Similarity_Fluctuation.query.filter_by(ts_code=code,trade_date=date).first()\r\n similarity_trend_result = Model_Similarity_Trend.query.filter_by(ts_code=code,trade_date=date).first()\r\n similarity_fluctuation_stock_list=sql_to_object(similarity_fluctuation_result.stock_list)\r\n similarity_fluctuation_liveness_list=sql_to_object(similarity_fluctuation_result.liveness_list)\r\n similarity_trend_stock_list = sql_to_object(similarity_trend_result.stock_list)\r\n similarity_trend_liveness_list = sql_to_object(similarity_trend_result.liveness_list)\r\n similarity_fluctuation_stock_data=[]\r\n similarity_fluctuation_pct_chg_data=[]\r\n for i in 
similarity_fluctuation_stock_list:\r\n stock_data=Stock_Basic.query.filter_by(ts_code=i).first()\r\n stock_bar=Stock_Daily_Bar.query.filter(Stock_Daily_Bar.ts_code==i,Stock_Daily_Bar.trade_date<=date).order_by(Stock_Daily_Bar.trade_date.desc()).limit(120).all()\r\n stock_bar.reverse()\r\n pct_chg_data=[]\r\n for j in stock_bar:\r\n if j.pct_chg>=2:\r\n pct_chg_data.append(1)\r\n elif j.pct_chg<=-2:\r\n pct_chg_data.append(-1)\r\n else:\r\n pct_chg_data.append(0)\r\n similarity_fluctuation_stock_data.append({'name': stock_data.name, 'symbol': stock_data.symbol})\r\n similarity_fluctuation_pct_chg_data.append(pct_chg_data)\r\n similarity_trend_stock_data=[]\r\n similarity_trend_close_data=[]\r\n for i in similarity_trend_stock_list:\r\n stock_data=Stock_Basic.query.filter_by(ts_code=i).first()\r\n stock_bar=Stock_Daily_Bar.query.filter(Stock_Daily_Bar.ts_code==i,Stock_Daily_Bar.trade_date<=date,Stock_Daily_Bar.trade_date>='20150101').order_by(Stock_Daily_Bar.trade_date.asc()).all()\r\n close_data=[]\r\n for j in stock_bar:\r\n close_data.append(j.close)\r\n similarity_trend_stock_data.append({'name': stock_data.name, 'symbol': stock_data.symbol})\r\n similarity_trend_close_data.append(close_data)\r\n\r\n cur_stock_pct_chg_data=[]\r\n cur_stock_close_data=[]\r\n cur_stock_bar_result=Stock_Daily_Bar.query.filter(Stock_Daily_Bar.ts_code==code,Stock_Daily_Bar.trade_date<=date,Stock_Daily_Bar.trade_date>='20150101').order_by(Stock_Daily_Bar.trade_date.asc()).all()\r\n n=0\r\n for i in cur_stock_bar_result[-120:]:\r\n if i.pct_chg >= 2:\r\n cur_stock_pct_chg_data.append(1)\r\n elif i.pct_chg <= -2:\r\n cur_stock_pct_chg_data.append(-1)\r\n else:\r\n n+=1\r\n cur_stock_pct_chg_data.append(0)\r\n cur_stock_liveness_data=(120-n)/120\r\n for i in cur_stock_bar_result:\r\n cur_stock_close_data.append(i.close)\r\n\r\n\r\n return jsonify({'associate_rule':associate_rule_data,'fluctuation_statistics':fluctuation_statistics_data,'fluctuation_correlation':fluctuation_correlation_data,\r\n 'fluctuation_sequencing':{'sequencing_data':fluctuation_sequencing_data,'high_change_statistics':high_change_statistics},\r\n 'similarity_fluctuation':{'stock':similarity_fluctuation_stock_data,'liveness':similarity_fluctuation_liveness_list,'pct_chg':similarity_fluctuation_pct_chg_data,'cur_stock_pct_chg':cur_stock_pct_chg_data,'cur_stock_liveness':cur_stock_liveness_data},\r\n 'similarity_trend':{'stock':similarity_trend_stock_data,'liveness':similarity_trend_liveness_list,'close':similarity_trend_close_data,'cur_stock_close':cur_stock_close_data,'cur_stock_liveness':cur_stock_liveness_data},\r\n 'today_bar_data':today_bar_data,'today_basic_data':today_basic_data})\r\n\r\n\r\n@stock_prediction.route('api_stock_basic', methods=['GET'])\r\ndef api_stock_basic():\r\n code = request.args.get('code')\r\n basic_data=Stock_Basic.query.filter_by(ts_code=code).first()\r\n company_data=Stock_Company.query.filter_by(ts_code=code).first()\r\n company_data_extend= Stock_Company_Extend.query.filter_by(ts_code=code).first()\r\n result={'fullname':basic_data.fullname,'website':company_data.website,'email':company_data.email,'enname':basic_data.enname,'chairman':company_data.chairman,'office':company_data.office,\r\n 'manager':company_data.manager,'secretary':company_data.secretary,'reg_capital':company_data.reg_capital,'industry':company_data_extend.industry_csrc_name,'organizationcode':company_data_extend.organizationcode,'fax':company_data_extend.fax,'setup_date':company_data.setup_date,'list_date':basic_data.list_date,\r\n 
'employees':company_data.employees,'main_business':company_data.main_business,'introduction':company_data.introduction,'business_scope':company_data.business_scope}\r\n return jsonify(result)\r\n\r\n@stock_prediction.route('api_stock_basic_ipo', methods=['GET'])\r\ndef api_stock_basic_ipo():\r\n code = request.args.get('code')\r\n ipo_data=Stock_IPO_Info.query.filter_by(ts_code=code).first()\r\n result={'ipo_date':ipo_data.ipo_date,'ipo_price':ipo_data.ipo_price,'ipo_collection':ipo_data.ipo_collection,'ipo_puboffrdate':ipo_data.ipo_puboffrdate,'ipo_leadundr':ipo_data.ipo_leadundr,'ipo_nominator':ipo_data.ipo_nominator,'ipo_sponsorrepresentative':ipo_data.ipo_sponsorrepresentative,'ipo_type':ipo_data.ipo_type,\r\n 'ipo_expense':ipo_data.ipo_expense,'ipo_amount':ipo_data.ipo_amount,'ipo_weightedpe':ipo_data.ipo_weightedpe}\r\n return jsonify(result)\r\n\r\n\r\n","sub_path":"webapp/controller/stock_prediction/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":64356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
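The `api_rule_statistics` route above builds eight near-identical `Model_Associate_Rule` queries that differ only in the `associate_type` literal, and several routes re-derive `YYYY/MM/DD` strings from `trade_date` by hand. A minimal refactoring sketch inside the same Flask module (so the models are already imported); the helper names are hypothetical, not part of the app:

```python
# Hypothetical helpers; only the model/column names come from the file above.
ASSOCIATE_TYPES = ['rise_rise', 'rise_fall', 'rise_not_rise', 'rise_not_fall',
                   'fall_rise', 'fall_fall', 'fall_not_rise', 'fall_not_fall']

def format_trade_date(trade_date):
    # '20190430' -> '2019/04/30', the format every chart endpoint above uses.
    return trade_date[0:4] + '/' + trade_date[4:6] + '/' + trade_date[6:8]

def fetch_associate_rules(code, date):
    data = {}
    for rule_type in ASSOCIATE_TYPES:
        rows = Model_Associate_Rule.query.join(
            Stock_Basic, Model_Associate_Rule.associate_code == Stock_Basic.ts_code
        ).add_columns(Stock_Basic.name).filter(
            Model_Associate_Rule.ts_code == code,
            Model_Associate_Rule.trade_date == date,
            Model_Associate_Rule.associate_type == rule_type,
        ).order_by(Model_Associate_Rule.probability.desc()).all()
        data[rule_type] = [[r.name,
                            r.Model_Associate_Rule.approval_rating,
                            r.Model_Associate_Rule.probability,
                            r.Model_Associate_Rule.trading_day_count,
                            r.Model_Associate_Rule.matching_day_count,
                            r.Model_Associate_Rule.effect_day_count] for r in rows]
    return data
```

With these two helpers, the eight copy-pasted query blocks collapse to one call and the date slicing appears in exactly one place.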
+{"seq_id":"439732715","text":"import httplib2\nimport json\n\n#google map api key\ngoogle_geocode_id = \"AIzaSyAvnFnZSiMNLeXX3eGh5kf9NEySf-zjkHY\"\n\nclass Maps:\n\t\n\t@staticmethod\n\tdef getGeocodeLocation(inputString):\n\t\tlocationString = inputString.replace(\" \",\"+\")\n\t\turl = (\"https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s\"%(locationString, google_geocode_id))\n\t\th = httplib2.Http()\n\t\tresponse, content = h.request(url, 'GET')\n\t\tresult = json.loads(content)\n\t\treturn result['results'][0]['geometry']['location']\n\n\t#test code\n\t#print getGeocodeLocation('Chaguanas')","sub_path":"api/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"72505237","text":"import abc\nfrom typing import List\nimport numpy as np\nimport attr\nfrom naturalnets.brains.i_brain import IBrain, IBrainCfg\n\n\n@attr.s(slots=True, auto_attribs=True, frozen=True, kw_only=True)\nclass ILayerBasedBrainCfg(IBrainCfg):\n # The structure of the layers\n # Each list entry translates to the size of one layer\n # The layers are in the given order\n hidden_layer_structure: List[int]\n # Whether a neuron can only use its own state form the last timestep\n diagonal_hidden_to_hidden: bool = False\n use_bias: bool = False\n\n\nclass ILayerBasedBrain(IBrain, abc.ABC):\n\n @staticmethod\n @abc.abstractmethod\n def layer_step(layer_input: np.ndarray, weight_ih, weight_hh, bias_h, hidden):\n # Compute one layer step\n pass\n\n @staticmethod\n @abc.abstractmethod\n def get_number_gates():\n # How many Gates are used in the specific network?\n # Haw many matrices are needed for each layer to calculate the next state and output value\n pass\n\n @staticmethod\n @abc.abstractmethod\n def get_number_hidden_values():\n # How many hidden values are used in one cell\n pass\n\n def __init__(self, input_size: int, output_size: int, individual: np.ndarray, configuration: ILayerBasedBrainCfg,\n brain_state: dict):\n if not type(configuration) is ILayerBasedBrainCfg:\n configuration = ILayerBasedBrainCfg(**configuration)\n hidden_layer_structure: List[int] = configuration.hidden_layer_structure\n\n # initialize weights out of individual\n\n individual_index = 0 # progress index\n # initialize empty\n self.weight_ih = [] # Weights for weighted input values\n self.weight_hh = [] # Weights for weighted stored values\n self.bias_h = [] # Biases\n self.hidden = [] # Initial values for state storage\n self.layer_output = [] # Weights from output last Layer to output nodes\n number_gates = self.get_number_gates()\n number_hidden_values = self.get_number_hidden_values()\n\n # iterate for all given layers in the structure\n for layer in range(len(hidden_layer_structure)):\n\n # Matrices for weighted input values in calculations\n layer_input_size = input_size if layer == 0 else hidden_layer_structure[layer - 1]\n number_elements = number_gates * hidden_layer_structure[layer] * layer_input_size\n self.weight_ih.append(\n np.array(\n individual[individual_index: individual_index + number_elements]\n ).reshape((number_gates, hidden_layer_structure[layer], layer_input_size))\n )\n individual_index += number_elements\n\n # Matrices for weighted state values in calculations\n if configuration.diagonal_hidden_to_hidden: # Whether each neuron can only access its own state\n self.weight_hh.append(\n [np.diag(individual[\n individual_index + k * hidden_layer_structure[layer]:\n individual_index + k * hidden_layer_structure[layer] + hidden_layer_structure[layer]\n ])\n for k in range(number_gates)\n ]\n )\n individual_index += number_gates * hidden_layer_structure[layer]\n else:\n number_elements = number_gates * hidden_layer_structure[layer] * hidden_layer_structure[layer]\n self.weight_hh.append(\n np.array(\n individual[individual_index: individual_index + number_elements]\n ).reshape((number_gates, hidden_layer_structure[layer], hidden_layer_structure[layer]))\n )\n individual_index += number_elements\n\n # initialize biases\n\n # Biases for gates\n if configuration.use_bias:\n number_elements = hidden_layer_structure[layer] * number_gates\n self.bias_h.append(\n np.array(\n individual[individual_index: individual_index + number_elements]\n ).reshape((number_gates, 
hidden_layer_structure[layer]))\n )\n individual_index += number_elements\n else:\n self.bias_h.append(np.zeros((number_gates, hidden_layer_structure[layer])).astype(np.float32))\n\n # initialize initial state values\n self.hidden.append(np.zeros((number_hidden_values, hidden_layer_structure[layer])))\n\n self.layer_output.append(np.zeros((hidden_layer_structure[layer])))\n # for end\n\n # Matrix for transforming output of last layer into output neurons\n number_elements = hidden_layer_structure[len(hidden_layer_structure) - 1] * output_size\n self.weight_ho = np.array(\n individual[individual_index: individual_index + number_elements]\n ).reshape((output_size, hidden_layer_structure[len(hidden_layer_structure) - 1]))\n individual_index += number_elements\n\n # Has all values been used and therefore does get_individual_size() provide the right number?\n assert individual_index == len(individual)\n\n @classmethod\n def get_free_parameter_usage(cls, input_size: int, output_size: int, configuration: dict, brain_state: dict):\n\n configuration = ILayerBasedBrainCfg(**configuration)\n number_gates = cls.get_number_gates()\n hidden_size = cls.get_number_hidden_values()\n hidden_structure = configuration.hidden_layer_structure\n individuals = {}\n\n for layer in range(len(hidden_structure)):\n layer_dict = {\n # Matrices for weighted input values\n # The first Layer don't has an output from the previous layer, but the input values\n 'input_weight_matrix': number_gates * hidden_structure[layer] * (\n input_size if layer == 0 else hidden_structure[layer - 1]),\n # Matrices for weighted state values\n 'hidden_weight_matrix': number_gates * hidden_structure[layer] * (\n 1 if configuration.diagonal_hidden_to_hidden else hidden_structure[layer])\n }\n # initialize biases\n if configuration.use_bias:\n layer_dict['bias'] = hidden_structure[layer] * number_gates\n\n individuals['layer ' + str(layer)] = layer_dict\n # for end\n\n # Matrix for transforming output of last layer into output neurons\n individuals['output_weight_matrix'] = hidden_structure[len(hidden_structure) - 1] * output_size\n return individuals\n\n def step(self, ob: np.ndarray):\n layer_input = ob\n # iterate for all given layers\n for layer in range(len(self.hidden)):\n if layer == 0:\n x = layer_input\n else:\n x = self.layer_output[layer - 1]\n # Returns a list with two elements.\n # The first element is the calculated new hidden cell state, the second is the layer output\n # Necessary for LSTM\n layer_result = self.layer_step(x, self.weight_ih[layer], self.weight_hh[layer], self.bias_h[layer],\n self.hidden[layer])\n self.hidden[layer] = layer_result[0]\n self.layer_output[layer] = layer_result[1]\n return np.dot(self.weight_ho, self.layer_output[len(self.layer_output) - 1])\n\n def reset(self):\n self.hidden = np.zeros_like(self.hidden)\n\n @classmethod\n def generate_brain_state(cls, input_size: int, output_size: int, configuration: dict):\n pass\n\n @classmethod\n def save_brain_state(cls, path, brain_state):\n pass\n","sub_path":"naturalnets/brains/i_layer_based_brain.py","file_name":"i_layer_based_brain.py","file_ext":"py","file_size_in_byte":7815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
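To see how the abstract hooks fit together, here is an illustrative concrete subclass (not from the repository): a plain Elman RNN needs one gate matrix pair and one hidden vector per layer, so `layer_step` reduces to a single tanh update. Assumes the module above is importable:

```python
# Hypothetical subclass, a sketch of the minimal hook implementation.
import numpy as np
from naturalnets.brains.i_layer_based_brain import ILayerBasedBrain

class ElmanBrain(ILayerBasedBrain):
    @staticmethod
    def get_number_gates():
        return 1  # one input->hidden and one hidden->hidden matrix per layer

    @staticmethod
    def get_number_hidden_values():
        return 1  # the stored state is just the layer output itself

    @staticmethod
    def layer_step(layer_input, weight_ih, weight_hh, bias_h, hidden):
        new_hidden = np.tanh(
            np.dot(weight_ih[0], layer_input)
            + np.dot(weight_hh[0], hidden[0])
            + bias_h[0]
        )
        # First element: new hidden cell state; second element: layer output.
        return [new_hidden[np.newaxis, :], new_hidden]
```

The shapes line up with the constructor above: `weight_ih[0]` is `(hidden, input)`, `hidden[0]` is the previous state vector, and the returned state keeps the `(num_hidden_values, layer_size)` layout that `step` writes back into `self.hidden[layer]`.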
+{"seq_id":"290959533","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom nordostra import views\nadmin.autodiscover()\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^session/$', 'nordostra.views.home', name='home'),\n url(r'^session/(?P\\d{1,4})/$', 'nordostra.views.percent', name='percent'),\n url(r'^accounts/', include('registration.backends.default.urls')),\n)\n\n \n","sub_path":"nordostra/nordostra/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"135680930","text":"class ApiKeySecurity:\n\n def __init__(self, key_in: str = None, name: str = None, description: str = None, **kwargs):\n self.configs = {\n 'type': 'apiKey',\n 'in': key_in,\n 'name': name,\n 'description': description,\n **kwargs\n }\n\n\nclass ApiKeySecurityIn:\n header = 'header'\n query = 'query'\n","sub_path":"nest/packages/swagger/core/api_key_security.py","file_name":"api_key_security.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"205017750","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport os\nimport json\nimport base64\nimport time\nimport datetime\nimport xlrd\nfrom .task import Task\n\nimport traceback\n\nfrom models import conn, purse, api, member\n\nTEMP_DIR = os.path.dirname(os.path.realpath(__file__)) + '/temp'\n\nclass Settlement(Task):\n \n conn = None\n providerName = 'bzl-provider'\n\n def setApi(self, conf):\n Task.setApi(self, conf)\n self.conf = self.api.conf\n self.tempFile = '%s/settlementtime-%s.txt' % (TEMP_DIR, self.conf['name'])\n\n def queryUserBoardList(self, start, end, index = 0):\n params = {\n \"end_time_start\": start,\n \"end_time_end\": end,\n \"query_index\": index\n }\n # print(('query param:', params))\n data = self.api.queryUserBoard(params)\n\n if len(data) >= 30:\n newdata = self.queryUserBoardList(start, end, index = index + 30)\n data.extend(newdata)\n\n return data\n\n def settlement(self):\n try:\n # 判断是否开启\n statusResult = api.getLoginInfo(self.conn, self.conf['serviceCode'])\n # 关闭同步功能\n if not statusResult or statusResult['status'] == 0:\n return\n except Exception as e:\n traceback.print_exc()\n return\n \n now = datetime.datetime.now()\n nowStr = now.strftime('%Y-%m-%d %H:%M:%S')\n lastTimeStr = False\n\n if os.path.exists(self.tempFile):\n try:\n tempfileReader = open(self.tempFile, 'r')\n lastTimeStr = json.loads(tempfileReader.read())['lastTime']\n lastTime = datetime.datetime.strptime(lastTimeStr,'%Y-%m-%d %H:%M:%S')\n lastTimeStr = (lastTime + datetime.timedelta(minutes = -60)).strftime('%Y-%m-%d %H:%M:%S')\n except Exception as e:\n traceback.print_exc()\n if not lastTimeStr:\n lastTimeStr = (now + datetime.timedelta(days = -3)).strftime('%Y-%m-%d %H:%M:%S')\n\n try:\n dataList = self.queryUserBoardList(\n lastTimeStr,\n nowStr,\n 0\n )\n\n print(dataList)\n\n if dataList:\n print(('fetch:', len(dataList)))\n\n for record in dataList:\n self.settleRecord(record)\n else:\n print(('no records'))\n\n tempfile = open(self.tempFile, 'w+')\n tempfile.write(json.dumps({ 'lastTime': nowStr }))\n except Exception as e:\n traceback.print_exc()\n\n def getCustTimestamp(self, timeStr, seconds = 0, minutes = 0):\n t = datetime.datetime.strptime(timeStr,'%Y-%m-%d %H:%M:%S')\n t = t + datetime.timedelta(seconds = seconds, minutes = minutes)\n return str(time.mktime(t.timetuple()))\n\n def settleRecord(self, record):\n if record['pccid'] != '2525717358':\n print('user is not 2525717358')\n return\n currentTime = str(time.time())\n gameEndTime = self.getCustTimestamp(record['end_time'])\n\n print(('开始处理:', record))\n \n # 结算判断标志 pccid_roomName_clubName_buyIn_bringOut_endTime\n settleGameInfo = base64.b64encode(\n ('%s_%s_%s_%s_%s_%s' % (\n record['pccid'],\n record['room_name'],\n record['club_name'],\n record['buy_in'],\n record['bring_out'],\n record['end_time']\n )).encode('utf-8')\n )\n\n gameEndLog = {\n 'game_uid': record['pccid'],\n 'game_id': record['room_name'],\n 'board_id': '',\n 'end_game_time': gameEndTime,\n 'apply_time': currentTime,\n 'settle_game_info': settleGameInfo,\n }\n\n # 判断是否查无此人\n memberResult = purse.getPurseInfoByGameId(self.conn, record['pccid'])\n if not memberResult:\n print(('no user'))\n gameEndLog['action'] = 'no UID'\n purse.addSettleFailLog(self.conn, gameEndLog)\n return\n\n\n # 查询结算表中是否已有结算记录.如果已经存在,则抛弃\n countResult = purse.getSettleRecord(self.conn, settleGameInfo)\n if countResult['settle_count'] > 0:\n print(('already settlemented'))\n return\n\n # 查询游戏期间该用户的所有带入金额是否足够与代理接口一致,不足则不结算\n joinToken = 
base64.b64encode(('%s_%s' % (record['club_name'], record['room_name'])).encode('utf-8'))\n print(('join token is:', joinToken))\n\n beginTime = self.getCustTimestamp(record['end_time'], minutes = -720)\n endTime = self.getCustTimestamp(record['end_time'], minutes = 120)\n buyInAmountResult = purse.getTotoalBuyinAmount(\n self.conn,\n record['pccid'],\n beginTime,\n endTime,\n joinToken\n )\n\n print(('total buy in:',buyInAmountResult))\n\n if buyInAmountResult['totalAmount'] < record['buy_in']:\n if not buyInAmountResult['totalAmount'] or buyInAmountResult['totalAmount'] == 0:\n print(('no apply'))\n gameEndLog['action'] = 'no Buyin'\n else:\n print(('amount not match, local:', buyInAmountResult['totalAmount'], ', remote', record['buy_in']))\n gameEndLog['action'] = 'no enough, local buyin: %s, remote buyin: %s' % (buyInAmountResult['totalAmount'], record['buy_in'])\n \n purse.addSettleFailLog(self.conn, gameEndLog)\n return\n\n # 记录结算日志\n gameEndLog['action'] = 'OK'\n purse.addSettleFailLog(self.conn, gameEndLog)\n\n memberResult['settle_game_info'] = settleGameInfo\n\n # 更新钱包\n purse.updatePurse(self.conn, memberResult, record['buy_in'] + record['afterwater'])\n\n def toData(self, file):\n name2columnMap = {\n 0: 'pccname',\n 1: 'pccid',\n 6: 'username',\n 8: 'club_name',\n 10: 'room_name',\n 12: 'end_time',\n 14: 'buy_in',\n 15: 'bring_out',\n 17: 'afterwater'\n }\n data = []\n print(('begin transfer local file:', file))\n x1 = xlrd.open_workbook(file)\n sheet1 = x1.sheet_by_index(0)\n print(sheet1)\n if sheet1.nrows <= 1:\n return data\n for rn in range(1, sheet1.nrows):\n rowData = {}\n row = sheet1.row(rn)\n for cn2 in range(0, len(row)):\n if name2columnMap.has_key(cn2):\n name = name2columnMap[cn2]\n rowData[name] = row[cn2].value\n\n rowData['buy_in'] = int(float(rowData['buy_in']))\n rowData['bring_out'] = int(float(rowData['bring_out']))\n rowData['afterwater'] = int(float(rowData['afterwater']))\n\n data.append(rowData)\n\n print(('local datas:', data))\n return data\n\n def localSettlement(self):\n if not os.path.exists(self.conf['localDataPath']):\n return\n files = os.listdir(self.conf['localDataPath'])\n print(('local files:', files))\n if len(files) == 0:\n return \n\n for num in range(0, len(files)):\n if files[num] == 'failed':\n continue\n try:\n rfile = os.path.join(self.conf['localDataPath'], files[num])\n data = self.toData(rfile)\n if len(data) == 0:\n continue\n for dnum in range(0, len(data)):\n self.settleRecord(data[dnum])\n os.remove(rfile)\n except Exception as e:\n print(('local settlement fail:', rfile))\n faileddir = os.path.join(self.conf['localDataPath'], 'failed')\n if not os.path.exists(faileddir):\n os.makedirs(faileddir)\n os.rename(rfile, os.path.join(faileddir, files[num]))\n traceback.print_exc()\n\n\n def callback(self):\n try:\n self.conn = conn(self.config['db'])\n self.localSettlement()\n except Exception as e:\n traceback.print_exc()\n finally:\n self.conn.close()\n\n try:\n self.conn = conn(self.config['db'])\n self.settlement()\n except Exception as e:\n traceback.print_exc()\n finally:\n self.conn.close()\n","sub_path":"task/Settlement.py","file_name":"Settlement.py","file_ext":"py","file_size_in_byte":7510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
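The settlement dedup token from `settleRecord`, isolated as a standalone sketch: base64 of the underscore-joined record fields yields one opaque, stable key per settled game, which a lookup like `getSettleRecord` can use for idempotency:

```python
# Standalone sketch of the dedup-token construction used above.
import base64

def settle_token(record):
    raw = '%s_%s_%s_%s_%s_%s' % (
        record['pccid'], record['room_name'], record['club_name'],
        record['buy_in'], record['bring_out'], record['end_time'])
    return base64.b64encode(raw.encode('utf-8'))

# Example (hypothetical values):
# settle_token({'pccid': '1', 'room_name': 'r', 'club_name': 'c',
#               'buy_in': 100, 'bring_out': 150,
#               'end_time': '2019-01-01 00:00:00'})
```

Because the token is derived only from the record's own fields, re-processing the same game record always produces the same key, so a duplicate settlement is caught by a single table lookup.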
+{"seq_id":"420397835","text":"import dash\nfrom dash.dependencies import Output, Input, State\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport dash_daq as daq\n\nimport plotly\nimport random\nimport plotly.graph_objs as go\nfrom collections import deque\nimport sqlite3\nimport pandas as pd\nimport numpy as np\n\n#conn = sqlite3.connect('twitter.db')\n#c = conn.cursor()\n# external CSS stylesheets\nexternal_stylesheets = [\n 'https://codepen.io/chriddyp/pen/bWLwgP.css',\n {\n 'href': \"https://stackpath.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css\",\n 'rel': 'stylesheet',\n 'integrity': \"sha384-HSMxcRTRxnN+Bdg0JdbxYKrThecOKuH5zCYotlSAcp1+c8xmyTe9GYg1l9a69psu\",\n 'crossorigin': 'anonymous'\n }\n]\n# external JavaScript files\nexternal_scripts = [\n 'https://www.google-analytics.com/analytics.js',\n {'src': 'https://cdn.polyfill.io/v2/polyfill.min.js'},\n {\n 'src': 'https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.10/lodash.core.js',\n 'integrity': 'sha256-Qqd/EfdABZUcAxjOkMi8eGEivtdTkh3b65xCZL4qAQA=',\n 'crossorigin': 'anonymous'\n }\n]\n\napp = dash.Dash(__name__, update_title='Coletando tweets...', \n external_stylesheets=[dbc.themes.BOOTSTRAP]\n #external_scripts=external_scripts\n )\napp.layout = dbc.Container(\n [ html.H1('Bolsometro'),\n html.Hr(),\n\n dbc.NavbarSimple(\n children=[\n #dbc.NavItem(dbc.NavLink(\"Page 1\", href=\"#\")),\n dbc.DropdownMenu(\n children=[\n dbc.DropdownMenuItem(\"Redes Sociais\", header=True),\n dbc.DropdownMenuItem(\"Linkedin\", href=\"https://www.linkedin.com/in/neto-figueira/\"),\n dbc.DropdownMenuItem(\"Github\", href=\"https://github.com/netofigueira\"),\n ],\n nav=True,\n in_navbar=True,\n label=\"Contato\",\n ),\n ],\n brand=\"Tendência de Sentimento no Twitter\",\n brand_href=\"#\",\n color=\"dark\",\n dark=True,\n ),\n \n\n\n\n dbc.Col([ \n dbc.Col(dcc.Graph(id='live-graph', animate=True ) ), \n \n ]),\n###\n # dbc.Row([ \n # \n # dbc.Col(html.Div(dcc.Graph(id='my-gauge', animate=True)) ),\n\n # ]),\n dcc.Interval(\n id='graph-update',\n interval= 800,\n ),\n dcc.Interval(\n id='my-gauge-update',\n interval= 800,\n ), \n\n html.Div(html.H2('Tweets recentes ao vivo')),\n html.Hr(),\n html.Div(className='row', children=[html.Div(id=\"recent-tweets-table\")] ),\n\n\n dcc.Interval(\n id='recent-table-update',\n interval= 4*10**3,\n #n_intervals=2\n ),\n ]\n)\ndef color_select(d):\n if d > 5:\n return \"#95D2EC\"\n if d < -5:\n return \"#FF4242\"\n else:\n return \"#FFFFFF\"\n\ndef generate_table(df, max_rows=10):\n return html.Table(className=\"table\",\n children=[\n html.Thead(\n html.Tr(\n children=[\n html.Th(col.title()) for col in df.columns.values],\n \n )\n ),\n html.Tbody(\n [\n \n html.Tr(\n children=[\n html.Td(data) for data in d\n ], style={'background-color':color_select(d[1])}\n )\n for d in df.values.tolist()])\n ]\n )\n\n\n\"\"\"\n@app.callback(\n Output(\"popover\", \"is_open\"),\n [Input(\"popover-target\", \"n_clicks\")],\n [State(\"popover\", \"is_open\")],\n)\ndef toggle_popover(n, is_open):\n if n:\n return not is_open\n return is_open\n\n@app.callback(Output('my-gauge', 'figure'),\n [Input('my-gauge-update', 'n_intervals')])\n\ndef update_gauge(input_data):\n try:\n conn = sqlite3.connect('twitter.db')\n df = pd.read_sql(\"SELECT * FROM sentiment ORDER BY unix DESC LIMIT 500\", conn)\n df.sort_values('unix', inplace=True)\n s_array = df.sentiment.values\n df['sentiment'] = np.interp(s_array, (s_array.min(), s_array.max()), (-10,10) )\n\n 
neg = df[(df.sentiment < 0)].sentiment.mean()\n\n df['sentiment_smoothed'] = df['sentiment'].rolling(int(len(df)/100)).mean()\n\n fig = go.Indicator(\n mode = \"gauge+number\",\n value = neg,\n domain = {'x': [0, 1], 'y': [0, 1]},\n title = {'text': \"Bolsometro\"},\n #delta = {'reference': 8, 'increasing': {'color': \"Green\"}},\n gauge = {\n 'axis': {'range': [0, -10], 'tickwidth': 1, 'tickcolor': \"#EF553B\"},\n 'bar': {'color': \"#453938\"},\n 'bgcolor': \"white\",\n 'borderwidth': 2,\n 'bordercolor': \"gray\",\n 'steps': [\n {'range': [0, -5], 'color': 'white'},\n {'range': [-5, -6.5], 'color': '#ffb0a8'},\n {'range': [-6.5, -8], 'color': '#EF553B'}],\n 'threshold': {\n 'line': {'color': \"red\", 'width': 4},\n 'thickness': 0.75,\n 'value': 9}}\n )\n return {'data': [fig]}\n\n except Exception as e:\n with open('errors.txt','a') as f:\n f.write(str(e))\n f.write('\\n')\n\"\"\"\n \n@app.callback(Output('live-graph', 'figure'),\n [Input('graph-update', 'n_intervals')])\ndef update_graph_scatter(input_data):\n try:\n conn = sqlite3.connect('twitter.db')\n c = conn.cursor()\n df = pd.read_sql(\"SELECT * FROM sentiment ORDER BY unix DESC LIMIT 2000\", conn)\n df.sort_values('unix', inplace=True)\n\n s_array = df.sentiment.values\n df['sentiment'] = np.interp(s_array, (s_array.min(), s_array.max()), (-10,10) )\n\n df['sentiment_smoothed'] = df['sentiment'].rolling(int(len(df)/500)).mean()\n df.dropna(inplace=True)\n df['date'] = pd.to_datetime(df['unix'],unit='ms')\n # converting to são paulo time. \n df['date'] = df.date.dt.tz_localize('UTC').dt.tz_convert('America/Sao_Paulo')\n df.set_index('date', inplace=True)\n\n df_bolso = df[df.tweet.str.contains('bolsonaro', case=False)]\n\n df_lula = df[df.tweet.str.contains('lula', case=False)]\n\n #df = df.resample('10s').mean()\n df_bolso = df_bolso.resample('60s').mean()\n #X = df.index\n \n \n X = df_bolso.index\n\n Y= df_bolso.sentiment_smoothed.round(decimals=2)\n Y2 = df_lula.sentiment_smoothed.round(decimals=2)\n\n data = plotly.graph_objs.Scatter(\n x=X,\n y=Y,\n name='Bolsonaro',\n mode= 'lines',\n line = dict(color = 'green')\n )\n\n\n data2 = plotly.graph_objs.Scatter(\n x=X,\n y=Y2,\n name='Lula',\n mode = 'lines',\n line = dict(color = 'red')\n \n )\n\n return {'data': [data, data2],'layout' : go.Layout(xaxis=dict(range=[min(X),max(X)]),\n yaxis=dict(range=[-5,5]),)}\n\n except Exception as e:\n with open('errors.txt','a') as f:\n f.write(str(e))\n f.write('\\n')\n\n@app.callback(Output('recent-tweets-table', 'children'),\n [Input('recent-table-update', 'n_intervals')],\n ) \ndef update_recent_tweets(input_data):\n conn = sqlite3.connect('twitter.db')\n c = conn.cursor()\n\n df = pd.read_sql(\"SELECT * FROM sentiment ORDER BY unix DESC LIMIT 10\", conn)\n\n df['date'] = pd.to_datetime(df['unix'], unit='ms')\n s_array = df.sentiment.values\n df['sentiment'] = np.interp(s_array, (s_array.min(), s_array.max()), (-10,10) )\n df['sentiment'] = df.sentiment.round(2)\n df = df.drop(['unix'], axis=1)\n df = df[['date','tweet','sentiment']]\n # converting to são paulo time. 
\n df['date'] = df.date.dt.tz_localize('UTC').dt.tz_convert('America/Sao_Paulo')\n df.set_index('date', inplace=True)\n return generate_table(df, max_rows=10)\n\n\n#external_css = [\"https://stackpath.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css\"]\n#for css in external_css:\n# app.css.append_css({\"external_url\": css})\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)","sub_path":"dash_bootstrap.py","file_name":"dash_bootstrap.py","file_ext":"py","file_size_in_byte":9091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
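The repeated `np.interp` call in the callbacks does one thing: linearly rescale the raw sentiment scores onto [-10, 10], using the batch's own min and max as the endpoints. In isolation:

```python
# What the rescaling line computes, on toy values.
import numpy as np

s = np.array([-0.8, 0.0, 0.4])
scaled = np.interp(s, (s.min(), s.max()), (-10, 10))
print(scaled)  # [-10.  3.33...  10.]: min maps to -10, max to 10, rest linearly
```

One consequence worth knowing: because the endpoints come from the current window of tweets, the scale is relative per query, so a uniformly mild batch still spans the full [-10, 10] range.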
+{"seq_id":"102529090","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nimport urllib.request\r\n\r\ndef getImages(link):\r\n\r\n\turls = urllib.request.urlopen(link).read().decode()\r\n\tpic_num = 1\r\n \r\n\tfor pic in urls.split('\\n'):\r\n\t\ttry:\r\n\t\t\turllib.request.urlretrieve(pic, 'people/ppl ({}).png'.format(pic_num))\r\n\t\t\tpic_num += 1\r\n\r\n\t\texcept Exception as e:\r\n\t\t\tprint(str(e))\r\n\r\n\r\nif __name__ == '__main__' :\r\n\r\n\t############## BUDYNKI\r\n\t#link = 'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n04413969'\r\n\t############# LUDZIE\r\n\tlink = 'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n07942152'\r\n\tgetImages(link)\r\n","sub_path":"getimg.py","file_name":"getimg.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"49222613","text":"#!/usr/bin/env python\n\n# 2014-11-15\n# tarte.py\n# Public Domain\n\nimport time\n\nimport pigpio\n\nLED1=21\nLED2=26\nLED3=12\nLED4=6\n\n\"\"\"\n7.5 10 12 16\n\nFind the number of cycles needed for an integral switch on/off for each LED.\n\nThat's 2*7.5 which is 2 seconds worth.\n\nIn 2 seconds there will be this many cycles of on/off\n\n15 20 24 32\n\nHow many micros for each cycle?\n\n15 66666 on 66667 off = 1999995\n20 50000 on 50000 off = 2000000\n24 41666 on 41667 off = 1999992\n32 31250 on 31250 off = 2000000\n\nThere will be a slight error which will not be detectable by most means.\n\"\"\"\n\ndef wave(pi, gpio, hz, secs, on=1, offset=0):\n \"\"\"\n Generate a hz cycles per second square wave on gpio for\n secs seconds. The first transition is to level on at\n offset microseconds from the start.\n \"\"\"\n micros_left = int(secs * 1000000)\n transitions = int(2 * hz * secs)\n micros = micros_left / transitions\n\n if (offset < 0) or (offset > micros):\n print(\"Illegal offset {} for hz {}\".format(offset, hz))\n exit()\n\n wf = [] # Empty waveform.\n\n if offset:\n wf.append(pigpio.pulse(0, 0, offset))\n micros_left -= micros\n last_micros = micros - offset\n transitions -= 1\n\n for t in range(transitions, 0, -1):\n micros = micros_left / t\n if (t & 1) == (on & 1):\n wf.append(pigpio.pulse(0, 1< 0 :\n k = k + 1\n alist = aline.split('\t') #取出列表中的一项,并赋值给新的列表\n #print(alist[3],lines.index(aline))\n z = float(alist[3]) + z #统计持续时间\n \n if k != 0 :\n print(y,':共',k,'条记录,耗时','%.2f' % z,'小时')\n return [k,'%.2f' % z]\n\n\ndef tongjiFile(path_checkDateFile):\n \"\"\"\n 功能:每个维度都统计时间开销,并输出到一个文件中。\n \"\"\"\n\n with open(tongjiRecord,'at', encoding='utf-8') as fnew:\n for aweed in weedlist:\n tu_sum = sumWeed(aweed,path_checkDateFile)\n tjr = str(aweed) + '\\t' + str(tu_sum[0]) + '\\t' + str(tu_sum[1]) + '\\n'\n fnew.write(tjr)\n tu_sum = []\n print('结果已写入文件:',fnew)\n\ndef everydaySumWeed():\n \"\"\"\n 功能:按天统计数据。\n \"\"\"\n\n yourdate = str(input(\"请输入想要统计的日期\\n格式为20190121\\n\"))\n \n #查找有没有该文件\n yy=yourdate[:4]\n mm=yourdate[4:6]\n dd=yourdate[6:]\n if len(yy) < 2 :\n yy = \"0\" + str(yy)\n if len(mm) < 2 :\n mm = \"0\" + str(mm)\n if len(dd) < 2 :\n dd = \"0\" + str(dd) \n newfilename = str(yy)+str(mm)+str(dd)\n pathNew = path_Clear + 'everyday' + newfilename + '.txt'\n print(pathNew)\n if os.path.exists(pathNew):\n tongjiFile(pathNew)\n else:\n print('未查找该日期的数据')\n\nmain()\n","sub_path":"timebill.py","file_name":"timebill.py","file_ext":"py","file_size_in_byte":8557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"149940740","text":"from helpers import *\n\nfrom mobject.tex_mobject import TexMobject\nfrom mobject import Mobject\nfrom mobject.image_mobject import ImageMobject\nfrom mobject.vectorized_mobject import *\n\nfrom animation.animation import Animation\nfrom animation.transform import *\nfrom animation.simple_animations import *\nfrom animation.playground import *\nfrom topics.geometry import *\nfrom topics.characters import *\nfrom topics.functions import *\nfrom topics.fractals import *\nfrom topics.number_line import *\nfrom topics.combinatorics import *\nfrom topics.numerals import *\nfrom topics.three_dimensions import *\nfrom topics.objects import *\nfrom scene import Scene\nfrom scene.zoomed_scene import ZoomedScene\nfrom scene.reconfigurable_scene import ReconfigurableScene\nfrom camera import Camera\nfrom mobject.svg_mobject import *\nfrom mobject.tex_mobject import *\n\nclass ConfettiSpiril(Animation):\n CONFIG = {\n \"x_start\" : 0,\n \"spiril_radius\" : 1,\n \"num_spirils\" : 4,\n \"run_time\" : 10,\n \"rate_func\" : None,\n }\n def __init__(self, mobject, **kwargs):\n digest_config(self, kwargs)\n mobject.next_to(self.x_start*RIGHT + SPACE_HEIGHT*UP, UP)\n self.total_vert_shift = \\\n 2*SPACE_HEIGHT + mobject.get_height() + 2*MED_SMALL_BUFF\n \n Animation.__init__(self, mobject, **kwargs)\n\n def update_submobject(self, submobject, starting_submobject, alpha):\n submobject.points = np.array(starting_submobject.points)\n\n def update_mobject(self, alpha):\n Animation.update_mobject(self, alpha)\n angle = alpha*self.num_spirils*2*np.pi\n vert_shift = alpha*self.total_vert_shift\n\n start_center = self.mobject.get_center()\n self.mobject.shift(self.spiril_radius*OUT)\n self.mobject.rotate(angle, axis = UP, about_point = start_center)\n self.mobject.shift(vert_shift*DOWN)\n\nclass Anniversary(TeacherStudentsScene):\n CONFIG = {\n \"num_confetti_squares\" : 50,\n }\n def construct(self):\n self.celebrate()\n self.complain()\n\n def celebrate(self):\n title = TextMobject(\"2 year Anniversary!\")\n title.scale(1.5)\n title.to_edge(UP)\n\n first_video = Rectangle(\n height = 2, width = 2*(16.0/9),\n stroke_color = WHITE,\n fill_color = \"#111111\",\n fill_opacity = 0.75,\n )\n first_video.next_to(self.get_teacher(), UP+LEFT)\n first_video.shift(RIGHT)\n formula = TexMobject(\"e^{\\\\pi i} = -1\")\n formula.move_to(first_video)\n first_video.add(formula)\n\n hats = self.get_party_hats()\n confetti_spirils = self.get_confetti_animations()\n self.play(\n Write(title, run_time = 2),\n *[\n ApplyMethod(pi.change_mode, \"hooray\")\n for pi in self.get_pi_creatures()\n ]\n )\n self.play(\n DrawBorderThenFill(\n hats,\n submobject_mode = \"lagged_start\",\n rate_func = None,\n run_time = 2,\n ),\n *confetti_spirils + [\n Succession(\n Animation(pi, run_time = 2),\n ApplyMethod(pi.look, UP+LEFT),\n ApplyMethod(pi.look, UP+RIGHT),\n Animation(pi),\n ApplyMethod(pi.look_at, first_video),\n rate_func = None\n )\n for pi in self.get_students()\n ] + [\n Succession(\n Animation(self.get_teacher(), run_time = 2),\n Blink(self.get_teacher()),\n Animation(self.get_teacher(), run_time = 2),\n ApplyMethod(self.get_teacher().change_mode, \"raise_right_hand\"),\n rate_func = None\n ),\n DrawBorderThenFill(\n first_video, \n run_time = 10,\n rate_func = squish_rate_func(smooth, 0.5, 0.7)\n )\n ]\n )\n self.change_student_modes(*[\"confused\"]*3)\n\n def complain(self):\n self.student_says(\n \"Why are you \\\\\\\\ talking so fast?\",\n student_index = 0,\n target_mode = \"sassy\",\n )\n 
self.change_student_modes(*[\"sassy\"]*3)\n self.play(self.get_teacher().change_mode, \"guilty\")\n self.dither(2)\n\n def get_party_hats(self):\n hats = VGroup(*[\n PartyHat(\n pi_creature = pi,\n height = 0.5*pi.get_height()\n )\n for pi in self.get_pi_creatures()\n ])\n max_angle = np.pi/6\n for hat in hats:\n hat.rotate(\n random.random()*2*max_angle - max_angle,\n about_point = hat.get_bottom()\n )\n return hats\n\n def get_confetti_animations(self):\n colors = [RED, YELLOW, GREEN, BLUE, PURPLE, RED]\n confetti_squares = [\n Square(\n side_length = 0.2,\n stroke_width = 0,\n fill_opacity = 0.5,\n fill_color = random.choice(colors),\n )\n for x in range(self.num_confetti_squares)\n ]\n confetti_spirils = [\n ConfettiSpiril(\n square,\n x_start = 2*random.random()*SPACE_WIDTH - SPACE_WIDTH,\n rate_func = squish_rate_func(lambda t : t, a, a+0.5)\n )\n for a, square in zip(\n np.linspace(0, 0.5, self.num_confetti_squares),\n confetti_squares\n )\n ]\n return confetti_spirils\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"efvgt.py","file_name":"efvgt.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"485682946","text":"#\n# Chess knight problem\n# lets try 8 x 8 sq\n#\n# X\n# 0 1 2 3 4 5 6 7\n# 8 9 10 11 12 13 14 15\n# 16 17 18 19 20 21 22 23\n# Y 24 25 26 _27_ 28 29 30 31\n# 32 33 34 35 36 37 38 39\n# 40 41 42 43 44 45 46 47\n# 48 49 50 51 52 53 54 55\n# 56 57 58 59 60 61 62 63\n\n\ndef pprint (M):\n for i in range(len(M)):\n if i%8 == 0:\n print (\"\")\n print (\"{:3}\".format(M[i]),end='')\n\n\ndef get_moves (pos, M):\n \"\"\" return possible moves \"\"\"\n y,x = divmod(pos, 8)\n moves=[-17, -15, -10, -6, +6, +10, +15, +17]\n list=[]\n # 1: if pos + move is > 63 or < 0, don't allow\n for i in moves:\n # 2: check for moves across a board boundary (not allowed)\n if (pos + i >= 0) and (pos+i <= 63):\n ynew,xnew=divmod(pos+i, 8)\n if abs(xnew-x) <= 2:\n # 3: check that the proposed position has not yet been occupied\n if M[pos+i] == 0:\n list.append (pos+i)\n list.sort()\n list.reverse()\n return (list)\n\n\ndef ulist(M, pos,v):\n \"\"\" append a value to M and return \"\"\"\n import copy\n list= copy.deepcopy(M)\n list[pos]=v\n return list\n \n\ndef solve (M, cpos, move):\n \"\"\" solve the question recursively \"\"\" \n if move == 64:\n print (\"\\n\\nmove: \", move)\n print (\"sum: \", sum(M))\n pprint (M)\n #exit()\n for next in get_moves(cpos, M):\n solve(ulist(M, next, move+1), next, move+1)\n\n\nif __name__ == '__main__':\n M=[0]*64\n M[2]=1\n solve(M, cpos=2, move=1)\n\n","sub_path":"chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"582133235","text":"#!/usr/bin/env python\n# Determine what databases to backup and do it\n\nimport os, sys\nimport re\nimport subprocess\nimport MySQLdb\nimport optparse\n\nparser = optparse.OptionParser()\nparser.add_option('-v', '--verbose', action=\"store_true\",\n help = \"Enable verbose output\",\n dest = \"verbose\",\n default = False)\nparser.add_option('-s', '--secrets-file', action=\"store\",\n help = \"Secrets file containing mysql login (%default)\",\n dest = \"secret\",\n default = \"/root/.my.cnf\")\nparser.add_option('-e', '--exclude-file', action=\"store\",\n help = \"File of databases to exclude from backup, one per line (%default)\",\n dest = \"exclude\",\n default = \"/etc/copy-db.exclude\")\nparser.add_option('-d', '--backup-dir', action=\"store\",\n help = \"Directory to store backup (%default)\",\n dest = \"backup_dir\",\n default = \"/var/lib/mysql-backup\")\nparser.add_option('-n', '--no-hot-copy', action=\"store_true\",\n help = \"Never backup using mysqlhotcopy\",\n dest = \"disable_hotcopy\",\n default = False)\n\ndo_verbose = False\n(options,args) = parser.parse_args()\n\ndo_verbose = options.verbose\nsecret_file = options.secret\nexclude_file = options.exclude\nbackup_dir = options.backup_dir\ndisable_hotcopy = options.disable_hotcopy\n\ndef verbose(s):\n if do_verbose:\n print >>sys.stderr, s\n\n# we connect multiple times to avoid leaving an idle connection open\n# while we do long dump operations; it might get timed out\ndef connect_to_db():\n return MySQLdb.connect(host=\"localhost\",\n user=\"root\",\n read_default_file=secret_file)\n\ndbs = [] # Databases on the machine, got from MySQL\nuidbs = {} # Databases not to be backed up, read from copy-db.exclude\n\n# Get available DBs list\nconn = connect_to_db()\nconn.set_character_set(\"utf8\")\ncursor = conn.cursor()\ncursor.execute(\"SHOW DATABASES\")\nfor fields in cursor.fetchall():\n dbs.append(unicode(fields[0], \"utf8\"))\ncursor.close()\nconn.close()\n\n# Get not-to-be-backed-up list\ntry:\n list = open(exclude_file)\nexcept IOError:\n list = []\nfor line in list:\n if not line.startswith ('#'):\n dbname, who, when = line.strip ().split ()\n uidbs[dbname] = (who, when)\n\n# Spit warnings and remove not-to-be-backed-up databases from the list\nfor i in uidbs:\n if i not in dbs:\n sys.stderr.write('WARNING: redundant entry for database %s in %s\\n\\n' % (i, exclude_file))\n else:\n verbose ('database %s not being backed up (request by %s on %s)' % (i, uidbs[i][0], uidbs[i][1]))\n dbs.remove (i)\n\n# Turn a database name into a filename. What we consider\n# filename-safe is the same MySQL, but the encoding of non-safe\n# characters differs. MySQL has tables for some non-ASCII unicode -\n# e.g. U+00C0 LATIN CHARACTER CAPITAL LETTER A WITH ACUTE is @0G\n# then it uses @xxxx for the rest. We use @xxxx for everything.\n# We don't actually need a match with what MySQL does, just\n# something that won't contain meta-characters like '/', but matching\n# up for ASCII names like 'db_backup' is slightly useful\ndef encode_as_filename(s):\n return re.sub('[^A-Za-z0-9]', escape_match, s)\n\ndef escape_match(m):\n o = ord(m.group(0))\n if o < 0x10000:\n return \"@%04x\" % o\n else:\n return \"@%04x@%04x\" % (0xd800 + (o / 1024), 0xdc00 + (o % 1024))\n\n# Backup!\nfor db in dbs:\n # mysqlhotcopy only works for MyISAM and ARCHIVE tables. 
If a database has\n # only tables of those types, then we use mysqlhotcopy.\n #\n # For InnoDB tables we can use mysqldump --single-transaction to get a\n # consistent snapshot of the database.\n #\n # For tables with a mixture of InnoDB and MyISAM tables, neither of the\n # above methods will work and give a consistent snapshot. We could\n # use 'mysqldump --lock-tables', but that would keep the entire database\n # locked for the entire length of the dump. Instead we assume that in\n # this case, the application doesn't care much about the consistency\n # of the MyISAM tables and use --single-transaction anyway. (This is the\n # right thing to do for bugzilla where everything but the bugs_fulltext\n # table is InnoDB. bugs_fulltext is just a mirror of the other tables for\n # searching purposes.)\n #\n # Note that mysqlhotcopy is not necessarily faster than mysqldump - the\n # compressed dump will typically be much smaller and faster to write to\n # disk than the copy. The hot copy, on the other hand, may be more rsync\n # friendly when we rsync the databases to the backup machine (This theory\n # is untested.)\n #\n # Future enhancement would be to extend copy-db.exclude to allow specifying\n # per-database backup methods.\n\n can_hotcopy = True\n\n db_filename = encode_as_filename(db)\n if db_filename != db:\n # mysqlhotcopy doesn't understand encoded database names\n can_hotcopy = False\n if disable_hotcopy:\n can_hotcopy = False\n\n # Figure out what types of tables the database has\n conn = connect_to_db()\n conn.set_character_set(\"utf8\")\n conn.select_db(db.encode(\"utf8\"))\n cursor = conn.cursor()\n cursor.execute(\"SHOW TABLE STATUS\")\n for fields in cursor.fetchall():\n engine = fields[1]\n if engine != 'MyISAM' and engine != 'ARCHIVE':\n can_hotcopy = False\n cursor.close()\n conn.close()\n\n if can_hotcopy:\n verbose(\"Backing up %s via mysqlhotcopy\" % db)\n hotcopy = subprocess.Popen(['mysqlhotcopy', '--quiet', '--allowold', db, backup_dir])\n hotcopy.wait()\n else:\n verbose(\"Backing up %s via mysqldump\" % db)\n outfilename = os.path.join(backup_dir, db_filename + \".dump.gz\")\n outfilename_tmp = outfilename + \".tmp\"\n\n # Add a bit of error checking before freaking out\n if not os.path.exists(backup_dir):\n sys.stderr.write(\"ERROR: '%s' does not exist to backup files into\\n\" % backup_dir)\n sys.exit(1)\n if not os.access(backup_dir, os.W_OK):\n sys.stderr.write(\"ERROR: '%s' is not writable\\n\" % backup_dir)\n sys.exit(1)\n\n outfile = open(outfilename_tmp, \"w\")\n dump = subprocess.Popen(['mysqldump',\n '--single-transaction',\n '--default-character-set=utf8',\n db.encode(\"utf8\")],\n stdout=subprocess.PIPE)\n gzip = subprocess.Popen(['gzip', '-c'],\n stdin=dump.stdout, stdout=outfile)\n dump.wait()\n gzip.wait()\n outfile.close()\n if dump.returncode == 0 and gzip.returncode == 0:\n os.rename(outfilename_tmp, outfilename)\n else:\n print >>sys.stderr, \"Failed to back up %s, leaving old backup\" % db\n os.remove(outfilename_tmp)\n","sub_path":"copy-db.py","file_name":"copy-db.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
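Editor's note: a Python 3 sketch of the "@xxxx" filename convention described in the comments above, restricted to BMP code points for brevity (the original also emits surrogate pairs for larger code points); decode_filename is a hypothetical helper, not part of copy-db.py.

import re

def encode_as_filename(s):
    return re.sub('[^A-Za-z0-9]', lambda m: "@%04x" % ord(m.group(0)), s)

def decode_filename(s):
    return re.sub('@([0-9a-f]{4})', lambda m: chr(int(m.group(1), 16)), s)

name = "db_backup-\u00c0"
encoded = encode_as_filename(name)   # 'db@005fbackup@002d@00c0'
assert decode_filename(encoded) == name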
+{"seq_id":"26865490","text":"from .voc_converter import VocConverter\nfrom .voc_to_sa_pixel import voc_instance_segmentation_to_sa_pixel\nfrom .voc_to_sa_vector import voc_object_detection_to_sa_vector\n\n\nclass VocObjectDetectionStrategy(VocConverter):\n name = \"ObjectDetection converter\"\n\n def __init__(\n self, dataset_name, export_root, project_type, output_dir, task,\n direction\n ):\n self.direction = direction\n super().__init__(\n dataset_name, export_root, project_type, output_dir, task\n )\n\n self.__setup_conversion_algorithm()\n\n def __setup_conversion_algorithm(self):\n if self.direction == \"to\":\n raise NotImplementedError(\"Doesn't support yet\")\n else:\n if self.project_type == \"Vector\":\n if self.task == \"object_detection\":\n self.conversion_algorithm = voc_object_detection_to_sa_vector\n elif self.task == \"instance_segmentation\":\n raise NotImplementedError(\"Doesn't support yet\")\n elif self.project_type == \"Pixel\":\n if self.task == \"object_detection\":\n raise NotImplementedError(\"Doesn't support yet\")\n elif self.task == \"instance_segmentation\":\n self.conversion_algorithm = voc_instance_segmentation_to_sa_pixel\n\n def __str__(self):\n return '{} object'.format(self.name)\n\n def from_sa_format(self):\n pass\n\n def to_sa_format(self):\n loader = self.conversion_algorithm(self.export_root, self.output_dir)\n","sub_path":"superannotate/input_converters/converters/voc_converters/voc_strategies.py","file_name":"voc_strategies.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"225026666","text":"_payload = [(1, 28), (2, 14), (3, 9), (4, 7), (5, 5), (7, 4), (9, 3), (14, 2), (28, 1)]\n\n\ndef cnt_bits(x):\n i = 0\n while (x >> i) > 0:\n i += 1\n return i\n\n\ndef simple9_encode_lst(lst):\n res = []\n i = 0\n while i < len(lst):\n j = i\n m = cnt_bits(lst[j])\n while j < len(lst) and (j - i + 1) * m <= 28:\n j += 1\n if j < len(lst):\n m = max(m, cnt_bits(lst[j]))\n num = simple9_encode(lst[i:j])\n res.append(num)\n i = j\n return res\n\n\ndef simple9_encode(x):\n n = len(x)\n x.reverse()\n res = 0\n res += (n << 28)\n m = 28 // n\n for i in range(n):\n res += (x[i] << (i * m + 28 % n))\n return res\n\n\ndef simple9_decode_lst(x):\n res = []\n for num in x:\n res += simple9_decode(num)\n return res\n\n\ndef simple9_decode(x):\n res = []\n n = x >> 28\n m = 28 // n\n for i in range(n):\n ll = (28 - i * m)\n cur = (x & ((1 << ll) - 1)) >> (ll - m)\n res.append(cur)\n return res\n\n\ndef x_to_str(x):\n s = ''\n for i in range(32):\n if (x & (1 << i)) > 0:\n s += '1'\n else:\n s += '0'\n i += 1\n return s[::-1]\n","sub_path":"indexes/simple9.py","file_name":"simple9.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"226556395","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport glob\nimport re\n\ndef count_key_words(key_words):\n contents = []\n \n for file_name in glob.glob('*.txt'):\n with open(file_name, 'r') as file:\n contents.append(file.read())\n \n str = ''.join(contents).lower()\n return dict(map(lambda x:(x, len(re.findall(x, str))), key_words))\n \nif __name__ == '__main__':\n print(count_key_words(['byte', 'type']))","sub_path":"006/count_key_words.py","file_name":"count_key_words.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"329098198","text":"import os\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nFEATURESMAP = {\n #1: ['weight0', 'height0', 'waistline0','hipline0','chest0','thigh0','arm0'],\n 1: ['weight0', 'height0', 'waistline0'],\n 2: ['weight1', 'height0', 'waistline1'],\n 3: ['weight2', 'height0', 'waistline2'],\n 4: ['weight3', 'height0', 'waistline3']\n}\n\ndef processData(filePath):\n\n dataList = []\n\n with open(filePath, 'r') as f:\n records = f.readlines()\n\n # 读第一行获取每一列的列名\n # strip() 去掉行尾的换行符\n keys = records[0].strip().split(',')\n\n for i, record in enumerate(records):\n if i > 0:\n dic = {}\n values = record.strip().split(',')\n for index, key in enumerate(keys):\n dic[key] = float(values[index])\n\n dataList.append(dic)\n\n return dataList\n\ndef generateDataAndLabel(type, metaDatas, week):\n LABELMAP = {}\n if type == 'weight':\n LABELMAP = {\n 1: 'deltaWeightAll',\n 2: 'deltaWeight2',\n 3: 'deltaWeight3',\n 4: 'deltaWeight4'\n }\n elif type == 'waist':\n LABELMAP = {\n 1: 'deltaWaistAll',\n 2: 'deltaWaist2',\n 3: 'deltaWaist3',\n 4: 'deltaWaist4'\n }\n trainDatas = []\n trainLabels = []\n\n useableKeys = FEATURESMAP[week]\n\n for metaData in metaDatas:\n trainData = []\n for key in useableKeys:\n trainData.append(metaData[key])\n deltaWeight = metaData[LABELMAP[week]]\n\n\n trainDatas.append(trainData)\n trainLabels.append(deltaWeight)\n\n return (trainDatas, trainLabels)\n\n\n# Normalize by column (min-max norm)\ndef normalize_cols(m):\n col_max = m.max(axis=0)\n col_min = m.min(axis=0)\n return (m-col_min) / (col_max - col_min)\n\n\n#[{ all values of a person },{}...]\ntrainDataList = processData(filePath='/home/shen/Trying/Predict/up/t1/more_dimension/more_dimension_data/train.csv')\n\n(trainDatas, trainLabels) = generateDataAndLabel(type='weight', metaDatas=trainDataList, week=1)\n\ntrainDatas = np.array(trainDatas)\ntrainLabels = np.array(trainLabels)\ntrainLabels /= 10\n\nx_vals_train = np.nan_to_num(normalize_cols(trainDatas))\n\n\n\n\ntestDataList = processData(filePath='/home/shen/Trying/Predict/up/t1/more_dimension/more_dimension_data/test.csv')\n\n(testDatas, testLabels) = generateDataAndLabel(type='weight', metaDatas=testDataList, week=1)\ntestDatas = np.array(testDatas)\ntestLabels = np.array(testLabels)\ntestLabels /= 10\n\nx_vals_test = np.nan_to_num(normalize_cols(testDatas))\n\n# Create graph session\nsess = tf.Session()\n\n\n\n# Declare batch size\n#batch_size = 3000\nbatch_size = 4500\n\n# Define Variable Functions (weights and bias)\ndef init_weight(shape, st_dev):\n weight = tf.Variable(tf.random_normal(shape, stddev=st_dev))\n return weight\n\n\ndef init_bias(shape, st_dev):\n bias = tf.Variable(tf.random_normal(shape, stddev=st_dev))\n return bias\n\n# Create a fully connected layer:\ndef fully_connected(input_layer, weights, biases):\n layer = tf.add(tf.matmul(input_layer, weights), biases)\n return tf.nn.relu(layer)\n\n# Initialize placeholders\n#x_data = tf.placeholder(shape=[None, 3], dtype=tf.float32)\nx_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)\ny_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)\n\n\nweight_1 = init_weight(shape=[1, 10], st_dev=1.0)\nbias_1 = init_bias(shape=[10], st_dev=1.0)\nlayer_1 = fully_connected(x_data, weight_1, bias_1)\n\n\nweight_2 = init_weight(shape=[10, 10], st_dev=1.0)\nbias_2 = init_bias(shape=[10], st_dev=1.0)\nlayer_2 = fully_connected(layer_1, weight_2, bias_2)\n\n\n\nweight_3 = init_weight(shape=[10, 10], st_dev=1.0)\nbias_3 = 
init_bias(shape=[10], st_dev=1.0)\nlayer_3 = fully_connected(layer_2, weight_3, bias_3)\n\n\n\nweight_4 = init_weight(shape=[10, 8], st_dev=1.0)\nbias_4 = init_bias(shape=[8], st_dev=1.0)\nlayer_4 = fully_connected(layer_3, weight_4, bias_4)\n\nweight_5 = init_weight(shape=[8,1], st_dev=1.0)\nbias_5 = init_bias(shape=[1], st_dev=1.0)\n\nfinal_output = tf.sigmoid(tf.add(tf.matmul(layer_4, weight_5), bias_5))\n\n# Declare loss function (MSE)\nloss = tf.reduce_mean(tf.square(y_target - final_output))\n\n\n# Calculate the accuracy (share of predictions within 0.12 of the target)\nTemp = tf.abs(tf.subtract(final_output, y_target))\naccuracy = tf.reduce_mean(tf.cast(tf.less(Temp, 0.12), tf.float32))\n\n\n# Declare optimizer\n# my_opt = tf.train.GradientDescentOptimizer(0.005)\n# train_step = my_opt.minimize(loss)\n\nglobal_step = tf.Variable(0, trainable=False)\nstarter_learning_rate = 0.005\nlearning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,40000, 0.96, staircase=True)\noptimizer = tf.train.GradientDescentOptimizer(learning_rate)\ntrain_step = optimizer.minimize(loss, global_step=global_step)\n\n\n# Initialize variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Training loop\n\nsaver = tf.train.Saver() # defaults to saving all variables - in this case w and b\n\nloss_vec = []\ntest_loss = []\nfor i in range(80000):\n rand_index = np.random.choice(len(x_vals_train), size=batch_size, replace=False)\n rand_x = x_vals_train[rand_index]\n rand_y = np.transpose([trainLabels[rand_index]])\n sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})\n\n temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})\n loss_vec.append(np.sqrt(temp_loss))\n\n\n if (i + 1) % 100 == 0:\n print('Generation: ' + str(i+1) + '. Loss = ' + str(temp_loss))\n\n\n# save_dir ='/home/shen/Trying/Predict/up/t1/more_dimension/Net_save/'\n# saver.save(sess, save_dir + 'model.ckpt', global_step=i+1)\n\n\nx = x_vals_test\ny = np.transpose([testLabels])\nprint(\"Testing Accuracy:\", sess.run(accuracy*100, feed_dict={x_data: x,y_target: y,}\n ))\n\n# Plot loss (MSE) over time\nplt.plot(loss_vec, 'k-', label='Train Loss')\nplt.title('Loss (MSE) per Generation')\nplt.legend(loc='upper right')\nplt.xlabel('Generation')\nplt.ylabel('Loss')\nplt.show()\n","sub_path":"git/NeuralNet_predict_weight/t1/more_dimension/NerualNet_final.py","file_name":"NerualNet_final.py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
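Editor's note: with staircase=True, tf.train.exponential_decay applies decayed_lr = base_lr * decay_rate ** floor(global_step / decay_steps); a pure-Python sketch of the schedule configured above, for illustration only:

def staircase_lr(base_lr, global_step, decay_steps, decay_rate):
    return base_lr * decay_rate ** (global_step // decay_steps)

assert staircase_lr(0.005, 39999, 40000, 0.96) == 0.005
assert abs(staircase_lr(0.005, 40000, 40000, 0.96) - 0.0048) < 1e-12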
+{"seq_id":"14364491","text":"from datetime import datetime\n\nfrom nba_data.data.game import Game\nfrom nba_data.data.matchup import Matchup\nfrom nba_data.data.outcome import Outcome\nfrom nba_data.data.season import Season\nfrom nba_data.data.season_type import SeasonType\n\n\nclass TeamGameLogDeserializer:\n game_date_format = \"%b %d, %Y\"\n\n result_set_index = 0\n game_id_index = 1\n game_date_index = 2\n matchup_index = 3\n home_team_outcome_index = 4\n\n def __init__(self):\n pass\n\n @staticmethod\n def deserialize_team_game_log(team_game_log_json):\n deserialized_results = []\n results = team_game_log_json[\"resultSets\"][TeamGameLogDeserializer.result_set_index][\"rowSet\"]\n season = Season.get_season(team_game_log_json[\"parameters\"][Season.get_query_parameter_name()])\n season_type = SeasonType.get_season_type(team_game_log_json[\"parameters\"][SeasonType.get_query_parameter_name()])\n for result in results:\n matchup = TeamGameLogDeserializer.parse_matchup(result[TeamGameLogDeserializer.matchup_index])\n home_team_outcome = Outcome.get_outcome_from_abbreviation(result[TeamGameLogDeserializer.home_team_outcome_index])\n deserialized_results.append(\n Game(nba_id=str(result[TeamGameLogDeserializer.game_id_index]),\n matchup=matchup,\n date=TeamGameLogDeserializer.parse_date(result[TeamGameLogDeserializer.game_date_index]),\n season=season,\n season_type=season_type,\n home_team_outcome=home_team_outcome))\n return deserialized_results\n\n @staticmethod\n def parse_matchup(matchup):\n\n if \" vs. \" in matchup:\n teams = matchup.split(\" vs. \")\n return Matchup.create(home_team_abbreviation=str(teams[0]),\n away_team_abbreviation=str(teams[1]))\n\n elif \" @ \" in matchup:\n teams = matchup.split(\" @ \")\n return Matchup.create(home_team_abbreviation=str(teams[1]),\n away_team_abbreviation=str(teams[0]))\n\n else:\n raise RuntimeError(\"Unexpected matchup: %s\", matchup)\n\n @staticmethod\n def parse_date(date_string):\n return datetime.strptime(date_string, TeamGameLogDeserializer.game_date_format).date()","sub_path":"nba_data/deserializers/team_game_log_deserializer.py","file_name":"team_game_log_deserializer.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"539236976","text":"import time\n\nimport Moana.Core.Logger.Logger as mLogger\nimport Moana.Core.Ruler.Ruler as mRuler\n\nclass BaseWorkerManager:\n def __init__(self):\n self._logger = mLogger.Logger.logger()\n self._ruler = mRuler.Ruler.ruler()\n\n self._workers = {} # Processes\n self._instanceWorkers = {} # Processes\n self._handlers = {} # interface class\n self._roles = {} # name\n\n self._rWorkers = {} # Running workers\n self._currentWorker = \"Core\"\n\n self._logger.debug(\"Generated {0}\".format(self))\n\n def __repr__(self):\n return type(self).__name__\n\n def _addHandler(self, name, handler):\n self._handlers[name] = handler\n self._roles[name] = [name]\n\n def addWorker(self, name, worker, handler):\n workerType = worker.__class__.__name__\n self._logger.debug(\n \"Add {0} worker: {1}\".format(workerType, name))\n\n if name == \"Core\":\n self._addHandler(name, handler)\n if workerType == \"Process\":\n self._workers[name] = worker\n self._addHandler(name, handler)\n else:\n self._logger.critical(\n \"Wrong worker type: {0}\".format(workerType))\n\n def setRole(self, role, worker):\n self._roles[worker].append(role)\n self._logger.debug(\n \"Append role {0} to {1}\".format(role, worker))\n\n def getRoles(self):\n return self._roles\n\n def getHandler(self, role):\n handler = None\n\n for k, v in self._roles.items():\n if role in v:\n handler = k\n break\n\n if not handler:\n self._logger.critical(\n \"Wrong role: {0}\".format(role))\n\n return self._handlers[handler]\n\n def _runWorker(self, worker, name):\n if name in self._rWorkers:\n self._logger.debug(\"Already running: {0}\".format(name))\n return\n\n worker.start()\n self._rWorkers[name] = worker\n self._logger.debug(\"Run: {0}\".format(name))\n\n def runMessenger(self):\n messengers = None\n\n exe = self._ruler.getConfig(\"exe\")\n if exe == \"Moana\":\n messengers = (\"Publisher\", \"Subscriber\")\n elif exe == \"Boat\":\n messengers = (\"Requester\", \"Replyer\")\n else:\n self._logger.critical(\n \"Wrong exe type: {0}\".format(exe))\n\n for messenger in messengers:\n if messenger in self._workers:\n worker = self._workers[messenger]\n self._runWorker(worker, messenger)\n\n time.sleep(1)\n\n def runWorkers(self):\n self.runMessenger()\n\n for name in self._workers.keys():\n worker = self._workers[name]\n self._runWorker(worker, name)\n\n def runInstanceWorker(self, name, worker, handler):\n workerType = worker.__class__.__name__\n self._logger.debug(\n \"Add {0} instance worker: {1}\".format(workerType, name))\n\n if workerType == \"Process\":\n self._instanceWorkers[name] = worker\n self._addHandler(name, handler)\n else:\n self._logger.critical(\n \"Wrong worker type: {0}\".format(workerType))\n\n self._runWorker(worker, name)\n def stop(self):\n for workerName, worker in self._rWorkers.items():\n worker.terminate()\n self._logger.debug(\"Terminate: {0}\".format(workerName))\n\n def getWorkersList(self):\n return self._workers\n\n def deleteRunningWorker(self, name):\n del self._rWorkers[name]\n\n self._logger.debug(\n \"Worker deleted: {0}\".format(name))\n\n def getHandlersList(self):\n return self._handlers\n\n def getWorkerType(self):\n types = []\n for k, v in self._workers.items():\n types.append(type(v))\n\n return types\n\n def getHandlerType(self):\n types = []\n for k, v in self._handlers.items():\n types.append(type(v))\n\n return types\n\n def getCurrentWorker(self):\n return self._currentWorker\n\n def setCurrentWorker(self, worker):\n self._currentWorker = worker\n\n def 
getRunningWorkersList(self):\n return self._rWorkers\n\nclass WorkerManager(BaseWorkerManager):\n _instance = None\n\n @classmethod\n def workerManager(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = cls(*args, **kwargs)\n\n return cls._instance","sub_path":"Moana/Core/WorkerManager/WorkerManager.py","file_name":"WorkerManager.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
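Editor's note: the classmethod above is a lazy singleton accessor, so repeated calls share one instance per process; a hypothetical usage sketch (it assumes the Moana Logger and Ruler singletons are importable, since __init__ touches both):

wm1 = WorkerManager.workerManager()
wm2 = WorkerManager.workerManager()
assert wm1 is wm2  # one shared manager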
+{"seq_id":"577321623","text":"import sys\r\nif sys.platform == 'linux':\r\n STATUSMARIO = 0x569f4699\r\n MOEDAS = 0x569f543f\r\n YOSHICOINS = 0x569f5aa2\r\nelse:\r\n STATUSMARIO = 0x72B481\r\n MOEDAS = 0x72C227\r\n YOSHICOINS = 0x72C888\r\n\r\nMARIO_PEQUENO = 0\r\nMARIO_GRANDE = 1\r\nMARIO_PENINHA = 2\r\nMARIO_FLOR_DE_FOGO = 3","sub_path":"Enderecos.py","file_name":"Enderecos.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"634695304","text":"\"\"\"Contains the `Enduse` Class. This is the most important class\nwhere the change in enduse specific energy demand is simulated\ndepending on scenaric assumptions.\"\"\"\nimport logging\nimport math\nimport numpy as np\nfrom energy_demand.profiles import load_profile as lp\nfrom energy_demand.profiles import load_factors as lf\nfrom energy_demand.technologies import diffusion_technologies\nfrom energy_demand.technologies import fuel_service_switch\nfrom energy_demand.technologies import tech_related\nfrom energy_demand.basic import lookup_tables\n\nclass Enduse(object):\n \"\"\"Enduse Class for all endueses in each SubModel\n\n For every region and sector, a different instance\n is generated. In this class, first the change in\n energy demand is calculated on a annual temporal scale.\n Calculations are performed in a cascade (e.g. first\n reducing climate change induced savings, then substracting\n further behavioral savings etc.). After annual calculations,\n the demand is converted to hourly demand.\n\n Also within this function, the fuel inputs are converted\n to energy service (short: service) and converted back to\n fuels (e.g. electricit).\n\n Arguments\n ----------\n submodel : str\n Submodel\n region : str\n Region name\n scenario_data : dict\n Scenario data\n assumptions : dict\n Assumptions\n non_regional_lp_stock : dict\n Load profile stock\n base_yr : int\n Base year\n curr_yr : int\n Current year\n enduse : str\n Enduse name\n sector : str\n Sector name\n fuel : array\n Yearly fuel data for different fueltypes\n tech_stock : object\n Technology stock of region\n heating_factor_y : array\n Distribution of fuel within year to days (yd) (directly correlates with HDD)\n cooling_factor_y : array\n Distribution of fuel within year to days (yd) (directly correlates with CDD)\n fuel_fueltype_tech_p_by : dict\n Fuel tech assumtions in base year\n sig_param_tech : dict\n Sigmoid parameters\n enduse_overall_change : dict\n Assumptions related to overal change in endyear\n regional_lp_stock : object\n Load profile stock\n dw_stock : object,default=False\n Dwelling stock\n reg_scen_drivers : bool,default=None\n Scenario drivers per enduse\n flat_profile_crit : bool,default=False\n Criteria of enduse has a flat shape or not\n\n Note\n ----\n - Load profiles are assigned independently of the fueltype, i.e.\n the same profiles are assumed to hold true across different fueltypes\n\n - ``self.fuel_y`` is always overwritten\n in the cascade of calculations\n\n Warning\n -------\n Not all enduses have technologies assigned. Load peaks are derived\n from techstock in case technologies are defined. 
Otherwise enduse load\n profiles are used.\n \"\"\"\n def __init__(\n self,\n submodel,\n region,\n scenario_data,\n assumptions,\n regional_lp_stock,\n non_regional_lp_stock,\n base_yr,\n curr_yr,\n enduse,\n sector,\n fuel,\n tech_stock,\n heating_factor_y,\n cooling_factor_y,\n fuel_fueltype_tech_p_by,\n sig_param_tech,\n enduse_overall_change,\n criterias,\n strategy_variables,\n fueltypes_nr,\n fueltypes,\n model_yeardays_nrs,\n dw_stock=False,\n reg_scen_drivers=None,\n flat_profile_crit=False\n ):\n \"\"\"Enduse class constructor\n \"\"\"\n #logging.info(\" =====Enduse: {} Sector: {}\".format(enduse, sector))\n self.region = region\n self.enduse = enduse\n self.fuel_y = fuel\n self.flat_profile_crit = flat_profile_crit\n\n self.techs_fuel_yh = None\n\n if np.sum(fuel) == 0:\n #If enduse has no fuel return empty shapes\n self.flat_profile_crit = True\n self.fuel_y = fuel\n self.fuel_yh = 0\n self.enduse_techs = []\n else:\n # Get correct parameters depending on model configuration\n load_profiles = get_lp_stock(\n enduse,\n non_regional_lp_stock,\n regional_lp_stock)\n\n # Get technologies of enduse\n self.enduse_techs = get_enduse_techs(fuel_fueltype_tech_p_by)\n\n # -------------------------------\n # Cascade of calculations on a yearly scale\n # --------------------------------\n # --Change fuel consumption based on climate change induced temperature differences\n _fuel_new_y = apply_climate_change(\n enduse,\n self.fuel_y,\n cooling_factor_y,\n heating_factor_y,\n assumptions.enduse_space_heating,\n assumptions.ss_enduse_space_cooling)\n self.fuel_y = _fuel_new_y\n #logging.debug(\"... Fuel train B: \" + str(np.sum(self.fuel_y)))\n\n # --Change fuel consumption based on smart meter induced general savings\n _fuel_new_y = apply_smart_metering(\n enduse,\n self.fuel_y,\n assumptions.smart_meter_assump,\n strategy_variables,\n base_yr,\n curr_yr)\n self.fuel_y = _fuel_new_y\n #logging.debug(\"... Fuel train C: \" + str(np.sum(self.fuel_y)))\n\n # --Enduse specific fuel consumption change in %\n _fuel_new_y = apply_specific_change(\n enduse,\n self.fuel_y,\n enduse_overall_change,\n strategy_variables,\n base_yr,\n curr_yr)\n self.fuel_y = _fuel_new_y\n #logging.debug(\"... Fuel train D: \" + str(np.sum(self.fuel_y)))\n\n # Calculate new fuel demands after scenario drivers\n _fuel_new_y = apply_scenario_drivers(\n submodel,\n enduse,\n sector,\n self.fuel_y,\n dw_stock,\n region,\n scenario_data['gva'],\n scenario_data['population'],\n scenario_data['industry_gva'],\n reg_scen_drivers,\n base_yr,\n curr_yr)\n self.fuel_y = _fuel_new_y\n #logging.debug(\"... Fuel train E: \" + str(np.sum(self.fuel_y)))\n\n # Apply cooling scenario variable\n _fuel_new_y = apply_cooling(\n enduse,\n self.fuel_y,\n strategy_variables,\n assumptions.cooled_ss_floorarea_by,\n enduse_overall_change['other_enduse_mode_info'],\n base_yr,\n curr_yr)\n self.fuel_y = _fuel_new_y\n #logging.debug(\"... Fuel train E1: \" + str(np.sum(self.fuel_y)))\n\n # Industry related change\n _fuel_new_y = industry_enduse_changes(\n enduse,\n sector,\n base_yr,\n curr_yr,\n strategy_variables,\n self.fuel_y,\n enduse_overall_change['other_enduse_mode_info'],\n assumptions)\n self.fuel_y = _fuel_new_y\n #logging.debug(\"... 
Fuel train E2: \" + str(np.sum(self.fuel_y)))\n\n # ----------------------------------\n # Hourly Disaggregation\n # ----------------------------------\n if self.enduse_techs == []:\n \"\"\"If no technologies are defined for an enduse, the load profiles\n are read from dummy shape, which show the load profiles of the whole enduse.\n No switches can be implemented and only overall change of enduse.\n\n Note: for heating, technologies need to be assigned.\n \"\"\"\n if flat_profile_crit:\n self.fuel_y = self.fuel_y * model_yeardays_nrs / 365.0\n else:\n self.fuel_yh = assign_lp_no_techs(\n enduse,\n sector,\n load_profiles,\n self.fuel_y)\n else:\n \"\"\"If technologies are defined for an enduse\n \"\"\"\n # ----\n # Get enduse specific configurations\n # ----\n mode_constrained = get_enduse_configuration(\n criterias['mode_constrained'],\n enduse,\n assumptions.enduse_space_heating)\n\n # ------------------------------------\n # Calculate regional energy service\n # ------------------------------------\n s_tot_y_cy, s_tech_y_by = fuel_to_service(\n enduse,\n self.fuel_y,\n fuel_fueltype_tech_p_by,\n tech_stock,\n fueltypes,\n mode_constrained)\n\n # ------------------------------------\n # Reduction of service because of heat recovery\n # ------------------------------------\n s_tot_y_cy, s_tech_y_cy = apply_heat_recovery(\n enduse,\n strategy_variables,\n assumptions.enduse_overall_change,\n s_tot_y_cy,\n s_tech_y_by,\n base_yr,\n curr_yr)\n\n # ------------------------------------\n # Reduction of service because of improvement in air leakeage\n # ------------------------------------\n s_tot_y_cy, s_tech_y_cy = apply_air_leakage(\n enduse,\n strategy_variables,\n assumptions.enduse_overall_change,\n s_tot_y_cy,\n s_tech_y_cy,\n base_yr,\n curr_yr)\n\n # --------------------------------\n # Switches\n # Calculate services per technology for cy based on fitted parameters\n # --------------------------------\n s_tech_y_cy = calc_service_switch(\n enduse,\n s_tech_y_cy,\n self.enduse_techs,\n sig_param_tech,\n curr_yr,\n base_yr,\n sector,\n assumptions.crit_switch_happening)\n\n # -------------------------------------------\n # Convert annual service to fuel per fueltype\n # -------------------------------------------\n self.fuel_y, fuel_tech_y = service_to_fuel(\n enduse,\n s_tech_y_cy,\n tech_stock,\n fueltypes_nr,\n fueltypes,\n mode_constrained)\n #logging.debug(\"... 
Fuel train Post service: \" + str(np.sum(self.fuel_y)))\n\n # Delete all technologies with no fuel assigned\n for tech, fuel_tech in fuel_tech_y.items():\n if fuel_tech == 0:\n self.enduse_techs.remove(tech)\n\n # ------------------------------------------\n # Assign load profiles\n # ------------------------------------------\n if self.flat_profile_crit:\n #logging.info(\"flat profile\")\n self.fuel_y = calc_fuel_tech_y(\n enduse,\n tech_stock,\n fuel_tech_y,\n fueltypes_nr,\n fueltypes,\n mode_constrained)\n else:\n fuel_yh = calc_fuel_tech_yh(\n enduse,\n sector,\n self.enduse_techs,\n fuel_tech_y,\n load_profiles,\n fueltypes_nr,\n fueltypes,\n model_yeardays_nrs,\n mode_constrained)\n\n # --------------------------------------\n # Demand Management (peak shaving)\n # ---------------------------------------\n if mode_constrained:\n self.techs_fuel_yh = {}\n for tech in fuel_yh:\n self.techs_fuel_yh[tech] = demand_management(\n enduse,\n base_yr,\n curr_yr,\n strategy_variables,\n fuel_yh[tech],\n [tech],\n sector,\n fuel_tech_y,\n tech_stock,\n load_profiles,\n mode_constrained=True)\n\n self.fuel_yh = None\n else: # (not specific for technologies)\n\n # Demand management for heating related technologies\n self.fuel_yh = demand_management(\n enduse,\n base_yr,\n curr_yr,\n strategy_variables,\n fuel_yh,\n self.enduse_techs,\n sector,\n fuel_tech_y,\n tech_stock,\n load_profiles,\n mode_constrained=False)\n\ndef demand_management(\n enduse,\n base_yr,\n curr_yr,\n strategy_variables,\n fuel_yh,\n enduse_techs,\n sector,\n fuel_tech_y,\n tech_stock,\n load_profiles,\n mode_constrained\n ):\n \"\"\"Demand management. This function shifts the peak of this enduse\n depending on peak shifting factors. So far, only inter-day load shifting is implemented.\n\n Arguments\n ----------\n enduse : str\n Enduse\n base_yr : int\n Base year\n curr_yr : int\n Current year\n strategy_variables : dict\n Assumptions of strategy variables\n fuel_yh : array\n Fuel per hours\n enduse_techs : list\n Enduse specific technologies\n sector : str\n Sector\n fuel_tech_y : dict\n Annual fuel per technology\n tech_stock : obj\n Technology stock\n load_profiles : obj\n Load profiles\n mode_constrained : bool\n Running mode\n If mode_constrained, always only one technology is passed in\n\n Returns\n -------\n fuel_yh : array\n Fuel of yh\n \"\"\"\n # ------------------------------\n # Test if peak is shifted or not\n # ------------------------------\n try:\n # Get assumed load shift\n param_name = 'demand_management_improvement__{}'.format(enduse)\n\n if strategy_variables[param_name]['scenario_value'] == 0:\n\n # no load management\n peak_shift_crit = False\n else:\n # load management\n peak_shift_crit = True\n except KeyError:\n\n # no load management\n peak_shift_crit = False\n\n # ------------------------------\n # If peak shifting implemented, calculate new lp\n # ------------------------------\n if peak_shift_crit:\n\n # Calculate average for every day\n if mode_constrained:\n average_fuel_yd = np.average(fuel_yh, axis=1)\n else:\n average_fuel_yd = np.average(fuel_yh, axis=2)\n\n # Calculate load factors (only inter_day load shifting as for now)\n loadfactor_yd_cy = lf.calc_lf_d(\n fuel_yh, average_fuel_yd, mode_constrained)\n\n # Calculate current year load factors\n lf_improved_cy = calc_lf_improvement(\n strategy_variables[param_name]['scenario_value'],\n base_yr,\n curr_yr,\n loadfactor_yd_cy,\n strategy_variables['demand_management_yr_until_changed']['scenario_value'])\n\n fuel_yh = lf.peak_shaving_max_min(\n lf_improved_cy, 
average_fuel_yd, fuel_yh, mode_constrained)\n\n else: # no peak shifting\n pass\n\n return fuel_yh\n\ndef calc_lf_improvement(\n lf_improvement_ey,\n base_yr,\n curr_yr,\n loadfactor_yd_cy,\n yr_until_changed\n ):\n \"\"\"Calculate load factor improvement depending on linear diffusion\n over time.\n\n Arguments\n ---------\n lf_improvement_ey : float\n Load factor improvement until end year\n base_yr : int\n Base year\n curr_yr : int\n Current year\n loadfactor_yd_cy : array\n Yd Load factor of current year\n yr_until_changed : int\n Year until fully changed\n\n Returns\n -------\n lf_improved_cy : array\n Improved load factor of current year\n \"\"\"\n # Calculate linear diffusion of improvement of load management\n lin_diff_factor = diffusion_technologies.linear_diff(\n base_yr, curr_yr, 0, 1, yr_until_changed)\n\n # Current year load factor improvement\n lf_improvement_cy = lf_improvement_ey * lin_diff_factor\n\n # Add load factor improvement to current year load factor\n lf_improved_cy = loadfactor_yd_cy + lf_improvement_cy\n\n # Where the load factor is larger than one, cap it at 1\n lf_improved_cy[lf_improved_cy > 1] = 1\n\n return lf_improved_cy\n\ndef assign_lp_no_techs(enduse, sector, load_profiles, fuel_y):\n \"\"\"Assign load profiles for an enduse which has no technologies defined\n\n Arguments\n ---------\n enduse : str\n Enduse\n sector : str\n Sector\n load_profiles : obj\n Load profiles\n fuel_y : array\n Fuels\n\n Returns\n -------\n fuel_yh : array\n Fuel yh\n \"\"\"\n fuel = fuel_y[:, np.newaxis, np.newaxis]\n\n fuel_yh = load_profiles.get_lp(\n enduse, sector, 'placeholder_tech', 'shape_yh') * fuel\n\n return fuel_yh\n\ndef get_lp_stock(enduse, non_regional_lp_stock, regional_lp_stock):\n \"\"\"Defines the load profile stock depending on `enduse`.\n (Get regional or non-regional load profile data)\n\n Arguments\n ----------\n enduse : str\n Enduse\n non_regional_lp_stock : object\n Non regional dependent load profiles\n regional_lp_stock : object\n Regional dependent load profiles\n\n Returns\n -------\n load_profiles : object\n Load profile\n\n Note\n -----\n Because for some enduses the load profiles depend on the region\n they are stored in the `WeatherRegion` Class. One such example is\n heating. If the enduse is not dependent on the region, the same\n load profile can be used for all regions\n\n If the enduse depends on regional factors, `regional_lp_stock`\n is returned. Otherwise, non-regional load profiles which can\n be applied for all regions is used (`non_regional_lp_stock`)\n \"\"\"\n if enduse in non_regional_lp_stock.stock_enduses:\n return non_regional_lp_stock\n else:\n return regional_lp_stock\n\ndef get_running_mode(enduse, mode_constrained, enduse_space_heating):\n \"\"\"Checks which mode needs to be run for an enduse.\n\n Arguments\n -----------\n mode_constrained : bool\n Criteria of running mode\n enduse_space_heating : dict\n All heating enduses across all models\n\n Returns\n -------\n bool : bool\n The return value\n\n Note\n ----\n If 'mode_constrained' == True, then overall heat is provided to\n the supply model not specified for technologies. 
Otherwise,\n heat demand is supplied per technology\n \"\"\"\n if mode_constrained:\n return True\n elif not mode_constrained and enduse in enduse_space_heating:\n return False\n elif not mode_constrained and enduse not in enduse_space_heating:\n # All other not constrained enduses where technologies are defined\n # are run in 'constrained' mode (e.g. lighting)\n return True\n\ndef get_enduse_configuration(\n mode_constrained,\n enduse,\n enduse_space_heating,\n ):\n \"\"\"Get enduse specific configuration\n\n Arguments\n ---------\n mode_constrained : bool\n Constrained mode criteria\n enduse : str\n Enduse\n enduse_space_heating : list\n All enduses classified as space heating\n base_yr, curr_yr : int\n Base, current, year\n \"\"\"\n mode_constrained = get_running_mode(\n enduse,\n mode_constrained,\n enduse_space_heating)\n\n return mode_constrained\n\ndef get_peak_day_all_fueltypes(fuel_yh):\n \"\"\"Iterate yh and get day with highest fuel (across all fueltypes).\n The day with most fuel across all fueltypes is considered to\n be the peak day. Over the simulation period,\n the peak day may change date in a year.\n\n Arguments\n ---------\n fuel_yh : array (fueltype, 365, 24)\n Fuel for every yh (fueltypes, yh) \n\n Return\n ------\n peak_day_nr : int\n Day with most fuel or service across all fueltypes\n \"\"\"\n\n # Sum all fuel across all fueltypes for every hour in a year\n all_fueltypes_tot_h = np.sum(fuel_yh, axis=0)\n\n if np.sum(all_fueltypes_tot_h) == 0:\n logging.warning(\"No peak can be found because no fuel assigned\")\n return 0\n else:\n # Sum fuel within every hour for every day and get day with maximum fuel\n peak_day_nr = np.argmax(np.sum(all_fueltypes_tot_h, axis=1))\n\n return peak_day_nr\n\ndef get_peak_day_single_fueltype(fuel_yh):\n \"\"\"Iterate yh and get day with highest fuel for a single fueltype\n The day with most fuel is considered to\n be the peak day. Over the simulation period,\n the peak day may change date in a year. 
If no fuel is\n provided, 0 is returned\n\n Arguments\n ---------\n fuel_yh : array (365, 24)\n Fuel for every yh (yh)\n\n Return\n ------\n peak_day_nr : int\n Day with most fuel or service\n \"\"\"\n if np.sum(fuel_yh) == 0:\n logging.info(\"No peak can be found because no fuel assigned\")\n # Return first entry of element (which is zero)\n return 0\n else:\n # Sum fuel within every hour for every day and get day with maximum fuel\n peak_day_nr = np.argmax(np.sum(fuel_yh, axis=1))\n return peak_day_nr\n\ndef calc_peak_tech_dh(\n enduse,\n sector,\n enduse_techs,\n enduse_fuel_tech,\n fuel_yh,\n tech_stock,\n load_profile,\n mode_constrained\n ):\n \"\"\"Iterate technologies in enduse and calculate peak demand\n\n Arguments\n ----------\n enduse : str\n Enduse\n sector : str\n Sector\n enduse_techs : list\n Enduse technologies\n enduse_fuel_tech : array\n Fuel per enduse and technology\n fuel_yh : array\n Fuel per hours\n tech_stock : data\n Technology stock\n load_profile : object\n Load profile\n mode_constrained : bool\n Constrained mode criteria\n\n Returns\n -------\n fuels_peak_dh : array\n Peak values for peak day for every fueltype\n\n Note\n ----\n - This function gets the hourly values of the peak day for every fueltype.\n The daily fuel is converted to dh for each technology.\n\n - For some technology types (heat_pump)\n the dh peak day profile is not read in from technology\n stock but from shape_yh of peak day.\n \"\"\"\n if mode_constrained:\n fuels_peak_dh = {}\n else:\n fuels_peak_dh = np.zeros((24), dtype=float)\n\n for tech in enduse_techs:\n\n tech_type = tech_stock.get_tech_attr(\n enduse, tech, 'tech_type')\n\n if tech_type == 'heat_pump':\n \"\"\"Read fuel from peak day\n \"\"\"\n # Get day with most fuel\n if isinstance(fuel_yh, dict):\n peak_day_nr = get_peak_day_single_fueltype(fuel_yh[tech])\n else:\n peak_day_nr = get_peak_day_single_fueltype(fuel_yh)\n\n # Calculate absolute fuel values for yd (multiply fuel with yd_shape)\n fuel_tech_yd = enduse_fuel_tech[tech] * load_profile.get_lp(\n enduse, sector, tech, 'shape_yd')\n\n # Calculate fuel for peak day\n fuel_tech_peak_d = fuel_tech_yd[peak_day_nr]\n\n # The 'shape_peak_dh' is not defined in technology stock because\n # in the 'Region' the peak day is not yet known\n # Therefore, the shape_yh is read in and the peak day profile is selected with help of the peak day information\n tech_peak_dh = load_profile.get_lp(\n enduse, sector, tech, 'shape_y_dh')[peak_day_nr]\n else:\n \"\"\"Calculate fuel with peak factor\n \"\"\"\n f_peak_yd = load_profile.get_lp(\n enduse, sector, tech, 'f_peak_yd')\n\n # Calculate fuel for peak day\n fuel_tech_peak_d = enduse_fuel_tech[tech] * f_peak_yd\n\n # Assign Peak shape of a peak day of a technology\n tech_peak_dh = load_profile.get_shape_peak_dh(\n enduse, sector, tech)\n\n # Multiply absolute d fuels with dh peak fuel shape\n fuel_tech_peak_dh = tech_peak_dh * fuel_tech_peak_d\n\n if mode_constrained:\n fuels_peak_dh[tech] = fuel_tech_peak_dh\n else:\n # Peak day fuel shape * fueltype distribution for peak day\n fuels_peak_dh += fuel_tech_peak_dh\n\n return fuels_peak_dh\n\ndef get_enduse_techs(fuel_fueltype_tech_p_by):\n \"\"\"Get all defined technologies of an enduse\n\n Arguments\n ----------\n fuel_fueltype_tech_p_by : dict\n Percentage of fuel per enduse per technology\n\n Return\n ------\n enduse_techs : list\n All technologies\n\n Note\n ----\n All technologies are read out, including those which\n are potentially defined in fuel or service switches.\n\n If for an enduse a dummy technology is 
defined,\n the technologies of an enduse are set to an empty\n list.\n\n Warning\n -------\n For every enduse technologes must either be defined\n for no fueltype or for all fueltypes\n \"\"\"\n enduse_techs = []\n\n for tech_fueltype in fuel_fueltype_tech_p_by.values():\n if 'placeholder_tech' in tech_fueltype.keys():\n return []\n else:\n enduse_techs += tech_fueltype.keys()\n\n return list(set(enduse_techs))\n\ndef calc_fuel_tech_yh(\n enduse,\n sector,\n enduse_techs,\n enduse_fuel_tech,\n load_profiles,\n fueltypes_nr,\n fueltypes,\n model_yeardays_nrs,\n mode_constrained\n ):\n \"\"\"Iterate fuels for each technology and assign shape yd and yh shape\n\n Arguments\n ----------\n enduse_fuel_tech : dict\n Fuel per technology in enduse\n tech_stock : object\n Technologies\n load_profiles : object\n Load profiles\n fueltypes_nr : dict\n Nr of fueltypes\n fueltypes : dict\n Fueltypes lookup\n mode_constrained : bool\n Mode criteria\n model_yeardays_nrs : int\n Number of modelled yeardays\n\n Return\n ------\n fuels_yh : array\n Fueltype storing hourly fuel for every fueltype (fueltype, model_yeardays_nrs, 24)\n \"\"\"\n if mode_constrained:\n\n fuels_yh = {}\n for tech in enduse_techs:\n\n load_profile = load_profiles.get_lp(\n enduse, sector, tech, 'shape_yh')\n\n if model_yeardays_nrs != 365:\n load_profile = lp.abs_to_rel(load_profile)\n\n fuel_tech_yh = enduse_fuel_tech[tech] * load_profile\n\n fuels_yh[tech] = fuel_tech_yh\n else:\n # --\n # Unconstrained mode, i.e. not technolog specific.\n # Store according to fueltype and heat\n # --\n fuels_yh = np.zeros((fueltypes_nr, model_yeardays_nrs, 24), dtype=float)\n\n for tech in enduse_techs:\n\n load_profile = load_profiles.get_lp(\n enduse, sector, tech, 'shape_yh')\n\n if model_yeardays_nrs != 365:\n load_profile = lp.abs_to_rel(load_profile)\n\n # If no fuel for this tech and not defined in enduse\n fuel_tech_yh = enduse_fuel_tech[tech] * load_profile\n\n fuels_yh[fueltypes['heat']] += fuel_tech_yh\n\n return fuels_yh\n\ndef calc_fuel_tech_y(\n enduse,\n tech_stock,\n fuel_tech_y,\n fueltypes_nr,\n fueltypes,\n mode_constrained\n ):\n \"\"\"Calculate yearly fuel per technology (no load profile assigned).\n\n Arguments\n -----------\n enduse : str\n Enduse\n tech_stock : object\n Technology stock\n fuel_tech_y : dict\n Fuel per technology per year\n lookups : dict\n look-up\n fueltype : dict\n Integer of fueltypes\n mode_constrained : bool\n Running mode\n\n Returns\n -------\n fuel_y : array\n Fuel per year per fueltype\n\n Note\n ----\n This function can be run in two different modes\n \"\"\"\n fuel_y = np.zeros((fueltypes_nr), dtype=float)\n\n for tech, fuel_tech_y in fuel_tech_y.items():\n if mode_constrained:\n fueltype_int = tech_stock.get_tech_attr(\n enduse, tech, 'fueltype_int')\n\n fuel_y[fueltype_int] += np.sum(fuel_tech_y)\n else:\n # Assign all to heat fueltype\n fuel_y[fueltypes['heat']] += np.sum(fuel_tech_y)\n\n return fuel_y\n\ndef service_to_fuel(\n enduse,\n service_tech,\n tech_stock,\n fueltypes_nr,\n fueltypes,\n mode_constrained\n ):\n \"\"\"Convert yearly energy service to yearly fuel demand.\n For every technology the service is taken and converted\n to fuel based on efficiency of current year\n\n Arguments\n ------\n enduse : str\n Enduse\n service_tech : dict\n Service per fueltype and technology\n tech_stock : object\n Technological stock\n fueltypes_nr : int\n Number of fueltypes\n fueltypes : dict\n Fueltypes\n mode_constrained : bool\n Mode running criteria\n\n Returns\n -------\n fuel_y : array\n Fuel 
per fueltype\n fuel_per_tech : dict\n Fuel per technology\n\n Note\n -----\n - Fuel = Energy service / efficiency\n \"\"\"\n fuel_tech_y = {}\n fuel_y = np.zeros((fueltypes_nr), dtype=float)\n\n if mode_constrained:\n for tech, service in service_tech.items():\n\n tech_eff = tech_stock.get_tech_attr(\n enduse, tech, 'eff_cy')\n fueltype_int = tech_stock.get_tech_attr(\n enduse, tech, 'fueltype_int')\n\n # Convert to fuel\n fuel = service / tech_eff\n\n # Add fuel\n fuel_tech_y[tech] = fuel\n fuel_y[fueltype_int] += fuel\n else:\n for tech, fuel_tech in service_tech.items():\n fuel_y[fueltypes['heat']] += fuel_tech\n fuel_tech_y[tech] = fuel_tech\n\n return fuel_y, fuel_tech_y\n\ndef fuel_to_service(\n enduse,\n fuel_y,\n fuel_fueltype_tech_p_by,\n tech_stock,\n fueltypes,\n mode_constrained\n ):\n \"\"\"Converts fuel to energy service. Calculate energy service\n of each technology based on assumptions about base year fuel\n shares of an enduse (`fuel_fueltype_tech_p_by`).\n\n Arguments\n ----------\n enduse : str\n Enduse\n fuel_y : array\n Fuel per fueltype\n fuel_fueltype_tech_p_by : dict\n Fuel composition of base year for every fueltype for each\n enduse (assumtions for national scale)\n tech_stock : object\n Technology stock of region\n fueltypes : dict\n Fueltype look-up\n mode_constrained : bool\n Criteria about mode\n\n Return\n ------\n tot_s_y : array\n Total annual energy service per technology\n s_tech_y : dict\n Total annual energy service per technology\n\n Note\n -----\n - Efficiency changes of technologis are considered.\n - Energy service = fuel * efficiency\n - This function can be run in two modes, depending on `mode_constrained`\n - The base year efficiency is taken because the actual service can\n only be calculated with base year.\n Efficiencies are only considered if converting back to fuel\n The 'self.fuel_y' is taken because the actual\n service was reduced e.g. 
due to smart meters or temperatur changes\n \"\"\"\n s_tech_y = {}\n s_tot_y = 0\n\n # Calculate share of service\n for fueltype_int, tech_list in fuel_fueltype_tech_p_by.items():\n\n # Get technologies to iterate\n if tech_list == {} and fuel_y[fueltype_int] == 0: # No technology or fuel defined\n techs_with_fuel = {}\n elif tech_list == {} and fuel_y[fueltype_int] > 0: # Fuel defined but no technologies\n fueltype_str = tech_related.get_fueltype_str(fueltypes, fueltype_int)\n placeholder_tech = 'placeholder_tech__{}'.format(fueltype_str)\n techs_with_fuel = {placeholder_tech: 1.0}\n else:\n techs_with_fuel = tech_list\n\n for tech, fuel_share in techs_with_fuel.items():\n\n if mode_constrained:\n \"\"\"Constrained version\n \"\"\"\n tech_eff = tech_stock.get_tech_attr(enduse, tech, 'eff_by')\n\n # Get fuel share and convert fuel to service per technology\n s_tech = fuel_y[fueltype_int] * fuel_share * tech_eff\n\n s_tech_y[tech] = s_tech\n\n # Sum total yearly service\n s_tot_y += s_tech #(y)\n else:\n \"\"\"Unconstrained version\n efficiencies are not considered, because not technology\n specific service calculation\n \"\"\"\n # Calculate fuel share\n fuel_tech = fuel_y[fueltype_int] * fuel_share\n\n s_tech_y[tech] = fuel_tech\n\n # Sum total yearly service\n s_tot_y += fuel_tech\n\n return s_tot_y, s_tech_y\n\ndef apply_heat_recovery(\n enduse,\n strategy_variables,\n enduse_overall_change,\n service,\n service_techs,\n base_yr,\n curr_yr\n ):\n \"\"\"Reduce heating demand according to assumption on heat reuse\n\n Arguments\n ----------\n enduse : str\n Enduse\n strategy_variables : dict\n Strategy variables\n enduse_overall_change : dict\n Sigmoid diffusion info\n service : dict or array\n Service of current year\n crit_dict : str\n Criteria to run function differently\n base_yr : int\n Base year\n curr_yr : int\n Current year\n\n Returns\n -------\n service_reduced : dict or array\n Reduced service after assumption on reuse\n\n Note\n ----\n A standard sigmoid diffusion is assumed from base year to end year\n \"\"\"\n try:\n # Fraction of heat recovered until end year\n heat_recovered_p = strategy_variables[\"heat_recoved__{}\".format(enduse)]['scenario_value']\n\n if heat_recovered_p == 0:\n return service, service_techs\n else:\n # Fraction of heat recovered in current year\n sig_diff_factor = diffusion_technologies.sigmoid_diffusion(\n base_yr,\n curr_yr,\n strategy_variables['heat_recovered_yr_until_changed']['scenario_value'],\n enduse_overall_change['other_enduse_mode_info']['sigmoid']['sig_midpoint'],\n enduse_overall_change['other_enduse_mode_info']['sigmoid']['sig_steepness'])\n\n heat_recovered_p_cy = sig_diff_factor * heat_recovered_p\n\n # Apply to technologies each stored in dictionary\n service_reduced_techs = {}\n for tech, service_tech in service_techs.items():\n service_reduced_techs[tech] = service_tech * (1.0 - heat_recovered_p_cy)\n\n # Apply to array\n service_reduced = service * (1.0 - heat_recovered_p_cy)\n\n return service_reduced, service_reduced_techs\n except KeyError:\n # no recycling defined\n return service, service_techs\n\ndef apply_air_leakage(\n enduse,\n strategy_variables,\n enduse_overall_change,\n service,\n service_techs,\n base_yr,\n curr_yr\n ):\n \"\"\"Reduce heating demand according to assumption on\n improvements in air leaking\n\n Arguments\n ----------\n enduse : str\n Enduse\n strategy_variables : dict\n Strategy variables\n enduse_overall_change : dict\n Sigmoid diffusion info\n service : dict or array\n Service of current year\n 
crit_dict : str\n Criteria to run function differently\n base_yr : int\n Base year\n curr_yr : int\n Current year\n\n Returns\n -------\n service_reduced : dict or array\n Service after assumptions on air leaking improvements\n\n Note\n ----\n A standard sigmoid diffusion is assumed from base year to end year\n \"\"\"\n try:\n # Fraction of heat recovered until end year\n air_leakage_improvement = strategy_variables[\"air_leakage__{}\".format(enduse)]['scenario_value']\n\n if air_leakage_improvement == 0:\n return service, service_techs\n else:\n air_leakage_by = 1\n\n # Fraction of heat recovered in current year\n sig_diff_factor = diffusion_technologies.sigmoid_diffusion(\n base_yr,\n curr_yr,\n strategy_variables['air_leakage_yr_until_changed']['scenario_value'],\n enduse_overall_change['other_enduse_mode_info']['sigmoid']['sig_midpoint'],\n enduse_overall_change['other_enduse_mode_info']['sigmoid']['sig_steepness'])\n\n air_leakage_improvement_cy = sig_diff_factor * air_leakage_improvement\n air_leakage_cy = 1 - air_leakage_improvement_cy\n\n f_improvement = air_leakage_cy / air_leakage_by\n\n # Apply to technologies each stored in dictionary or array\n service_reduced_techs = {}\n for tech, service_tech in service_techs.items():\n service_reduced_techs[tech] = service_tech * f_improvement\n\n service_reduced = service * f_improvement\n\n return service_reduced, service_reduced_techs\n except KeyError:\n return service, service_techs\n\ndef apply_scenario_drivers(\n submodel,\n enduse,\n sector,\n fuel_y,\n dw_stock,\n region,\n gva,\n population,\n industry_gva,\n reg_scen_drivers,\n base_yr,\n curr_yr\n ):\n \"\"\"The fuel data for every end use are multiplied with respective\n scenario drivers. If no dwelling specific scenario driver is found,\n the identical fuel is returned.\n\n Arguments\n ----------\n enduse: str\n Enduse\n fuel_y : array\n Yearly fuel per fueltype\n dw_stock : object\n Dwelling stock\n region : str\n Region name\n gva : dict\n GVA\n population : dict\n Population\n reg_scen_drivers : dict\n Scenario drivers per enduse\n base_yr : int\n Base year\n curr_yr : int\n Current year\n\n Returns\n -------\n fuel_y : array\n Changed yearly fuel per fueltype\n \"\"\"\n if reg_scen_drivers is None:\n reg_scen_drivers = {}\n\n if not dw_stock:\n \"\"\"Calculate non-dwelling related scenario drivers, if no dwelling stock\n Info: No dwelling stock is defined for this submodel\n \"\"\"\n scenario_drivers = reg_scen_drivers[enduse]\n\n by_driver, cy_driver = 1, 1 #not 0\n\n for scenario_driver in scenario_drivers:\n\n # Get correct data depending on driver\n if scenario_driver == 'gva':\n by_driver_data = gva[base_yr][region]\n cy_driver_data = gva[curr_yr][region]\n\n '''if submodel == 'is_submodel':\n\n # Map enduse to SIC letter\n lu_industry_sic = lookup_tables.industrydemand_name_sic2007()\n sic_letter = lu_industry_sic[sector][sic_2007_letter]\n\n by_driver_data = industry_gva[base_yr][region][sic_lettersector]\n cy_driver_data = industry_gva[curr_yr][region][sic_letter]\n else:\n\n # Calculate overall GVA for all sectors TODO\n\n by_driver_data = gva[base_yr][region]\n cy_driver_data = gva[curr_yr][region]'''\n\n elif scenario_driver == 'population':\n by_driver_data = population[base_yr][region]\n cy_driver_data = population[curr_yr][region]\n #TODO :ADD OTHER ENDSES\n\n if math.isnan(by_driver_data):\n logging.warning(\"INF ERROR\")\n by_driver_data = 1\n if math.isnan(cy_driver_data):\n logging.warning(\"INF ERROR\")\n cy_driver_data = 1\n\n # Multiply drivers\n 
by_driver *= by_driver_data\n cy_driver *= cy_driver_data\n\n try:\n factor_driver = cy_driver / by_driver # FROZEN (as in chapter 3.1.2 EQ E-2)\n except ZeroDivisionError:\n factor_driver = 1\n\n if math.isnan(factor_driver):\n raise Exception(\"Scenario driver factor is NaN\")\n\n fuel_y = fuel_y * factor_driver\n else:\n \"\"\"Scenario driver calculation based on dwelling stock\n \"\"\"\n # Test if enduse has a dwelling related scenario driver\n if hasattr(dw_stock[base_yr], enduse) and curr_yr != base_yr:\n\n # Scenariodriver of dwelling stock base year and new stock\n by_driver = getattr(dw_stock[base_yr], enduse)\n cy_driver = getattr(dw_stock[curr_yr], enduse)\n #assert by_driver != 'nan' and assert cy_driver != 'nan'\n\n # base year / current (checked)\n try:\n factor_driver = cy_driver / by_driver\n except ZeroDivisionError:\n factor_driver = 1\n\n # Check if float('nan')\n if math.isnan(factor_driver):\n logging.warning(\"Something went wrong with scenario\")\n factor_driver = 1\n\n #logging.debug(\"... Scenario drivers: {} {} {}\".format(\n # by_driver, cy_driver, factor_driver))\n\n fuel_y = fuel_y * factor_driver\n else:\n pass #enduse not defined with scenario drivers\n\n assert not math.isnan(np.sum(fuel_y)) # sanity check: fuel must not be NaN\n\n return fuel_y\n\ndef apply_specific_change(\n enduse,\n fuel_y,\n enduse_overall_change,\n strategy_variables,\n base_yr,\n curr_yr\n ):\n \"\"\"Calculates fuel based on assumed overall enduse specific\n fuel consumption changes.\n\n The changes are assumed across all fueltypes.\n For enduses where no technologies are defined, a linear\n diffusion is suggested to best represent multiple sigmoid efficiency\n improvements of individual technologies.\n\n Either a sigmoid standard diffusion or linear diffusion can be\n implemented. Linear is suggested.\n\n Arguments\n ----------\n enduse : str\n Enduse\n fuel_y : array\n Yearly fuel per fueltype\n enduse_overall_change : dict\n Info about how the enduse is overall changed (e.g. 
diff method)\n strategy_variables : dict\n Change in overall enduse for every enduse (percent ey)\n base_yr : int\n Base year\n curr_yr : int\n Current year\n\n Returns\n -------\n fuel_y : array\n Yearly new fuels\n \"\"\"\n # Fuel consumption shares in base and end year\n percent_by = 1.0\n\n percent_ey = percent_by + strategy_variables['enduse_change__{}'.format(enduse)]['scenario_value']\n\n # Share of fuel consumption difference\n diff_fuel_consump = percent_ey - percent_by\n diffusion_choice = enduse_overall_change['other_enduse_mode_info']['diff_method']\n\n if diff_fuel_consump != 0: # If change in fuel consumption\n\n # Lineare diffusion up to cy\n if diffusion_choice == 'linear':\n lin_diff_factor = diffusion_technologies.linear_diff(\n base_yr,\n curr_yr,\n percent_by,\n percent_ey,\n strategy_variables['enduse_specific_change_yr_until_changed']['scenario_value'])\n change_cy = lin_diff_factor\n\n # Sigmoid diffusion up to cy\n elif diffusion_choice == 'sigmoid':\n sig_diff_factor = diffusion_technologies.sigmoid_diffusion(\n base_yr,\n curr_yr,\n strategy_variables['enduse_specific_change_yr_until_changed']['scenario_value'],\n enduse_overall_change['other_enduse_mode_info']['sigmoid']['sig_midpoint'],\n enduse_overall_change['other_enduse_mode_info']['sigmoid']['sig_steepness'])\n change_cy = diff_fuel_consump * sig_diff_factor\n\n return fuel_y * change_cy\n else:\n return fuel_y\n\ndef apply_climate_change(\n enduse,\n fuel_y,\n cooling_factor_y,\n heating_factor_y,\n enduse_space_heating,\n enduse_space_cooling\n ):\n \"\"\"Change fuel demand for heat and cooling service\n depending on changes in HDD and CDD within a region\n (e.g. climate change induced)\n\n Arguments\n ----------\n enduse : str\n Enduse\n fuel_y : array\n Yearly fuel per fueltype\n cooling_factor_y : array\n Distribution of fuel within year to days (yd)\n heating_factor_y : array\n Distribution of fuel within year to days (yd)\n enduse_space_heating : list\n Enduses defined as space heating\n enduse_space_cooling : list\n Enduses defined as space cooling\n\n Return\n ------\n fuel_y : array\n Changed yearly fuel per fueltype\n\n Note\n ----\n - `cooling_factor_y` and `heating_factor_y` are based on the sum\n over the year. 
Therefore it is assumed that fuel correlates\n directly with HDD or CDD.\n \"\"\"\n if enduse in enduse_space_heating:\n fuel_y = fuel_y * heating_factor_y\n elif enduse in enduse_space_cooling:\n fuel_y = fuel_y * cooling_factor_y\n\n return fuel_y\n\ndef apply_smart_metering(\n enduse,\n fuel_y,\n sm_assump,\n strategy_variables,\n base_yr,\n curr_yr\n ):\n \"\"\"Calculate fuel savings depending on smart meter penetration\n\n Arguments\n ----------\n enduse : str\n Enduse\n fuel_y : array\n Yearly fuel per fueltype\n sm_assump : dict\n smart meter assumptions\n strategy_variables : dict\n Base simulation parameters\n base_yr, curr_yr : int\n years\n\n Returns\n -------\n fuel_y : array\n New fuel per year\n\n Note\n -----\n - The smart meter penetration is assumed with a sigmoid diffusion.\n\n - In the assumptions the maximum penetration and also the\n generally fuel savings for each enduse can be defined.\n \"\"\"\n try:\n\n enduse_savings = strategy_variables['smart_meter_improvement_{}'.format(enduse)]['scenario_value']\n\n # Sigmoid diffusion up to current year\n sigm_factor = diffusion_technologies.sigmoid_diffusion(\n base_yr,\n curr_yr,\n strategy_variables['smart_meter_yr_until_changed']['scenario_value'],\n sm_assump['smart_meter_diff_params']['sig_midpoint'],\n sm_assump['smart_meter_diff_params']['sig_steepness'])\n\n # Check if float\n assert isinstance(sigm_factor, float)\n\n # Improvement of smart meter penetration\n penetration_improvement = strategy_variables['smart_meter_improvement_p']['scenario_value']\n\n # Smart Meter penetration (percentage of people having smart meters)\n penetration_by = sm_assump['smart_meter_p_by']\n penetration_cy = sm_assump['smart_meter_p_by'] + sigm_factor * penetration_improvement\n\n saved_fuel = fuel_y * (penetration_cy - penetration_by) * enduse_savings\n fuel_y = fuel_y - saved_fuel\n\n return fuel_y\n\n except KeyError:\n # not defined for this enduse\n return fuel_y\n\ndef convert_service_to_p(tot_s_y, s_fueltype_tech):\n \"\"\"Calculate fraction of service for every technology\n of total service\n\n Arguments\n ----------\n tot_s_y : float\n Total yearly service\n s_fueltype_tech : dict\n Service per technology and fueltype\n\n Returns\n -------\n s_tech_p : dict\n All tecnology services are\n provided as a fraction of total service\n\n Note\n ----\n Iterate over values in dict and apply calculations\n \"\"\"\n if tot_s_y == 0:\n _total_service = 0\n else:\n _total_service = 1 / tot_s_y\n\n # Iterate all technologies and calculate fraction of total service\n s_tech_p = {}\n for tech_services in s_fueltype_tech.values():\n for tech, service_tech in tech_services.items():\n s_tech_p[tech] = _total_service * service_tech\n\n return s_tech_p\n\ndef get_service_diffusion(sig_param_tech, curr_yr):\n \"\"\"Calculate energy service fraction of technologies with increased service\n for current year based on sigmoid diffusion\n\n Arguments\n ----------\n sig_param_tech : dict\n Sigmoid diffusion parameters per technology\n curr_yr : dict\n Current year\n\n Returns\n -------\n s_tech_p : dict\n Share of service per technology of current year\n \"\"\"\n if sig_param_tech['l_parameter'] is None:\n s_tech_p = 0\n elif sig_param_tech['l_parameter'] == 'linear':\n s_tech_p = 'identical'\n else:\n s_tech_p = diffusion_technologies.sigmoid_function(\n curr_yr,\n sig_param_tech['l_parameter'],\n sig_param_tech['midpoint'],\n sig_param_tech['steepness'])\n\n return s_tech_p\n\ndef calc_service_switch(\n enduse,\n s_tech_y_cy,\n all_technologies,\n 
sig_param_tech,\n curr_yr,\n base_yr,\n sector,\n crit_switch_happening\n ):\n \"\"\"Apply change in service depending on defined service switches.\n\n The service which is fulfilled by new technologies as defined\n in the service switches is substracted of the replaced\n technologies proportionally to the base year distribution\n of these technologies.\n\n Arguments\n ---------\n tot_s_yh_cy : array\n Hourly service of all technologies\n all_technologies : dict\n Technologies to iterate\n sig_param_tech : dict\n Sigmoid diffusion parameters\n curr_yr : int\n Current year\n\n Returns\n -------\n switched_s_tech_y_cy : dict\n Service per technology in current year after switch in a year\n \"\"\"\n # ----------------------------------------\n # Test wheter switch is defined or not\n # ----------------------------------------\n crit_switch_service = fuel_service_switch.get_switch_criteria(\n enduse,\n sector,\n crit_switch_happening,\n base_yr,\n curr_yr)\n\n # ----------------------------------------\n # Calculate switch\n # ----------------------------------------\n if crit_switch_service:\n\n switched_s_tech_y_cy = {}\n\n # Service of all technologies\n service_all_techs = sum(s_tech_y_cy.values())\n\n for tech in all_technologies:\n\n # Calculated service share per tech for cy with sigmoid parameters\n s_tech_cy_p = get_service_diffusion(\n sig_param_tech[tech], curr_yr)\n\n if s_tech_cy_p == 'identical':\n switched_s_tech_y_cy[tech] = s_tech_y_cy[tech]\n else:\n switched_s_tech_y_cy[tech] = service_all_techs * s_tech_cy_p\n\n assert switched_s_tech_y_cy[tech] >= 0\n\n return switched_s_tech_y_cy\n else:\n return s_tech_y_cy\n\ndef apply_cooling(\n enduse,\n fuel_y,\n strategy_variables,\n cooled_floorarea_p_by,\n other_enduse_mode_info,\n base_yr,\n curr_yr):\n \"\"\"Apply changes for cooling enduses depending\n on assumption of how much of the floor area in percent\n is cooled\n\n It is aassumption a linear correlation between the\n percentage of cooled floor space (area) and energy demand.\n\n Arguments\n ---------\n enduse : str\n Enduse\n fuel_y : array\n Annual fuel demand\n strategy_variables : dict\n Strategy variables\n cooled_floorarea_p_by : dict\n Assumption about cooling floor area in base year\n other_enduse_mode_info : dict\n diffusion parameters\n base_yr : int\n Base year\n curr_yr : int\n Current year\n\n Returns\n -------\n fuel_y : array\n Fuel array (either changed fuel depending on cooling percentage)\n of identical array\n \"\"\"\n try:\n\n # Floor area share cooled in end year\n cooled_floorearea_p_ey = cooled_floorarea_p_by + strategy_variables[\"cooled_floorarea__{}\".format(enduse)]['scenario_value']\n\n # Fraction of heat recovered up to current year\n sig_diff_factor = diffusion_technologies.sigmoid_diffusion(\n base_yr,\n curr_yr,\n strategy_variables['cooled_floorarea_yr_until_changed']['scenario_value'],\n other_enduse_mode_info['sigmoid']['sig_midpoint'],\n other_enduse_mode_info['sigmoid']['sig_steepness'])\n\n # Additionall floor area\n additional_floor_area_p = sig_diff_factor * (cooled_floorearea_p_ey - cooled_floorarea_p_by)\n\n cooled_floorarea_p_cy = cooled_floorarea_p_by + additional_floor_area_p\n\n # Calculate factor\n floorare_cooling_factor = cooled_floorarea_p_cy / cooled_floorarea_p_by\n\n # Apply factor\n fuel_y = fuel_y * floorare_cooling_factor\n return fuel_y\n\n except KeyError:\n # no cooling defined for enduse\n return fuel_y\n\ndef industry_enduse_changes(\n enduse,\n sector,\n base_yr,\n curr_yr,\n strategy_variables,\n fuels,\n 
other_enduse_mode_info,\n assumptions\n ):\n \"\"\"This function changes the demand if the enduse\n is a an industrial enduse depending on assumed\n industry related scenario paramters\n\n Arguments\n ---------\n enduse : str\n Enduse\n sector : str\n Sector\n curr_yr : int\n Current year\n strategy_variables : dict\n All strategy variables\n fuels : array\n Annual fuels\n\n Returns\n --------\n fuels : np.array\n Changed fuels depending on scenario\n\n Info\n ----\n OLD MODEL TODO\n\n \"\"\"\n factor = 1\n\n if enduse == \"is_low_temp_process\":\n\n # Diffusion of policy\n #cy_factor = by_value / cy_value / by_value\n #Multiply fuels\n #fuels = fuels * cy_factor\n\n '''\n Theoretical maximal potential for every sector\n --> improvement in % of every sector?\n\n\n '''\n pass\n elif enduse == 'is_high_temp_process':\n\n\n if sector == 'basic_metals':\n\n # Calculate factor depending on fraction of hot and cold steel rolling process\n factor = hot_cold_process(\n base_yr,\n curr_yr,\n strategy_variables,\n other_enduse_mode_info,\n assumptions)\n\n #elif sector == 'non_metallic_mineral_products':\n\n # # Calculate factor depending on cement processes\n\n else:\n pass\n\n fuels_out = fuels * factor\n\n return fuels_out\n\ndef hot_cold_process(\n base_yr,\n curr_yr,\n strategy_variables,\n other_enduse_mode_info,\n assumptions\n ):\n \"\"\"Calculate factor based on the fraction of hot\n and cold rolling processes in steel manufacturing.\n The fraction of either process is calculated based on\n the scenario input of the future share of cold rollling\n processes. A sigmoid diffusion towards this fucture defined\n fraction is implemented.\n\n Arguments\n ----------\n base_yr : int\n Base year\n curr_yr : int\n Current year\n strategy_variables : dict\n Strategy variables\n other_enduse_mode_info : dict\n Sigmoid diffusion parameters\n assumptions : dict\n Assumptions including efficiencies of either process\n and the base year share\n\n Returns\n -------\n factor : float\n Factor to change energy demand\n \"\"\"\n\n # Reduce demand depending on fraction of hot and cold steel rolling process\n p_cold_rolling_by = assumptions.p_cold_rolling_steel_by\n p_hot_rolling_by = 1.0 - p_cold_rolling_by\n\n # Get sigmoid transition for share in rolling\n sig_diff_factor = diffusion_technologies.sigmoid_diffusion(\n base_yr,\n curr_yr,\n strategy_variables['hot_cold_rolling_yr_until_changed']['scenario_value'],\n other_enduse_mode_info['sigmoid']['sig_midpoint'],\n other_enduse_mode_info['sigmoid']['sig_steepness'])\n\n # Difference p cold rolling\n diff_cold_rolling = strategy_variables['p_cold_rolling_steel']['scenario_value'] - p_cold_rolling_by\n\n # Difference until cy\n diff_cold_rolling_cy = sig_diff_factor * diff_cold_rolling\n\n # Calculate cy p\n p_cold_rolling_cy = p_cold_rolling_by + diff_cold_rolling_cy\n p_hot_rolling_cy = 1 - p_cold_rolling_cy\n\n # Calculate factor\n eff_cold = assumptions.eff_cold_rolling_process\n eff_hot = assumptions.eff_hot_rolling_process\n\n p_by = p_cold_rolling_by * eff_cold + p_hot_rolling_by * eff_hot\n p_cy = p_cold_rolling_cy * eff_cold + p_hot_rolling_cy * eff_hot\n\n factor = p_cy / p_by\n\n return factor\n","sub_path":"energy_demand/enduse_func.py","file_name":"enduse_func.py","file_ext":"py","file_size_in_byte":57900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
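A minimal standalone sketch of the diffusion-and-factor pattern used throughout the record above (apply_specific_change, apply_cooling, hot_cold_process): compute a diffusion share for the current year, turn it into a multiplicative factor relative to the base year, and scale fuel_y. The sigmoid helper below is an illustrative assumption, not the actual diffusion_technologies implementation, and all numeric values are made up:

import numpy as np

def sigmoid_diffusion(base_yr, curr_yr, yr_until_changed, sig_midpoint, sig_steepness):
    # Share of the total change realised by curr_yr (~0 in base_yr, 1 from yr_until_changed on)
    if curr_yr >= yr_until_changed:
        return 1.0
    # Normalise the current year onto [-1, 1] around the diffusion midpoint
    y = -1.0 + 2.0 * (curr_yr - base_yr) / (yr_until_changed - base_yr)
    return 1.0 / (1.0 + np.exp(-sig_steepness * (y - sig_midpoint)))

fuel_y = np.array([100.0, 50.0, 25.0])          # illustrative yearly fuel per fueltype
change_ey = -0.2                                # assumed -20% change by the end year
sig = sigmoid_diffusion(2015, 2030, 2050, 0, 1)
fuel_y_cy = fuel_y * (1.0 + change_ey * sig)    # factor relative to the base year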
+{"seq_id":"140602583","text":"#Created By: Logan Fillo\n#Created On: 2019-03-11\n\n\"\"\"\nThis module contains the functionality\nfor creating test cases\n\"\"\"\n\nimport sys\n\nimport constants as const\nimport vehicle\nimport scene\nimport object\n\n\nclass SimTestCase:\n \"\"\"\n A class representing a simulation test case\n \"\"\"\n def __init__(self, name):\n \"\"\"\n Constructs a SimTestCase\n \"\"\"\n self.name = name\n self.is_dynamic = const.DEFAULT_IS_DYNAMIC\n self.timeout = const.DEFAULT_TIMEOUT\n self.vehicle = None\n self.scene = None\n self.objects = None\n\n\nclass SimTestBuilder:\n \"\"\"\n A helper class for building simulation tests\n \"\"\"\n def __init__(self):\n \"\"\"\n Construct a SimTestBuilder\n \"\"\"\n self._curr_test = None\n self._curr_vhcl = None\n self._curr_scene = None\n self._curr_objs = None\n\n # For storing forall data\n self._glob_timeout = None\n self._glob_is_dynamic = None\n self._glob_vhcl_data = dict()\n self._glob_scene_data = dict()\n self._glob_objs = []\n\n def build_test_case(self, name):\n \"\"\"\n Constructs the objects needed for a test\n and sets their global data, if any\n\n :param name: test case name\n \"\"\"\n self._curr_test = SimTestCase(name)\n self._curr_vhcl = vehicle.Vehicle()\n self._curr_scene = scene.Scene()\n self._curr_objs = []\n self._set_globals()\n\n def get_result(self):\n \"\"\"\n Returns the current test case configured with\n the current vehicle, scene, and objects\n \"\"\"\n self._curr_test.vehicle = self._curr_vhcl\n self._curr_test.scene = self._curr_scene\n self._curr_test.objects = self._curr_objs\n return self._curr_test\n\n def add_object(self, name, model,\n x_pos, y_pos, z_pos,\n r_rot, p_rot, y_rot,\n is_global):\n sim_object = object.SimObject(name, model,\n x_pos, y_pos, z_pos,\n r_rot, p_rot, y_rot)\n if is_global:\n self._glob_objs.append(sim_object)\n else:\n self._curr_objs.append(sim_object)\n\n def set_wave_scale(self, scale, is_global):\n self._set_scene_elem(const.SCENE_WAVE_SCALE,\n 1*(10**(-scale)), is_global)\n\n def set_timeout(self, timeout, is_global):\n if is_global:\n self._glob_timeout = timeout\n else:\n self._curr_test.timeout = timeout\n\n def use_dynamics(self, is_dynamic, is_global):\n if is_global:\n self._glob_is_dynamic = is_dynamic\n else:\n self._curr_test.is_dynamic = is_dynamic\n\n def set_vehicle_position(self, x, y, z, is_global):\n if z < 0:\n sys.exit(\"Error: vehicle z position cannot be less than 0 (above water)\")\n self._set_vhcl_elem(const.VEHICLE_X_POS, x, is_global)\n self._set_vhcl_elem(const.VEHICLE_Y_POS, y, is_global)\n self._set_vhcl_elem(const.VEHICLE_Z_POS, z, is_global)\n\n def set_vehicle_rotation(self, r, p, y, is_global):\n self._set_vhcl_elem(const.VEHICLE_R_ROT, r, is_global)\n self._set_vhcl_elem(const.VEHICLE_P_ROT, p, is_global)\n self._set_vhcl_elem(const.VEHICLE_Y_ROT, y, is_global)\n\n def _set_globals(self):\n if self._glob_timeout is not None:\n self._curr_test.timeout = self._glob_timeout\n if self._glob_is_dynamic is not None:\n self._curr_test.is_dynamic = self._glob_is_dynamic\n if not len(self._glob_objs) == 0:\n self._curr_objs = self._glob_objs[:]\n for elem in self._glob_vhcl_data:\n self._curr_vhcl.data[elem] = self._glob_vhcl_data[elem]\n for elem in self._glob_scene_data:\n self._curr_scene.data[elem] = self._glob_scene_data[elem]\n\n def _set_vhcl_elem(self, elem, val, is_global):\n if is_global:\n self._glob_vhcl_data[elem] = val\n else:\n self._curr_vhcl.data[elem] = val\n\n def _set_scene_elem(self, elem, val, is_global):\n 
if is_global:\n self._glob_scene_data[elem] = val\n else:\n self._curr_scene.data[elem] = val\n","sub_path":"src/simulator/pysimtest/_pysimtest/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
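A short usage sketch for the SimTestBuilder above, using only methods defined in this file (it assumes the module's own imports, e.g. vehicle and scene, resolve); the test name and numeric values are illustrative. Settings passed with is_global=True are stored on the builder and re-applied to every case via _set_globals() when build_test_case() runs:

builder = SimTestBuilder()

# Global ("forall") settings, shared by every case built afterwards
builder.set_timeout(60, is_global=True)
builder.use_dynamics(True, is_global=True)

# Per-case settings (z must be >= 0, i.e. below the water surface)
builder.build_test_case("example_dive")
builder.set_vehicle_position(0.0, 0.0, 2.0, is_global=False)
builder.set_vehicle_rotation(0.0, 0.0, 90.0, is_global=False)

test_case = builder.get_result()
assert test_case.timeout == 60 and test_case.is_dynamic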
+{"seq_id":"62407831","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 24 14:30:05 2019\n\n@author: eo\n\"\"\"\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Add local path\n\nimport os\nimport sys\n\ndef find_path_to_local(target_folder = \"local\"):\n \n # Skip path finding if we successfully import the dummy file\n try:\n from local.dummy import dummy_func; dummy_func(); return\n except ImportError:\n print(\"\", \"Couldn't find local directory!\", \"Searching for path...\", sep=\"\\n\")\n \n # Figure out where this file is located so we can work backwards to find the target folder\n file_directory = os.path.dirname(os.path.abspath(__file__))\n path_check = []\n \n # Check parent directories to see if we hit the main project directory containing the target folder\n prev_working_path = working_path = file_directory\n while True:\n \n # If we find the target folder in the given directory, add it to the python path (if it's not already there)\n if target_folder in os.listdir(working_path):\n if working_path not in sys.path:\n tilde_swarm = \"~\"*(4 + len(working_path))\n print(\"\\n{}\\nPython path updated:\\n {}\\n{}\".format(tilde_swarm, working_path, tilde_swarm))\n sys.path.append(working_path)\n break\n \n # Stop if we hit the filesystem root directory (parent directory isn't changing)\n prev_working_path, working_path = working_path, os.path.dirname(working_path)\n path_check.append(prev_working_path)\n if prev_working_path == working_path:\n print(\"\\nTried paths:\", *path_check, \"\", sep=\"\\n \")\n raise ImportError(\"Can't find '{}' directory!\".format(target_folder))\n \nfind_path_to_local()\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Imports\n\nimport cv2\nimport numpy as np\n\nfrom collections import deque\n\nfrom local.lib.ui_utils.local_ui.drawing import Entity_Drawer\n\nfrom local.eolib.utils.cli_tools import Color\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Define Classes\n\n\nclass Simple_Window:\n \n # ................................................................................................................. \n \n def __init__(self, window_name,\n frame_wh = None,\n provide_mouse_xy = False,\n create_on_startup = True):\n \n # Get window name so we can continue to refer to this window!\n self.window_name = window_name\n \n # Allocate variables for (potential) mouse-xy feedback\n self.enable_mouse_feedback = provide_mouse_xy\n self._mouse_feedback = None\n \n # Variables for recording the window position\n self.x_px = None\n self.y_px = None\n \n # Variables used to record the size of the displayed image\n self.window_wh_is_set = False\n self.width = None\n self.height = None\n if frame_wh is not None:\n self.set_window_wh(*frame_wh)\n \n # Create the display, if needed\n if create_on_startup:\n self.create_window()\n \n # ................................................................................................................. \n \n def __repr__(self):\n return \"{} ({})\".format(self.class_name, self.window_name)\n \n # ................................................................................................................. 
\n \n @property\n def class_name(self):\n return self.__class__.__name__\n \n # ................................................................................................................. \n \n @property\n def mouse_xy(self):\n return self._mouse_feedback.xy if self.enable_mouse_feedback else None\n \n # ................................................................................................................. \n \n def set_window_wh(self, window_width, window_height): \n self.width = window_width\n self.height = window_height\n self.window_wh_is_set = True\n \n # ................................................................................................................. \n \n def get_window_name(self):\n return self.window_name\n \n # .................................................................................................................\n \n def get_window_wh(self):\n return self.width, self.height\n \n # ................................................................................................................. \n \n def imshow(self, display_frame):\n \n # Check if the window exists and whether the input data is valid\n window_exists = self.exists()\n valid_frame_data = (display_frame is not None)\n \n # Only update showing if the window exists & a valid image is supplied\n if window_exists and valid_frame_data:\n cv2.imshow(self.window_name, display_frame)\n \n return window_exists\n \n # ................................................................................................................. \n \n def imshow_blank(self, blank_wh = None):\n \n # Set blank size if needed\n if self.width is None and self.height is None:\n blank_wh = (500, 30) if blank_wh is None else blank_wh\n else:\n blank_wh = (self.width, self.height)\n \n # Only update showing if the window exists\n window_exists = self.exists()\n if window_exists:\n blank_image = np.zeros((blank_wh[1], blank_wh[0], 3), dtype=np.uint8)\n cv2.imshow(self.window_name, blank_image)\n \n return window_exists\n \n # ................................................................................................................. \n \n def move_corner_pixels(self, x_pixels, y_pixels, create_if_missing = True):\n \n ''' Move the window corner to a screen position, specified in pixels '''\n \n # Make sure the window exists before we move it around\n self._create_window_if_missing(create_if_missing)\n \n # Force inputs to integers, since floats aren't accepted\n self.x_px = int(round(x_pixels))\n self.y_px = int(round(y_pixels))\n \n cv2.moveWindow(self.window_name, self.x_px, self.y_px)\n \n return self\n \n # ................................................................................................................. 
\n \n def move_center_pixels(self, x_pixels, y_pixels, frame_width = None, frame_height = None,\n create_if_missing = True):\n \n '''\n Move the window center to a screen position, specified in pixels\n '''\n \n # Make sure the window exists before we move it around\n self._create_window_if_missing(create_if_missing)\n \n # Update frame width/height if needed\n self.width = frame_width if frame_width is not None else self.width\n self.height = frame_height if frame_height is not None else self.height\n \n # Get the frame half sizing for centering\n try:\n half_frame_width = self.width / 2\n half_frame_height = self.height / 2\n except TypeError:\n raise AttributeError(\"Can't move the window without knowing its frame width/height!\")\n \n # Find window corner location, so that the frame center lands at the target screen position\n window_corner_x_px = x_pixels - half_frame_width\n window_corner_y_px = y_pixels - half_frame_height\n \n return self.move_corner_pixels(window_corner_x_px, window_corner_y_px)\n \n # .................................................................................................................\n \n def exists(self):\n return cv2.getWindowProperty(self.window_name, cv2.WND_PROP_AUTOSIZE) > 0\n \n # ................................................................................................................. \n \n def close(self):\n if self.exists(): \n cv2.destroyWindow(self.window_name)\n \n # ................................................................................................................. \n \n def attach_callback(self, mouse_callback, callback_data = {}, create_if_missing = True):\n \n self._create_window_if_missing(create_if_missing)\n cv2.setMouseCallback(self.window_name, mouse_callback, callback_data)\n \n return self\n \n # ................................................................................................................. \n \n def add_trackbar(self, label, initial_value, max_value): \n cv2.createTrackbar(label, self.window_name, initial_value, max_value, lambda x: None)\n \n # ................................................................................................................. \n \n def set_trackbar(self, label, new_value):\n cv2.setTrackbarPos(label, self.window_name, new_value)\n \n # ................................................................................................................. \n \n def read_trackbar(self, label):\n '''Returns current trackbar value (integer)'''\n return cv2.getTrackbarPos(label, self.window_name)\n \n # ................................................................................................................. \n \n def create_window(self):\n \n # Create window\n cv2.namedWindow(self.window_name)\n self.imshow_blank()\n self.move_corner_pixels(x_pixels = 50, y_pixels = 50, create_if_missing = False)\n \n # Enable mouse xy reporting, if needed\n if self.enable_mouse_feedback:\n self._mouse_feedback = Mouse_Follower()\n cv2.setMouseCallback(self.window_name, self._mouse_feedback)\n \n return self\n \n # ................................................................................................................. 
\n \n def _create_window_if_missing(self, create_if_missing = True):\n \n ''' Function which checks that the window exists, and if not, will create it '''\n \n window_is_missing = (not self.exists())\n if window_is_missing and create_if_missing:\n self.create_window()\n \n return\n \n # .................................................................................................................\n # .................................................................................................................\n\n# /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n \nclass Control_Window(Simple_Window):\n \n # .................................................................................................................\n \n def __init__(self, window_name, control_list_json, frame_wh = (500, 30),\n create_on_startup = True):\n \n # Inherit from simple window\n provide_mouse_xy = False\n super().__init__(window_name, frame_wh, provide_mouse_xy, create_on_startup)\n \n # Set width/height of control window (which is normally just a small blackout area)\n self.width, self.height = frame_wh\n \n # Store controls for future reference if needed\n self.control_list_json = control_list_json\n \n # Draw initial blank frame\n self._blank_frame = np.zeros((self.height, self.width, 3), dtype=np.uint8)\n self.imshow(self._blank_frame) \n \n # Allocate space to store trackbar info\n self.tooltip_dict = {}\n self.units_dict = {}\n self.menu_labels_dict = {}\n self.variable_to_label_lut = []\n self.trackbar_position_dict = {}\n self.trackbar_minimums_dict = {}\n self.variable_name_list = []\n \n # Allocate space for mapping functions\n self._map_to_raw_func_dict = {}\n self._raw_to_map_func_dict = {}\n \n # Create all the trackbars and read their initial values\n self._build_trackbars(self.control_list_json)\n \n # .................................................................................................................\n \n def print_info(self, return_string = True):\n \n # Loop through every variable and construct a nice info printout\n print_str_list = []\n for each_variable in self.variable_name_list:\n \n # Pull out control info\n control_label = self.variable_to_label_lut[each_variable]\n control_units = self.units_dict[each_variable]\n control_tooltip = self.tooltip_dict[each_variable]\n control_menu_labels = self.menu_labels_dict[each_variable]\n \n # Bail if no tooltip is present. 
If a tooltip comes in as a list, treat it as separate lines to print out\n if control_tooltip is None:\n continue\n elif control_tooltip == \"\":\n tooltip_str = \" No tooltip!\"\n elif type(control_tooltip) in {tuple, list}:\n tooltip_str = \"\\n\".join([\" {}\".format(each_line) for each_line in control_tooltip])\n else:\n tooltip_str = \" {}\".format(control_tooltip)\n \n # Set up the heading title string for each variable print out\n heading_str = control_label\n if control_units:\n heading_str += \" ({}):\".format(control_units)\n elif len(control_menu_labels) > 1:\n menu_labels_str = \" / \".join(control_menu_labels)\n heading_str += \" ({}):\".format(menu_labels_str)\n else:\n heading_str += \":\"\n \n # Combine the label heading and tooltip strings\n new_print_str = [\"\",\n Color(heading_str).bold.str,\n tooltip_str]\n \n # Add to the list of variable info printouts\n print_str_list += new_print_str\n \n # Finally, return or print out all the control info\n print_out_str = \"\\n\".join(print_str_list)\n if return_string:\n return print_out_str\n else:\n print(print_out_str)\n \n # .................................................................................................................\n \n def set_trackbar(self, variable_name, mapped_value):\n \n map_to_raw_func = self._map_to_raw_func_dict[variable_name]\n raw_value = map_to_raw_func(mapped_value)\n \n control_label = self.variable_to_label_lut[variable_name]\n self._set_trackbar_raw(control_label, raw_value)\n \n # .................................................................................................................\n \n def read_trackbar(self, variable_name, force_minimum = True):\n \n # Get the function needed to map from raw (trackbar position) values to mapped values\n raw_to_map_func = self._raw_to_map_func_dict[variable_name]\n \n # Get trackbar position\n control_label = self.variable_to_label_lut[variable_name]\n raw_value = self._read_trackbar_raw(control_label)\n \n # Stop the user from going below some minimum trackbar position\n if force_minimum:\n tb_min = self.trackbar_minimums_dict[control_label]\n if raw_value < tb_min:\n raw_value = tb_min\n self._set_trackbar_raw(control_label, raw_value)\n \n # Check if the trackbar position changed\n value_changed = (raw_value != self.trackbar_position_dict[control_label])\n if value_changed:\n self.trackbar_position_dict[control_label] = raw_value\n \n # Finally, return the variable in its proper representation\n map_value = raw_to_map_func(raw_value)\n \n return value_changed, map_value\n \n # .................................................................................................................\n \n def read_trackbar_changes(self):\n \n # Loop through all trackbars, reading values\n # Need to check values against some recorded value, and report them if they changed...\n # Report back using variable_name and new value\n value_changes_dict = {}\n for each_variable in self.variable_name_list:\n value_changed, map_value = self.read_trackbar(each_variable)\n if value_changed:\n value_changes_dict.update({each_variable: map_value})\n \n return value_changes_dict\n \n # .................................................................................................................\n \n def _set_trackbar_raw(self, label, raw_value): \n if self.exists():\n cv2.setTrackbarPos(label, self.window_name, raw_value)\n \n # .................................................................................................................\n \n def 
_read_trackbar_raw(self, label):\n return cv2.getTrackbarPos(label, self.window_name) if self.exists() else self.trackbar_position_dict[label]\n \n # .................................................................................................................\n \n def _build_trackbars(self, control_list):\n \n # Allocate space to store trackbar info\n self.tooltip_dict = {}\n self.units_dict = {}\n self.menu_labels_dict = {}\n self.variable_to_label_lut = {}\n self.trackbar_position_dict = {}\n self.trackbar_minimums_dict = {}\n self.variable_name_list = []\n \n for each_entry in control_list:\n \n '''\n print(\"\")\n print(\"({})\".format(os.path.basename(__file__)))\n print(\"CONTROL ENTRY:\", each_entry)\n print(\"Control_type\", control_type)\n '''\n \n # Get important identifying info\n control_label = each_entry[\"label\"]\n variable_name = each_entry[\"variable_name\"]\n control_type = each_entry[\"control_type\"]\n visible = each_entry.get(\"visible\", True)\n \n # Skip over any non-visible controls\n if not visible:\n continue\n \n # Configure each control and figure out the trackbar (initial) settings\n config_function = self._control_type_lookup(control_type)\n tb_minimum, tb_maximum, tb_initial = config_function(each_entry) \n self.add_trackbar(control_label, tb_initial, tb_maximum)\n \n # Store the units and tooltip info so we can print it out later\n self.tooltip_dict[variable_name] = each_entry.get(\"tooltip\", \"\")\n self.units_dict[variable_name] = each_entry.get(\"units\", \"\")\n self.menu_labels_dict[variable_name], _ = zip(*each_entry.get(\"option_label_value_list\", [(\"\", \"\")]))\n \n # Store data we'll need for reading the trackbars later\n self.variable_to_label_lut[variable_name] = control_label\n self.trackbar_position_dict[control_label] = tb_initial\n self.trackbar_minimums_dict[control_label] = tb_minimum\n self.variable_name_list.append(variable_name)\n \n # .................................................................................................................\n \n def _control_type_lookup(self, control_type):\n\n # Use dictionary as a simple lookup for matching control types with configuration functions\n ctrl_type_lut = {\"toggle\": self._toggle_config,\n \"slider\": self._slider_config,\n \"numentry\": self._numentry_config,\n \"menu\": self._menu_config,\n \"button\": self._button_config} \n \n return ctrl_type_lut[control_type]\n \n # .................................................................................................................\n \n def _toggle_config(self, config_data):\n \n # Expects\n '''\n variable_name, label\n default_value,\n tooltip, visible\n '''\n \n # Pull out some relevant data for convenience\n variable_name = config_data[\"variable_name\"]\n default_value = config_data.get(\"default_value\", 0)\n \n # Get the mapping functions based on the config data\n raw_to_map_func, map_to_raw_func = bool_to_int()\n \n # Store the mapping functions so we can use them when reading/setting the trackbar\n self._map_to_raw_func_dict[variable_name] = map_to_raw_func\n self._raw_to_map_func_dict[variable_name] = raw_to_map_func\n \n # Get the default and maximum trackbar values\n trackbar_initial = map_to_raw_func(default_value)\n trackbar_minimum = 0\n trackbar_maximum = 1\n \n return trackbar_minimum, trackbar_maximum, trackbar_initial\n \n # ................................................................................................................. 
\n \n def _slider_config(self, config_data):\n \n # Expects\n '''\n variable_name, label\n default_value\n min_value, max_value, step_size \n units, return_type, zero_referenced,\n tooltip, visible\n '''\n \n # Pull out some relevant data for convenience\n variable_name = config_data[\"variable_name\"]\n default_value = config_data.get(\"default_value\", 0)\n min_value = config_data[\"min_value\"]\n max_value = config_data[\"max_value\"]\n step_size = config_data.get(\"step_size\", 1)\n return_type = return_type_strings_to_functions(config_data.get(\"return_type\", None))\n zero_referenced = config_data.get(\"zero_referenced\", False)\n \n # Get the mapping functions based on the config data\n if zero_referenced:\n raw_to_map_func, map_to_raw_func = minceil_affine(min_value, max_value, step_size, return_type)\n else:\n raw_to_map_func, map_to_raw_func = simple_affine(min_value, max_value, step_size, return_type)\n \n # Store the mapping functions so we can use them when reading/setting the trackbar\n self._map_to_raw_func_dict[variable_name] = map_to_raw_func\n self._raw_to_map_func_dict[variable_name] = raw_to_map_func\n \n # Get the minimum, maximum and initial trackbar values\n trackbar_initial = map_to_raw_func(default_value)\n trackbar_minimum = map_to_raw_func(min_value)\n trackbar_maximum = map_to_raw_func(max_value)\n \n return trackbar_minimum, trackbar_maximum, trackbar_initial\n \n # .................................................................................................................\n \n def _numentry_config(self, config_data):\n \n # Expects\n '''\n variable_name,\n label,\n default_value,\n min_value,\n max_value,\n step_size = 1,\n units = None,\n return_type = float,\n zero_referenced = False,\n force_min = True,\n force_max = True,\n force_step = True,\n tooltip = \"\",\n visible = True\n '''\n \n # Pull out data that allows us to create a regular slider \n # (numerical entry doesn't behave differently from a slider when using the local ui!)\n grab_slider_keys = [\"variable_name\", \"label\", \"default_value\",\n \"min_value\", \"max_value\", \"step_size\",\n \"units\", \"return_type\", \"zero_referenced\",\n \"tooltip\", \"visible\"]\n slider_config_data = {each_key: config_data[each_key] for each_key in grab_slider_keys}\n \n return self._slider_config(slider_config_data)\n \n # .................................................................................................................\n \n def _menu_config(self, config_data):\n \n # Expects\n '''\n variable_name,\n label,\n default_value,\n option_label_value_list,\n tooltip = \"\",\n visible = True\n '''\n \n # Pull out some relevant data for convenience\n variable_name = config_data[\"variable_name\"]\n default_value = config_data.get(\"default_value\", 0)\n option_label_value_list = config_data[\"option_label_value_list\"]\n \n # Separate the labels and values, since we only need the values for local usage\n label_list, value_list = list(zip(*option_label_value_list))\n \n # Get the mapping functions based on the config data\n raw_to_map_func, map_to_raw_func = value_list_lookup(value_list)\n \n # Store the mapping functions so we can use them when reading/setting the trackbar\n self._map_to_raw_func_dict[variable_name] = map_to_raw_func\n self._raw_to_map_func_dict[variable_name] = raw_to_map_func\n \n # Get the minimum, maximum and initial trackbar values\n trackbar_initial = map_to_raw_func(default_value)\n trackbar_minimum = 0\n trackbar_maximum = len(value_list) - 1\n \n return trackbar_minimum, trackbar_maximum, 
trackbar_initial\n \n # .................................................................................................................\n \n def _button_config(self, config_data):\n \n # Expects\n '''\n variable_name,\n label,\n default_value,\n return_type = bool,\n tooltip = \"\",\n visible = True\n '''\n \n # Pull out some relevant data for convenience\n variable_name = config_data[\"variable_name\"]\n default_value = config_data.get(\"default_value\", 0)\n \n # Get the mapping functions based on the config data\n set_trackbar_func = self._set_trackbar_raw\n raw_to_map_func, map_to_raw_func = button_map(variable_name, set_trackbar_func)\n \n # Store the mapping functions so we can use them when reading/setting the trackbar\n self._map_to_raw_func_dict[variable_name] = map_to_raw_func\n self._raw_to_map_func_dict[variable_name] = raw_to_map_func\n \n # Get the default and maximum trackbar values\n trackbar_initial = map_to_raw_func(default_value)\n trackbar_minimum = 0\n trackbar_maximum = 1\n \n return trackbar_minimum, trackbar_maximum, trackbar_initial\n \n # .................................................................................................................\n \n # .................................................................................................................\n # .................................................................................................................\n\n# /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n\nclass Slideshow_Window(Simple_Window):\n \n # .................................................................................................................\n \n def __init__(self, window_name,\n frame_wh = None,\n missing_image_test = \"No image...\",\n max_storage = 10):\n \n # Inherit from parent class\n provide_mouse_xy = False\n super().__init__(window_name, frame_wh, provide_mouse_xy)\n \n # Initialize storage variables\n self.frame_deck = self._initialize_empty_frame_deck(missing_image_test, max_storage)\n self.current_select = 0\n self._update_enabled = True\n \n # Add a trackbar to control access to selecting which image to display\n self._trackbar_enable_label = \"Enable Updates\"\n self._trackbar_select_label = \"Image select\"\n self.add_trackbar(self._trackbar_enable_label, 1, 1)\n self.add_trackbar(self._trackbar_select_label, self.current_select, max_storage - 1)\n \n # Draw initial image\n self.imshow_by_index()\n \n # .................................................................................................................\n \n def _initialize_empty_frame_deck(self, missing_image_test, deque_size, default_blank_wh = (360, 240)):\n \n # Create new deque for storing 'slideshow' images\n new_deque = deque([], maxlen = deque_size)\n \n # Create blank frame\n frame_width = self.width if self.width else default_blank_wh[0]\n frame_height = self.height if self.height else default_blank_wh[1]\n \n # Draw an empty frame with some text indicating that no image is available\n blank_frame = np.zeros((frame_height, frame_width, 3), dtype=np.uint8)\n cv2.putText(blank_frame, missing_image_test,\n (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, cv2.LINE_AA)\n \n # Fill in frame deque with blank frames\n for k in range(deque_size):\n new_deque.append(blank_frame.copy())\n \n return new_deque\n \n # 
.................................................................................................................\n \n def imshow(self, display_frame):\n \n # Only update if the window exists\n window_exists = self.exists()\n if window_exists:\n \n # Only update the frame deck (and display) if the slideshow updates are still enabled\n if self._update_enabled:\n self.frame_deck.appendleft(display_frame)\n self.imshow_by_index()\n \n return window_exists\n \n # .................................................................................................................\n \n def imshow_by_index(self, index_select = None):\n \n # Automatically use the current index if one isn't provided\n if index_select is None:\n index_select = self.current_select\n \n # Only update if the window exists\n window_exists = self.exists()\n if window_exists:\n cv2.imshow(self.window_name, self.frame_deck[index_select])\n \n return window_exists\n \n # .................................................................................................................\n \n def read_trackbars(self):\n \n # Determine if updates are enabled\n self._update_enabled = self.read_trackbar(self._trackbar_enable_label)\n \n # Determine if we need to update the displayed index\n new_select = self.read_trackbar(self._trackbar_select_label)\n if new_select != self.current_select:\n self.current_select = new_select\n self.imshow_by_index(new_select)\n \n # .................................................................................................................\n # .................................................................................................................\n\n\n# /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n\nclass Max_WH_Window(Simple_Window):\n \n # .................................................................................................................\n \n def __init__(self, window_name,\n frame_wh = None,\n max_wh = None,\n interpolation_type = cv2.INTER_NEAREST,\n provide_mouse_xy = False,\n create_on_startup = True):\n \n # Inherit from parent class\n super().__init__(window_name, frame_wh, provide_mouse_xy, create_on_startup)\n \n # Variables for limiting frame size\n self.interpolation_type = interpolation_type\n self.max_width = None\n self.max_height = None\n self._check_resize = False\n if max_wh is not None:\n self._check_resize = True\n self.max_width, self.max_height = max_wh\n\n # ................................................................................................................. \n \n def imshow(self, display_frame):\n \n # Check if the window exists (by looking for window properties)\n window_exists = self.exists()\n \n # Don't do anything if a valid frame isn't supplied\n if display_frame is None:\n return self.exists()\n \n # Only update showing if the window exists\n if window_exists:\n cv2.imshow(self.window_name, self._scale_to_max_wh(display_frame))\n \n return window_exists\n \n # ................................................................................................................. 
\n \n def _scale_to_max_wh(self, display_frame):\n \n # Don't do anything if we're not checking for resizing\n if not self._check_resize:\n return display_frame\n \n # Check if we need to resize the displayed frame\n display_height, display_width = display_frame.shape[0:2]\n needs_resize = (display_width > self.max_width) or (display_height > self.max_height)\n if not needs_resize:\n return display_frame\n \n width_scale = display_width / self.max_width\n height_scale = display_height / self.max_height\n max_scale = max(width_scale, height_scale)\n \n # Figure out scaled width/height values and apply resizing!\n scaled_width = int(display_width / max_scale)\n scaled_height = int(display_height / max_scale)\n return cv2.resize(display_frame, dsize = (scaled_width, scaled_height),\n interpolation = self.interpolation_type)\n \n # .................................................................................................................\n # .................................................................................................................\n\n\n# /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n\nclass Drawing_Window(Simple_Window):\n \n # .................................................................................................................\n \n def __init__(self, window_name, frame_wh, drawing_json,\n border_size_px = 60, create_on_startup = True):\n \n # Don't pass a real frame size on initialization, we want to make sure the drawing\n # uses the actual displayed frame size, so everything scales properly...\n # but we don't know this sizing until we get our first .imshow() call! So figure out sizing there\n initial_frame_wh = None\n \n # Inherit from parent class\n drawing_name = \"{} (Drawing)\".format(window_name)\n provide_mouse_xy = False\n super().__init__(drawing_name, initial_frame_wh, provide_mouse_xy, create_on_startup = False)\n \n # Convert drawing json data to entity drawing inputs\n self.drawing_variable_name = drawing_json[\"variable_name\"]\n min_max_entities = drawing_json[\"min_max_entities\"]\n min_max_points = drawing_json[\"min_max_points\"]\n real_border_size = (border_size_px if drawing_json[\"out_of_bounds\"] else 0)\n default_entities = drawing_json.get(\"default_value\", [[]])\n \n # Handle None entries\n min_entities = None if (min_max_entities is None) else min_max_entities[0]\n max_entities = None if (min_max_entities is None) else min_max_entities[1]\n min_points = 3 if (min_max_points is None) else min_max_points[0]\n max_points = None if (min_max_points is None) else min_max_points[1]\n \n # Bundle config needed by entity drawing object\n drawer_config = {\"minimum_entities\": min_entities,\n \"maximum_entities\": max_entities,\n \"minimum_points\": min_points,\n \"maximum_points\": max_points,\n \"border_size_px\": real_border_size}\n \n # Set up object to keep tracking of drawing\n self.drawer = Entity_Drawer(frame_wh, **drawer_config)\n self.drawer.initialize_entities(default_entities)\n \n # Create the display, if needed\n if create_on_startup:\n self.create_window()\n \n # ................................................................................................................. 
\n \n def print_info(self):\n \n # Print out info for each window\n header_str = \"Drawing Controls\"\n max_len = 60\n full_spacer_len = max(0, max_len - len(header_str))\n half_spacer_len = int(full_spacer_len / 2)\n end_spacer_len = max(0, max_len - len(header_str) - 2*half_spacer_len)\n \n # Build components for printing control title blocks, then print control info!\n title_spacer = (\" \" * half_spacer_len)\n end_spacer = (\" \" * end_spacer_len)\n full_heading_str = \"\".join([title_spacer, header_str, title_spacer, end_spacer])\n \n # Create key highlight function\n keycolor = Color().bold.italic\n key_text = lambda key_code, info: \" [{}] {}\".format(keycolor(key_code), info)\n \n # Big printout to explain how to do drawing stuff\n print(\"\",\n \"\",\n \"\",\n Color(full_heading_str.upper()).bold.invert,\n \"\",\n Color(\"Hover Mode:\").bold.underline,\n \"\",\n key_text(\"left-click\", \"to move points\"),\n key_text(\"shift + left-click\", \"to enter drawing mode\"),\n key_text(\"ctrl + left-click\", \"to insert points into an existing shape\"),\n key_text(\"right-click\", \"to delete a single point\"),\n key_text(\"ctrl + right-click\", \"to delete an entire shape\"),\n key_text(\"ctrl + z\", \"to undo recent actions\"),\n key_text(\"arrow keys\", \"to nudge points (hold shift for a larger effect)\"),\n key_text(\"b key\", \"to snap points to nearby borders\"),\n \"\",\n Color(\"Drawing Mode:\").bold.underline,\n \"\",\n key_text(\"shift + left-click\", \"to add more points to a shape-in-progress\"),\n key_text(\"double left-click\", \"to complete a shape\"),\n key_text(\"right-click\", \"to cancel a shape\"),\n key_text(\"ctrl + z\", \"to undo last point\"),\n \"\",\n sep=\"\\n\")\n \n # ................................................................................................................. \n \n def initialize_drawing(self, initial_settings_dict):\n \n # Load existing initial data, if present\n variable_in_initial_settings = (self.drawing_variable_name in initial_settings_dict)\n if variable_in_initial_settings:\n initial_entities = initial_settings_dict[self.drawing_variable_name]\n self.drawer.initialize_entities(initial_entities)\n \n return\n \n # ................................................................................................................. \n \n def update_control(self):\n \n # Get changes in zone data\n variables_changed_dict = {}\n if self.drawer.on_change():\n variables_changed_dict.update({self.drawing_variable_name: self.drawer.get_entities_list()})\n \n return variables_changed_dict\n \n # ................................................................................................................. \n \n def keypress(self, key_code, modifier_code):\n self.drawer.keypress_callback(key_code, modifier_code)\n \n # ................................................................................................................. 
\n \n def imshow(self, display_frame):\n \n # Check if the window exists (by looking for window properties)\n window_exists = self.exists()\n \n # Don't do anything if a valid frame isn't supplied\n if display_frame is None:\n return self.exists()\n \n # Make sure we're using the right frame size, since the drawing depends on it!\n if not self.window_wh_is_set:\n display_height, display_width = display_frame.shape[0:2]\n self.set_window_wh(display_width, display_height)\n self.drawer.update_frame_wh(display_width, display_height)\n \n # Only update showing if the window exists\n if window_exists:\n drawn_frame = self.drawer.annotate(display_frame)\n cv2.imshow(self.window_name, drawn_frame)\n \n return window_exists\n \n # ................................................................................................................. \n \n def create_window(self):\n \n # Create window\n cv2.namedWindow(self.window_name)\n self.imshow_blank()\n self.move_corner_pixels(x_pixels = 50, y_pixels = 50, create_if_missing = False)\n \n # Add drawing callback\n cv2.setMouseCallback(self.window_name, self.drawer)\n \n return self\n \n # ................................................................................................................. \n # ................................................................................................................. \n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Define callback handlers\n\nclass Mouse_Follower:\n \n # .................................................................................................................\n \n def __init__(self):\n \n # Allocate storage for mouse position and whether following is enabled or not\n self.mouse_xy = np.array((0, 0), dtype=np.int32)\n self.follow_state = True\n \n # .................................................................................................................\n \n def __call__(self, *args, **kwargs):\n ''' Convenience wrapper. Allows object to be used as a callback function directly '''\n return self.callback(*args, **kwargs)\n \n # .................................................................................................................\n \n def callback(self, event, mx, my, flags, param):\n \n # Record mouse xy position\n if self.follow_state:\n self.mouse_xy = np.int32((mx, my))\n \n # Toggle following state on left click\n if event == cv2.EVENT_LBUTTONDOWN:\n self.follow_state = (not self.follow_state)\n \n # .................................................................................................................\n \n def draw_mouse_xy(self, display_frame, point_radius = 5, point_color = (255, 0, 255)):\n \n ''' Function to help with debugging. 
Displays a point at the mouse location, along with x/y co-ordinates '''\n \n xy_tuple = tuple(self.xy)\n text_xy = (xy_tuple[0] + point_radius + 2, xy_tuple[1] + 5)\n \n drawn_frame = display_frame.copy()\n cv2.circle(drawn_frame, xy_tuple, point_radius, point_color, -1, cv2.LINE_AA)\n cv2.putText(drawn_frame,\n \"({:.0f}, {:.0f})\".format(*xy_tuple),\n text_xy,\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n (255, 255, 255),\n 1,\n cv2.LINE_AA)\n \n return drawn_frame\n \n # .................................................................................................................\n \n @property\n def xy(self):\n return self.mouse_xy\n \n # .................................................................................................................\n # .................................................................................................................\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Define functions\n\n# .....................................................................................................................\n \ndef simple_affine(min_value, max_value, step_size = 1, return_type = None):\n \n # Using y = mx + b (y -> mapped value, x -> raw/trackbar value)\n # Where y = min when x = 0\n # y = max_value when x = (max_value - min_value) / step_size\n # So:\n # min = 0 + b\n # max = m * (1 / step) * (max - min) + b\n \n # Therefore\n # b = min\n # m = step * (max - b) / (max - min) = step\n #\n # b = min, m = step\n # y = step * x + min\n # x = (y - min) / step\n \n def raw_to_map_func(raw_value):\n map_value = step_size * raw_value + min_value\n return return_type(map_value) if return_type else map_value\n \n def map_to_raw_func(map_value): \n raw_value = (map_value - min_value) / step_size\n return (int(round(raw_value)))\n \n return raw_to_map_func, map_to_raw_func\n\n# .....................................................................................................................\n \ndef minceil_affine(min_value, max_value, step_size = 1, return_type = None):\n \n # Same as an affine mapping, but won't allow the value to go below the minimum value\n # Intended for cases where the trackbar is pinned to 0 for display purposes.\n \n # Pre-calculate/generate some useful variables\n simple_raw_to_map_func, simple_map_to_raw_func = simple_affine(min_value, max_value, step_size, return_type)\n min_raw_value = min_value / step_size\n min_offset = (min_value / step_size)\n \n def raw_to_map_func(raw_value): \n ceil_raw_value = max(min_raw_value, raw_value) - min_offset\n map_value = simple_raw_to_map_func(ceil_raw_value)\n return map_value\n \n def map_to_raw_func(map_value):\n raw_value = simple_map_to_raw_func(map_value) + min_offset\n return int(round(raw_value))\n \n return raw_to_map_func, map_to_raw_func\n\n# .....................................................................................................................\n \ndef value_list_lookup(value_list):\n \n def raw_to_map_func(raw_value):\n # For menus, the raw value is the trackbar location,\n # which selects a value from the value list as a simple (list) index\n return value_list[raw_value]\n \n def map_to_raw_func(map_value):\n # For menus, the mapped value is an entry in the value list\n # The raw value is the trackbar location, which is also just the index of the value in the list\n return value_list.index(map_value)\n \n return raw_to_map_func, map_to_raw_func\n\n# 
.....................................................................................................................\n \ndef bool_to_int():\n \n def raw_to_map_func(raw_value):\n return bool(raw_value)\n \n def map_to_raw_func(map_value):\n return int(map_value)\n \n return raw_to_map_func, map_to_raw_func\n\n# .....................................................................................................................\n \ndef button_map(control_label, set_trackbar_func):\n \n # Button controls are not supported by the local UI yet, so bail out immediately\n raise NotImplementedError(\"Button controls are not implemented for the local UI\")\n def raw_to_map_func(raw_value):\n button_state = bool(raw_value)\n set_trackbar_func(control_label, 0) # Does this work?\n return button_state\n \n def map_to_raw_func(map_value):\n return int(map_value)\n \n return raw_to_map_func, map_to_raw_func\n\n# .....................................................................................................................\n \ndef return_type_strings_to_functions(return_type_str):\n \n ret_str_lut = {None: None,\n \"string\": str,\n \"integer\": int,\n \"float\": float,\n \"bool\": bool,\n \"list\": list,\n \"tuple\": tuple}\n \n return ret_str_lut[return_type_str]\n\n# .....................................................................................................................\n# .....................................................................................................................\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Demo\n\nif __name__ == \"__main__\":\n \n # Set display parameters\n frame_width, frame_height = 600, 300\n blank_frame = np.full((frame_height, frame_width, 3), (33, 166, 83), dtype=np.uint8)\n frame_wh = (frame_width, frame_height)\n \n # Set up example mouse follower\n follower = Mouse_Follower()\n \n # Window creation & callback assignment\n window_name = \"FOLLOWER EXAMPLE\"\n cv2.namedWindow(window_name) \n cv2.setMouseCallback(window_name, follower)\n \n while True:\n \n # Get a clean copy of the background frame\n display_frame = blank_frame.copy()\n \n # Draw mouse location as an example\n drawn_frame = follower.draw_mouse_xy(display_frame)\n cv2.imshow(window_name, drawn_frame)\n \n # Get keypress\n keypress = cv2.waitKey(40)\n esc_key_press = (keypress == 27)\n q_key_pressed = (keypress == 113)\n if esc_key_press or q_key_pressed:\n break\n \n # Clean up windows\n cv2.destroyAllWindows()\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Scrap\n\n\n","sub_path":"local/lib/ui_utils/local_ui/windows_base.py","file_name":"windows_base.py","file_ext":"py","file_size_in_byte":51074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
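To make the affine trackbar mapping concrete, here is a quick round-trip check, assuming simple_affine from the record above is in scope: a slider covering the mapped range [0.5, 2.0] in steps of 0.05 occupies raw trackbar positions 0 through 30.

raw_to_map, map_to_raw = simple_affine(min_value=0.5, max_value=2.0,
                                       step_size=0.05, return_type=float)
assert map_to_raw(0.5) == 0                # mapped minimum sits at raw position 0
assert map_to_raw(2.0) == 30               # (2.0 - 0.5) / 0.05 raw steps
assert abs(raw_to_map(15) - 1.25) < 1e-9   # y = step * x + min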
+{"seq_id":"459894856","text":"import pymysql\r\nimport random\r\nconnection = pymysql.connect(\"46.4.115.158\",\"beo\", \"beo@123\",\"testdb\")\r\ntable_name=str(input(\"Enter the Table name:\"))\r\naction = connection.cursor()\r\nrows=int(input(\"How many rows want to add:\"))\r\nsql=\"\"\"desc \"\"\"+table_name\r\naction.execute(sql)\r\nresult=action.fetchall()\r\naa=int(action.execute(sql))\r\n#print(aa)\r\nsql=\"\"\"insert into \"\"\"+table_name+\" values\"\r\nfor i in range(1,rows+1):\r\n count = 1\r\n sql = sql + \"(\"\r\n print(\"ROW: \", i)\r\n for j in result:\r\n print(\"Enter\",j[0],end='')\r\n value=input()\r\n sql=sql+\"'\"+value+\"'\"\r\n if(count 0.75 and SPE > 0.75:\n print('SEN:%f' % (SEN))\n print('SPE:%f' % (SPE))\n print(\"Acc:%f\" % acc)\n print('cutoff:%f' % (cutoff))\n print('PPV:%f' % (TP / (TP + FP)))\n print('NPV:%f' % (TN / (TN + FN)))\n print(\"tp:%d tn:%d fp:%d fn:%d\" % (TP, TN, FP, FN))\n shrink = True\n return cutoff, shrink, SEN, SPE, acc, TP, TN, FP, FN\n\n\ndef evaluate_on_testset(fpr, tpr, threshold, cutoff, pos_num, neg_num):\n '''find tpr and fpr'''\n temp_pos = 0\n for cont, thre in enumerate(threshold):\n if thre < cutoff:\n temp_pos = cont\n break\n if temp_pos == 0:\n temp_pos = 1\n print('temp_pos is 0')\n # print(temp_pos)\n proportion = (threshold[temp_pos - 1] - cutoff) / (threshold[temp_pos - 1] - threshold[temp_pos])\n # print(proportion)\n sen = tpr[temp_pos - 1] + (tpr[temp_pos] - tpr[temp_pos - 1]) * proportion\n spe = 1 - (fpr[temp_pos - 1] + (fpr[temp_pos] - fpr[temp_pos - 1]) * proportion)\n TP = sen * pos_num\n TN = spe * neg_num\n FP = neg_num - TN\n FN = pos_num - TP\n acc = (TP + TN) / (FP + FN + TP + TN)\n shrink = False\n if sen > 0.75 and spe > 0.7:\n print('SEN:%f' % (sen))\n print('SPE:%f' % (spe))\n print(\"Acc:%f\" % acc)\n print('PPV:%f' % (TP / (TP + FP)))\n print('NPV:%f' % (TN / (TN + FN)))\n print(\"tp:%d tn:%d fp:%d fn:%d\" % (TP, TN, FP, FN))\n shrink = True\n return shrink, sen, spe, acc, TP, TN, FP, FN\n\nif __name__ == '__main__':\n path = \"E:\\\\img_breast_dataset\"\n print(\"get data set\")\n train_neg_set, train_pos_set, test_pos_set, test_neg_set = get_data_set(path=path, times=0)\n total_train_set = train_neg_set + train_pos_set\n total_test_set = test_neg_set + test_pos_set\n print(\"get data set completed\")\n print(\"get vgg features\")\n vgg_feature_selector = vgg_models.vgg_16_bn(num_class=2, pretrained=True, pool_out=True, first_fc_out=False)\n num_features = get_vgg_feature(vgg_feature_selector, total_train_set, total_test_set, batchsize=16)\n print(\"get vgg features completed\")\n print(num_features)\n # Classifer model\n model = pool_feature_model.pool_feature_model_2(num_features=num_features, hidden=1800, num_class=2)\n # Learning rate and momentum\n lr = 0.1\n classifier_optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n\n length = len(train_neg_set)\n i = 0\n t = 0\n con = False\n\n TR_sen, TR_spe, TR_acc, TR_tp, TR_tn, TR_fp, TR_fn = 0, 0, 0, 0, 0, 0, 0\n TE_sen, TE_spe, TE_acc, TE_tp, TE_tn, TE_fp, TE_fn = 0, 0, 0, 0, 0, 0, 0\n roc_auc_TE, roc_auc_TR = 0, 0\n G_y_t1, G_y_p1, G_y_t2, G_y_p2 = 0, 0, 0, 0\n\n for epoch in range(1, 2000):\n if (epoch + 1) % 20 == 0:\n for param_group in classifier_optimizer.param_groups:\n param_group['lr'] = lr / math.sqrt(epoch)\n print(\"training\")\n pos_set, i = get_batch(i=i, train_pos_set=train_pos_set, length=length)\n\n train_set = train_neg_set + pos_set\n random.shuffle(train_set)\n\n train_batch(epoch, train_set, 
optimizer=classifier_optimizer, model=model, batchsize=16)\n\n        print(\"Testing trainset\")\n        y_t1, y_p1 = test(test_set=total_train_set, model=model)\n        fpr, tpr, threshold = roc_curve(y_t1, y_p1, pos_label=1)\n        cutoff, shrink_tr, tr_sen, tr_spe, tr_acc, tr_tp, tr_tn, tr_fp, tr_fn = evaluate_on_trainset(fpr=fpr, tpr=tpr, threshold=threshold,\n                                                                              pos_num=len(train_pos_set),\n                                                                              neg_num=len(train_neg_set))\n        roc_auc_tr = auc(fpr, tpr)\n        print(\"auc: %f\" % roc_auc_tr)\n\n        print(\"Testing testset\")\n        y_t2, y_p2 = test(test_set=total_test_set, model=model)\n        fpr, tpr, threshold = roc_curve(y_t2, y_p2, pos_label=1)\n        shrink_te, te_sen, te_spe, te_acc, te_tp, te_tn, te_fp, te_fn = evaluate_on_testset(fpr=fpr, tpr=tpr, threshold=threshold, cutoff=cutoff,\n                                                                          pos_num=len(test_pos_set),\n                                                                          neg_num=len(test_neg_set))\n        roc_auc_te = auc(fpr, tpr)\n        print(\"auc: %f\" % roc_auc_te)\n\n        if t < 10:\n            if shrink_tr and shrink_te:\n                if roc_auc_te > roc_auc_TE and roc_auc_tr > roc_auc_TR:\n                    t = 0\n                    con = True\n                    # update\n                    roc_auc_TE = roc_auc_te\n                    roc_auc_TR = roc_auc_tr\n                    TR_sen, TR_spe, TR_acc, TR_tp, TR_tn, TR_fp, TR_fn = tr_sen, tr_spe, tr_acc, tr_tp, tr_tn, tr_fp, tr_fn\n                    TE_sen, TE_spe, TE_acc, TE_tp, TE_tn, TE_fp, TE_fn = te_sen, te_spe, te_acc, te_tp, te_tn, te_fp, te_fn\n                    G_y_t1, G_y_p1, G_y_t2, G_y_p2 = y_t1, y_p1, y_t2, y_p2\n                    lr = lr / 2\n                elif con:\n                    t += 1\n        \n        else:\n            print(\"Best result:\")\n            print(\"AUC:%f\" % roc_auc_TR)\n            print('SEN:%f' % (TR_sen))\n            print('SPE:%f' % (TR_spe))\n            print(\"Acc:%f\" % TR_acc)\n            print('PPV:%f' % (TR_tp / (TR_tp + TR_fp)))\n            print('NPV:%f' % (TR_tn / (TR_tn + TR_fn)))\n            print(\"tp:%d tn:%d fp:%d fn:%d\" % (TR_tp, TR_tn, TR_fp, TR_fn))\n            print(\"\\n\")\n            print(\"AUC:%f\" % roc_auc_TE)\n            print('SEN:%f' % (TE_sen))\n            print('SPE:%f' % (TE_spe))\n            print(\"Acc:%f\" % TE_acc)\n            print('PPV:%f' % (TE_tp / (TE_tp + TE_fp)))\n            print('NPV:%f' % (TE_tn / (TE_tn + TE_fn)))\n            print(\"tp:%d tn:%d fp:%d fn:%d\" % (TE_tp, TE_tn, TE_fp, TE_fn))\n            p_plt(G_y_t1, G_y_p1, G_y_t2, G_y_p2)\n            print(time.asctime(time.localtime(time.time())))\n            auc_cal(G_y_t1, G_y_p1)\n            auc_cal(G_y_t2, G_y_p2)\n            plt.show()\n            print(time.asctime(time.localtime(time.time())))","sub_path":"transfer_find_best.py","file_name":"transfer_find_best.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
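evaluate_on_trainset above scans the ROC thresholds for the point with the best sensitivity/specificity trade-off; the same cutoff choice can be written compactly as Youden's J with scikit-learn. A self-contained sketch with invented toy labels and scores:

import numpy as np
from sklearn.metrics import roc_curve

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.9, 0.6])
fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=1)
j = tpr - fpr                        # Youden's J = SEN + SPE - 1
best = int(np.argmax(j))
print("cutoff=%.2f SEN=%.2f SPE=%.2f" % (thresholds[best], tpr[best], 1 - fpr[best]))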
+{"seq_id":"200561110","text":"import os\nimport bpy\nfrom bpy.props import BoolProperty, IntProperty\n\nfrom .functions.global_settings import ProjectSettings, Extensions\nfrom .functions.file_management import *\nfrom .functions.sequences import find_empty_channel\n\n\n# TODO: Fix img imported from subfolder -\nclass ImportLocalFootage(bpy.types.Operator):\n bl_idname = \"power_sequencer.import_local_footage\"\n bl_label = \"PS.Import local footage\"\n bl_description = \"Import video and audio from the project \\\n folder to VSE strips\"\n bl_options = {'REGISTER', 'UNDO'}\n\n import_all = BoolProperty(\n name=\"Always Reimport\",\n description=\"If true, always import all local files to new strips. \\\n If False, only import new files (check if footage has \\\n already been imported to the VSE).\",\n default=False)\n keep_audio = BoolProperty(\n name=\"Keep audio from video files\",\n description=\"If False, the audio that comes with video files \\\n will not be imported\",\n default=True)\n\n img_length = IntProperty(\n name=\"Image strip length\",\n description=\"Controls the duration of the imported image strip\",\n default=96,\n min=1)\n img_padding = IntProperty(\n name=\"Image strip padding\",\n description=\"Padding added between imported image strips in frames\",\n default=24,\n min=1)\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n if not bpy.data.is_saved:\n self.report(\n {\"ERROR_INVALID_INPUT\"},\n \"You need to save your project first. Import cancelled.\")\n return {\"CANCELLED\"}\n\n sequencer = bpy.ops.sequencer\n context = bpy.context\n frame_current = bpy.context.scene.frame_current\n empty_channel = find_empty_channel()\n\n bpy.ops.screen.animation_cancel(restore_frame=True)\n\n # Store reference to the Sequencer area to import files to\n for window in bpy.context.window_manager.windows:\n screen = window.screen\n for area in screen.areas:\n if area.type == 'SEQUENCE_EDITOR':\n SEQUENCER_AREA = {'window': window,\n 'screen': screen,\n 'area': area,\n 'scene': bpy.context.scene}\n\n\n # Find folders for audio, img and video strips\n directory = get_working_directory()\n folders, files, files_dict = {}, {}, {}\n file_types = \"AUDIO\", \"IMG\", \"VIDEO\"\n\n for folder in os.listdir(path=directory):\n folder_upper = folder.upper()\n if folder_upper in file_types:\n folders[folder_upper] = os.path.join(directory, folder)\n\n for file_type in file_types:\n if file_type not in folders.keys():\n continue\n files[file_type] = find_files(folders[file_type],\n Extensions.DICT[file_type],\n recursive=file_type == \"IMG\")\n\n # TODO: walk the project dir tree and collect all files that have a supported Extension\n #\n # files, files_dict = {}, {}\n # file_types = \"AUDIO\", \"IMG\", \"VIDEO\"\n # for file_type in file_types:\n # files[file_type] = find_files_temp(get_working_directory(),\n # Extensions.DICT[file_type])\n # filepaths = []\n # for dirpath, dirname, filenames in os.walk(directory, topdown=True):\n # for filename in filenames:\n # files.append(os.path.join(dirparth, filename)\n\n # Find or create new text files to keep track of imported material\n TEXT_FILE_PREFIX = 'IMPORT_'\n texts = bpy.data.texts\n import_files = {}\n for file_type in file_types:\n if texts.get(TEXT_FILE_PREFIX + file_type):\n import_files[file_type] = texts[TEXT_FILE_PREFIX + file_type]\n\n if not import_files:\n for name in file_types:\n import_files[name] = create_text_file(TEXT_FILE_PREFIX + name)\n assert len(import_files) == 3\n\n # Write new 
imported paths to the text files and import new strips\n        channel_offset = 0\n        new_sequences, new_video_sequences = [], []\n        for name in file_types:\n            if name not in folders.keys():\n                continue\n\n            text_file_content = [\n                line.body\n                for line in bpy.data.texts[TEXT_FILE_PREFIX + name].lines\n            ]\n            new_paths = [path\n                         for path in files[name]\n                         if path not in text_file_content]\n            for line in new_paths:\n                bpy.data.texts[TEXT_FILE_PREFIX + name].write(line + \"\\n\")\n\n            if not new_paths:\n                continue\n\n            # Import new strips if new files were found\n            import_channel = empty_channel + channel_offset\n            folder = folders[name]\n            files_dict = files_to_dict(new_paths, folder)\n\n            if name == \"VIDEO\":\n                import_channel += 1 if self.keep_audio else 0\n                sequencer.movie_strip_add(SEQUENCER_AREA,\n                                          filepath=folder,\n                                          files=files_dict,\n                                          frame_start=frame_current,\n                                          channel=import_channel,\n                                          sound=self.keep_audio)\n                new_sequences.extend(bpy.context.selected_sequences)\n                # Blender places audio tracks on top, we want them under video\n                new_video_sequences.extend(bpy.context.selected_sequences)\n            elif name == \"AUDIO\":\n                sequencer.sound_strip_add(\n                    SEQUENCER_AREA,\n                    filepath=folder,\n                    files=files_dict,\n                    frame_start=frame_current,\n                    channel=import_channel)\n                new_sequences.extend(bpy.context.selected_sequences)\n            elif name == \"IMG\":\n                img_frame = frame_current\n                for img in files_dict:\n                    path = os.path.join(folder, img['subfolder'])\n                    # FIXME: temp hack so images import properly\n                    file = [{'name': img['name'].replace(\"img\\\\\", \"\")}]\n                    sequencer.image_strip_add(\n                        SEQUENCER_AREA,\n                        directory=path,\n                        files=file,\n                        frame_start=img_frame,\n                        frame_end=img_frame + self.img_length,\n                        channel=import_channel)\n                    new_sequences.extend(bpy.context.selected_sequences)\n                    img_frame += self.img_length + self.img_padding\n            channel_offset += 1\n\n        # Swap channels for audio and video tracks\n        if not new_video_sequences:\n            return {\"FINISHED\"}\n\n        # Reorder the sequences so all MOVIE strips are on top\n        sequencer.select_all(action='DESELECT')\n        for s in new_video_sequences:\n            s.select = True\n        sequencer.meta_make()\n        sequencer.meta_toggle()\n        videos_in_meta = [s for s in bpy.context.selected_sequences if s.type == 'MOVIE']\n        for s in videos_in_meta:\n            s.channel += 2\n        for s in new_video_sequences:\n            s.channel -= 1\n        sequencer.meta_toggle()\n        sequencer.meta_separate()\n\n        # Set the strips to use proxies if enabled in the addon preferences\n        prefs = context.user_preferences.addons[__package__].preferences\n        if prefs.auto_render_proxies:\n            bpy.ops.power_sequencer.set_video_proxies()\n\n        # Show audio waveforms\n        for s in [strip for strip in new_sequences if strip.type == 'SOUND']:\n            s.show_waveform = True\n\n        for s in new_sequences:\n            s.select = True\n        return {\"FINISHED\"}\n\n\n# TODO: Ignore the blender proxy folders\n# TODO: Detect img sequences\ndef find_files(directory,\n               file_extensions,\n               recursive=False,\n               ignore_folders=('_proxy', 'BL_proxy')):\n    \"\"\"\n    Walks through a folder and returns a list of filepaths\n    that match the extensions.\n    Args:\n        - file_extensions is a tuple of extensions with the form \"*.ext\".\n          Use the Extensions helper class in .functions.global_settings.\n          It gives default extensions to check the files against.\n    Returns a list of file paths, or [] if nothing was found\n    \"\"\"\n    # Guard both inputs; \"not directory and file_extensions\" would only catch\n    # the case where directory is missing AND file_extensions is present.\n    if not (directory and file_extensions):\n        return []\n\n    files = []\n\n    from glob import glob\n    from os.path import basename\n\n    # TODO: Folder containing img files = img sequence?\n    for ext in file_extensions:\n        source_pattern = directory + \"/\"\n        pattern = source_pattern + ext\n        files.extend(glob(pattern))\n        if not recursive:\n            continue\n        pattern = source_pattern + \"**/\" + ext\n        files.extend(glob(pattern))\n\n    if basename(directory) == \"IMG\":\n        psd_names = [f for f in glob(directory + \"/*.psd\")]\n        for i, name in enumerate(psd_names):\n            psd_names[i] = name[len(directory):-4]\n\n        psd_folders = (f for f in os.listdir(directory) if f in psd_names)\n        for f in psd_folders:\n            for ext in file_extensions:\n                files.extend(glob(directory + \"/\" + f + \"/\" + ext))\n    return files\n\n\n# TODO: issue with img vs other strip types: img have separate filepath and filename slots\n# but video/audio only have direct filepath e.g. audio/file.wav\ndef files_to_dict(files, folder_path):\n    \"\"\"Converts a list of files to Blender's dictionary format for import\n    Returns a list of dictionaries with the\n    {'name': filename, 'subfolder': subfolder} format\n    If the provided files are placed at the root of the import folders,\n    subfolder will be an empty string\n    Args:\n        - files: a list or a tuple of files\n        - folder_path: a string of the path to the files' containing folder\"\"\"\n    # Same precedence fix as in find_files: require both arguments.\n    if not (files and folder_path):\n        return []\n\n    dictionary = []\n    for f in files:\n        filepath_tail = f[len(folder_path) + 1:]\n        head, tail = os.path.split(filepath_tail)\n\n        project_path, subfolder_name = os.path.split(folder_path)\n        dict_form = {'name': os.path.join(subfolder_name, tail), 'subfolder': head}\n        dictionary.append(dict_form)\n    return dictionary\n","sub_path":"load_files.py","file_name":"load_files.py","file_ext":"py","file_size_in_byte":10680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
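One caveat on the '**/' patterns built by find_files above: Python's glob treats '**' as a multi-level wildcard only when recursive=True is passed (available since 3.5); without it, '**' matches like a single '*'. A short illustration with an invented directory:

from glob import glob

one_level = glob("/some/dir/**/*.mp4")                  # '**' acts like '*': one level deep
any_depth = glob("/some/dir/**/*.mp4", recursive=True)  # true recursive match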
+{"seq_id":"365434998","text":"# encoding: utf-8\nimport pandas as pd\nfrom collections import Counter\nimport time\nimport sys\nimport re\nimport pymorphy2\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom datetime import datetime\nfrom sklearn.base import TransformerMixin\nfrom sklearn.cluster import AgglomerativeClustering, DBSCAN\nfrom sklearn.pipeline import Pipeline\nfrom functools import wraps\n\n\nclass DenseTransformer(TransformerMixin):\n\n def fit(self, X, y=None, **fit_params):\n return self\n\n def transform(self, X, y=None, **fit_params):\n return X.todense()\n\n\ndef timethis(func):\n \"\"\"\n Decorator that reports the execution time.\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n print(\"Executing {}...\".format(func.__name__))\n result = func(*args, **kwargs)\n end = time.time()\n print('{} took {}'.format(func.__name__, end - start))\n return result\n\n return wrapper\n\n\nmorph = pymorphy2.MorphAnalyzer()\ncount = Counter()\nnrows_to_load = None\nrandom_sample = None\nnum_clasters_for_kMeans = 20\nengine_for_pd = 'c'\n# engine_for_pd = 'python'\ndata_file_name = r'D:\\data\\oscar'\n\n\ndef text_cleaner(text):\n text = str(text).lower()\n text = re.sub(r'[\\W]+', ' ', text) # удаление лишних символов\n\n text = ' '.join(list(map(lambda x: morph.parse(x)[0].normal_form, text.split())))\n stw = ['в', 'по', 'на', 'из', 'и', 'или', 'не', 'но', 'за', 'над', 'под', 'то', 'для', \"как\",\n 'a', 'at', 'on', 'of', 'and', 'or', 'in', 'for', 'at']\n remove = r'\\b(' + '|'.join(stw) + ')\\b'\n text = re.sub(remove, ' ', text)\n\n text = re.sub(r'\\b\\w\\b', ' ', text) # удаление отдельно стоящих букв\n\n # text = re.sub(r'\\b\\d+\\b', ' digit ', text) # замена цифр\n return text\n\n\n@timethis\ndef load_from_csv(file):\n df = pd.read_csv(file, '\\t', parse_dates=['datetime'], index_col='datetime',\n converters={'normal_query': str},\n nrows=nrows_to_load,\n engine=engine_for_pd,\n )\n # df.info()\n print(\"считано: {}\".format(df.shape))\n return df\n\n\n@timethis\ndef load_data_and_lemmatize(file='oscar1'):\n df = pd.read_csv(file, '\\t', parse_dates=['datetime'], index_col='datetime',\n converters={'normal_query': text_cleaner},\n nrows=nrows_to_load,\n engine=engine_for_pd,\n )\n # df.info()\n return df\n\n\n@timethis\ndef save_data(data, file='oscar2'):\n data.to_csv(file, sep='\\t')\n\n\n@timethis\ndef learn_and_predict_DBSCAN(data, *args):\n text_clstz = Pipeline([\n ('tfidf', TfidfVectorizer(binary=True)),\n # ('svd', TruncatedSVD(n_components=100, random_state=123)),\n # ('to_dense', DenseTransformer()),\n ('DBSCAN', DBSCAN(eps=0.2, min_samples=5, metric='euclidean'))\n ])\n data['tag'] = text_clstz.fit_predict(data['normal_query'])\n print(\"{} clusters\".format(len(set(data['tag'].tolist()))))\n return data\n\n\n@timethis\ndef learn_and_predict_AgglomerativeClustering(data, num_clasters_for_kMeans):\n text_clstz = Pipeline([\n ('tfidf', TfidfVectorizer(binary=True)),\n # ('svd', TruncatedSVD(n_components=100, random_state=123)),\n ('to_dense', DenseTransformer()),\n ('afp', AgglomerativeClustering(n_clusters=num_clasters_for_kMeans,\n affinity='cosine',\n linkage='complete'))\n ])\n data['tag'] = text_clstz.fit_predict(data['normal_query'])\n print(\"{} clusters\".format(len(set(data['tag'].tolist()))))\n return data\n\n\ndef commons(text):\n count.clear()\n count.update(filter(lambda x: len(x) > 2, text.split()))\n text = str(count.most_common(10))\n return text\n\n\ndef count_words(text):\n count.update(filter(lambda x: 
len(x) > 2, text.split())) # take words more than 2 symbols\n return text\n\n\n@timethis\ndef save_file(dt, excel_file_to):\n writer = pd.ExcelWriter(excel_file_to, engine='xlsxwriter')\n dt.to_excel(writer, 'Sheet1')\n writer.save()\n\n\n@timethis\ndef filter_data_oscar(data):\n return data[data['normal_query'].str.contains('oscar|оскар')].sort_index()\n\n\n@timethis\ndef main():\n data = load_from_csv(data_file_name) # load all data\n only_oscars = filter_data_oscar(data) # filter only that contains oscars\n save_data(only_oscars, data_file_name + '_oscar') # save obly oscar to file\n data = '' # free memory from trash\n only_oscars = load_from_csv(data_file_name + '_oscar') # load again only oscars from file\n only_oscars['normal_query'] = only_oscars['normal_query'].map(\n lambda x: text_cleaner(x)) # lemmatize only oscars\n\n save_data(only_oscars, data_file_name + '_oscar_normal') # save lemmatized oscars\n only_oscars = only_oscars.sort_index()\n # on MOW time oscar was from 03/00 25/02/19 till 07/00 25/02/2019\n # lets assume that time in this table is MOW\n delimited = [only_oscars[:datetime(2019, 2, 25, 2, 59, 59)].copy(deep=True), # before\n only_oscars[datetime(2019, 2, 25, 3, 0, 0):datetime(2019, 2, 25, 6, 59, 59)].copy(\n deep=True), # during\n only_oscars[datetime(2019, 2, 25, 7, 0, 0):].copy(deep=True)] # after\n\n names_of_periods = {0: 'BEFORE\\n\\n', 1: 'DURING\\n\\n', 2: 'AFTER\\n\\n'}\n # print info about these datasets\n for (num, d) in enumerate(delimited):\n print(names_of_periods[num])\n d.info()\n print(d.head())\n\n for (num, d) in enumerate(delimited):\n print(names_of_periods[num])\n count.clear()\n fn = names_of_periods[num].strip().lower()\n d['normal_query'].map(lambda x: count_words(x))\n save_file(pd.DataFrame(count.most_common()),\n r'd:\\data\\oscar_freq_{}.xlsx'.format(fn)) # save frequency of words to excel\n\n # lets try to make Hierarchical clustering\n data = learn_and_predict_AgglomerativeClustering(d, num_clasters_for_kMeans)\n save_data(data.sort_values('tag'), r'D:\\data\\oscarAgglomerativeClustering_{}'.format(fn))\n # most common words in clusters\n df = data.groupby('tag')['normal_query'].apply(lambda words: ' '.join(words))\n df = pd.DataFrame(df)\n df['normal_query'] = df['normal_query'].apply(commons)\n\n save_data(df, data_file_name + 'Agglomerative_Group_{}'.format(fn))\n\n #also lets try DBSCAN clustering\n data = learn_and_predict_DBSCAN(d, num_clasters_for_kMeans)\n save_data(data.sort_values('tag'), r'D:\\data\\oscarDBSCANClustering_{}'.format(fn))\n # most common words in clusters\n df = data.groupby('tag')['normal_query'].apply(lambda words: ' '.join(words))\n df = pd.DataFrame(df)\n df['normal_query'] = df['normal_query'].apply(commons)\n\n save_data(df, data_file_name + 'DBSCAN_Group_{}'.format(fn))\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"tsts/clasterizer-text/tst_oscar_new.py","file_name":"tst_oscar_new.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
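A minimal, self-contained version of the TF-IDF + DBSCAN pipeline used in learn_and_predict_DBSCAN above; the three toy documents are invented, and eps would need tuning on a real corpus:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import DBSCAN
from sklearn.pipeline import Pipeline

docs = ["oscar best picture", "oscar red carpet", "weather tomorrow"]
clusterer = Pipeline([
    ("tfidf", TfidfVectorizer(binary=True)),
    ("dbscan", DBSCAN(eps=0.9, min_samples=2, metric="euclidean")),
])
labels = clusterer.fit_predict(docs)
print(labels)  # -1 marks noise points in DBSCAN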
+{"seq_id":"341861071","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as f:\n readme = f.read()\n\nsetup(\n name='pytemplate',\n version='0.0.1',\n description='Generic template for python projects',\n long_description=readme,\n author='Quadyster Cloud Devs',\n author_email='',\n url='https://github.com/raghava-aparna/python-project-template',\n packages=find_packages(exclude=('tests', 'docs'))\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"34113920","text":"from __future__ import print_function\nfrom app import mongo, db\nfrom datetime import datetime\nfrom bson.objectid import ObjectId\nfrom random import randint\nfrom rtw import *\nimport sys\n\n#subCategoryBelongsTo(identifications): dict | None\n#pickRandomCategory(): dict | None\n#pickRandomSubCategory(identifications): dict | None\n\n#createUser(identifications): ObjectId\n#updateUser(identifications, updates): bool\n#getUser(identifications): dict | None\n#isUserOnline(identifications): bool\n#changeUserStatus(identifications, online): bool\n#updateScore(identifications, score): bool\n\n#createGame(identifications): ObjectId\n#updateGame(identifications, updates): bool\n#getGame(identifications): dict | None\n#findWaitingGame(identifications, user): dict | None\n#joinGame(identifications, user): bool\n#isGameReady(identifications): bool\n#startGame(identifications): bool\n#checkGameStatus(identifications): int\n#userFromGame(identifications, number): ObjectId | None\n#finishGame(identifications): bool\n\ndef subCategoryBelongsTo(identifications):\n\t#cursor = mongo.db.subcategories.find(identifications)\n\tcursor = db.subcategories.find(identifications)\n\tif cursor.count() > 0:\n\t\tidentifications = dict()\n\t\tidentifications[\"_id\"] = cursor[0][\"category\"]\n\t\t#cursor = mongo.db.categories.find(identifications)\n\t\tcursor = db.categories.find(identifications)\n\t\tif cursor.count() > 0:\n\t\t\treturn cursor[0]\n\t\treturn None\n\treturn None\n\ndef pickRandomCategory():\n\t#cursor = mongo.db.categories.find({})\n\tcursor = db.categories.find({})\n\tif cursor.count() == 0:\n\t\treturn None\n\trand = randint(0, cursor.count() - 1)\n\treturn cursor[rand]\t\n\ndef pickRandomSubCategory(identifications):\n\t#cursor = mongo.db.subcategories.find(identifications)\n\tcursor = db.subcategories.find(identifications)\n\tif cursor.count() == 0:\n\t\treturn None\n\trand = randint(0, cursor.count() - 1)\n\treturn cursor[rand]\n\ndef createUser(identifications):\n\tidentifications[\"online\"] = False\n\tidentifications[\"score\"] = 0\n\tregDate = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\t\n\tidentifications[\"regDate\"] = regDate\n\t#_id = mongo.db.users.insert_one(identifications)\n\t_id = db.users.insert_one(identifications)\n\treturn _id.inserted_id\n\ndef updateUser(identifications, updates):\n\t#cursor = mongo.db.users.find(identifications)\n\tcursor = db.users.find(identifications)\n\tif cursor.count() > 0:\n\t\t#mongo.db.users.update_one(identifications, {\"$set\": updates})\n\t\tdb.users.update_one(identifications, {\"$set\": updates})\n\t\treturn True\n\treturn False\n\ndef getUser(identifications):\n\t#cursor = mongo.db.users.find(identifications)\n\tcursor = db.users.find(identifications)\n\tif cursor.count() > 0:\n\t\treturn cursor[0]\n\treturn None\n\ndef isUserOnline(identifications):\n\tuser = getUser(identifications)\n\treturn user != None and user[\"online\"]\n\ndef changeUserStatus(identifications, online):\n\t#cursor = mongo.db.users.find(identifications)\n\tcursor = db.users.find(identifications)\n\tupdates = dict()\n\tupdates[\"online\"] = online\n\treturn updateUser(identifications, updates)\n\ndef updateScore(identifications, score):\n\t#cursor = mongo.db.users.find(identifications)\n\tcursor = db.users.find(identifications)\n\tif cursor.count() == 0:\n\t\treturn False\n\tupdates = dict()\n\tupdates[\"score\"] = cursor[0][\"score\"] + score\n\treturn updateUser(identifications, updates)\n\n#initial identifications: gameType, 
theme\ndef createGame(identifications):\n\tcreatedTime = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\tidentifications[\"createdTime\"] = createdTime\n\tidentifications[\"finished\"] = False\n\tidentifications[\"user1\"] = None\n\tidentifications[\"data1\"] = None\n\tidentifications[\"score1\"] = 0\n\tif not \"data2\" in identifications.keys():\n\t\tidentifications[\"data2\"] = None \t\t\t#for game type 2, data2 will be the collection of hints\n\tidentifications[\"score2\"] = 0 \t\t\t\t\t#for game type 2, score2 will store the number of hints needed for the user to give the correct answer\n\tif identifications[\"gameType\"] != 2 and identifications[\"gameType\"] != 4:\n\t\tidentifications[\"user2\"] = None\n\tidentifications[\"start\"] = None\n\tidentifications[\"finish\"] = None\n\tidentifications[\"status\"] = 0\n\t#_id = mongo.db.games.insert_one(identifications)\n\t_id = db.games.insert_one(identifications)\n\treturn _id.inserted_id\n\ndef updateGame(identifications, updates):\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = db.games.find(identifications)\n\tif cursor.count() > 0:\n\t\t#mongo.db.games.update_one(identifications, {\"$set\": updates})\n\t\tdb.games.update_one(identifications, {\"$set\": updates})\n\t\treturn True\n\treturn False\n\ndef getGame(identifications):\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = db.games.find(identifications)\n\tif cursor.count() > 0:\n\t\treturn cursor[0]\n\treturn None\n\n#initial identifications: gameType\n#STATUS:\n#0: waiting for a player\n#1: finished\n#2: being played\n#3: partially finished\n#4: game not created\ndef findWaitingGame(identifications, user):\n\tidentifications[\"status\"] = 0\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = db.games.find(identifications)\n\tfor i in range(cursor.count()):\n\t\tif cursor[i][\"user1\"] != user:\n\t\t\treturn cursor[i]\n\treturn None\n\ndef joinGame(identifications, user):\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = db.games.find(identifications)\n\tif cursor.count() > 0:\n\t\tupdates = dict()\n\t\tif cursor[0][\"user1\"] == None:\n\t\t\tupdates[\"user1\"] = user\n\t\t\treturn updateGame(identifications, updates)\n\t\telif \"user2\" in cursor[0].keys() and cursor[0][\"user2\"] == None:\n\t\t\tupdates[\"user2\"] = user\n\t\t\treturn updateGame(identifications, updates)\n\t\treturn False\n\treturn False\n\ndef isGameReady(identifications):\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = db.games.find(identifications)\n\tif cursor.count() > 0:\n\t\tif \"user2\" in cursor[0].keys():\n\t\t\treturn cursor[0][\"user1\"] != None and cursor[0][\"user2\"] != None\n\t\telse:\n\t\t\treturn cursor[0][\"user1\"] != None\n\treturn False\n\ndef startGame(identifications):\n\tstart = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\tupdates = dict()\n\tupdates[\"status\"] = 2\n\tupdates[\"start\"] = start\n\treturn updateGame(identifications, updates) \n\ndef checkGameStatus(identifications):\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = db.games.find(identifications)\n\tfor c in cursor:\n\t\treturn int(c[\"status\"])\n\treturn 4\n\ndef userFromGame(identifications, number):\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = db.games.find(identifications)\n\tif cursor.count() > 0 and \"user\" + str(number) in cursor[0].keys():\n\t\treturn cursor[0][\"user\" + str(number)]\n\treturn None\t\n\ndef pickRandomGame(identifications):\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = 
db.games.find(identifications)\n\tif cursor.count() == 0:\n\t\treturn None\n\trand = randint(0, cursor.count() - 1)\n\treturn cursor[rand]\n\ndef finishGame(identifications):\n\t#cursor = mongo.db.games.find(identifications)\n\tcursor = db.games.find(identifications)\n\tif cursor.count() > 0:\n\t\tif cursor[0][\"data1\"] != None and cursor[0][\"data2\"] != None and not cursor[0][\"finished\"]:\n\t\t\tupdateGame(identifications, {\"finished\": True})\n\t\t\tsubIdentifications = dict()\n\t\t\tsubIdentifications[\"name\"] = str(cursor[0][\"theme\"])\n\t\t\tif int(cursor[0][\"gameType\"]) == 1:\n\t\t\t\tcategory = subCategoryBelongsTo(subIdentifications)\n\t\t\t\tif category == None: \n\t\t\t\t\treturn False\n\t\t\telif int(cursor[0][\"gameType\"]) == 2:\n\t\t\t\t#cur = mongo.db.categories.find(subIdentifications)\n\t\t\t\tcur = db.categories.find(subIdentifications)\n\t\t\t\tif cur.count() == 0:\n\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\tcategory = cur[0]\n\t\t\tdata1 = str.split(str(cursor[0][\"data1\"]).lower(), \"||\")\n\t\t\tdata2 = str.split(str(cursor[0][\"data2\"]).lower(), \"||\")\n\t\t\tgameType = int(cursor[0][\"gameType\"])\n\t\t\tupdates = dict()\n\t\t\tif gameType == 1:\n\t\t\t\tscore1, score2 = calculateScores(data1, data2, category[\"name\"], 1)\n\t\t\t\tupdates[\"score1\"] = score1\n\t\t\t\tidUser = dict()\n\t\t\t\tidUser[\"_id\"] = cursor[0][\"user1\"]\n\t\t\t\tupdateScore(idUser, score1)\n\t\t\t\tif \"score2\" in cursor[0].keys():\n\t\t\t\t\tupdates[\"score2\"] = score2\n\t\t\t\t\tidUser[\"_id\"] = cursor[0][\"user2\"]\n\t\t\t\t\tupdateScore(idUser, score2)\n\t\t\telif gameType == 2:\n\t\t\t\tscore1 = int(cursor[0][\"score1\"])\n\t\t\t\tidUser = dict()\n\t\t\t\tidUser[\"_id\"] = cursor[0][\"user1\"]\n\t\t\t\tupdateScore(idUser, score1)\n\t\t\t\tfor i in range(cursor[0][\"score2\"]):\n\t\t\t\t\tfor category in data1:\n\t\t\t\t\t\tentity = data2[i]\n\t\t\t\t\t\t#exists, score = existsInNell(entity, category)\n\t\t\t\t\t\texists, score = None, -1\n\t\t\t\t\t\tfbIdent = dict()\n\t\t\t\t\t\tfbUpdates = dict()\n\t\t\t\t\t\tfbIdent[\"entity\"] = entity\n\t\t\t\t\t\tfbIdent[\"category\"] = category\n\t\t\t\t\t\tfbUpdates[\"score\"] = score\n\t\t\t\t\t\tfbUpdates[\"count\"] = 1\n\t\t\t\t\t\tfbUpdates[\"lazy\"] = True\n\t\t\t\t\t\taddFeedback(fbIdent, fbUpdates, 2)\n\t\t\telif gameType == 3:\n\t\t\t\tscore1, score2 = int(cursor[0][\"score1\"]), int(cursor[0][\"score2\"])\n\t\t\t\tidUser = dict()\n\t\t\t\tidUser[\"_id\"] = cursor[0][\"user1\"]\n\t\t\t\tupdateScore(idUser, score1)\n\t\t\t\tidUser = dict()\n\t\t\t\tidUser[\"_id\"] = cursor[0][\"user2\"]\n\t\t\t\tupdateScore(idUser, score2)\n\t\t\t\tfor category in data1:\n\t\t\t\t\tentity = subIdentifications[\"name\"].split(\"||\")[0]\t\t\t\t\t\n\t\t\t\t\t#exists, score = existsInNell(entity, category)\n\t\t\t\t\texists, score = None, -1\n\t\t\t\t\tfbIdent, fbUpdates = dict(), dict()\n\t\t\t\t\tfbIdent[\"entity\"] = entity\n\t\t\t\t\tfbIdent[\"category\"] = category\n\t\t\t\t\tfbUpdates[\"score\"] = score\n\t\t\t\t\tfbUpdates[\"count\"] = 1\n\t\t\t\t\tfbUpdates[\"lazy\"] = True\n\t\t\t\t\taddFeedback(fbIdent, fbUpdates, 3)\n\t\t\t\tfor category in data2:\n\t\t\t\t\tentity = subIdentifications[\"name\"].split(\"||\")[1]\t\t\t\t\t\n\t\t\t\t\t#exists, score = existsInNell(entity, category)\n\t\t\t\t\texists, score = None, -1\n\t\t\t\t\tfbIdent, fbUpdates = dict(), dict()\n\t\t\t\t\tfbIdent[\"entity\"] = entity\n\t\t\t\t\tfbIdent[\"category\"] = category\n\t\t\t\t\tfbUpdates[\"score\"] = score\n\t\t\t\t\tfbUpdates[\"count\"] = 
1\n\t\t\t\t\tfbUpdates[\"lazy\"] = True\n\t\t\t\t\taddFeedback(fbIdent, fbUpdates, 3)\n\t\t\telse:\n\t\t\t\tscore1 = int(cursor[0][\"score1\"])\n\t\t\t\tidUser = dict()\n\t\t\t\tidUser[\"_id\"] = cursor[0][\"user1\"]\n\t\t\t\tupdateScore(idUser, score1)\n\t\t\t\tfor i in range(cursor[0][\"score2\"]):\n\t\t\t\t\tfor entity in data1:\n\t\t\t\t\t\tcategory = data2[i]\n\t\t\t\t\t\t#exists, score = existsInNell(entity, category)\n\t\t\t\t\t\texists, score = None, -1\n\t\t\t\t\t\tfbIdent = dict()\n\t\t\t\t\t\tfbUpdates = dict()\n\t\t\t\t\t\tfbIdent[\"entity\"] = entity\n\t\t\t\t\t\tfbIdent[\"category\"] = category\n\t\t\t\t\t\tfbUpdates[\"score\"] = score\n\t\t\t\t\t\tfbUpdates[\"count\"] = 1\n\t\t\t\t\t\tfbUpdates[\"lazy\"] = True\n\t\t\t\t\t\taddFeedback(fbIdent, fbUpdates, 4)\n\t\t\tfinish = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\t\t\tupdates[\"status\"] = 1\n\t\t\tupdates[\"finish\"] = finish\n\t\t\treturn updateGame(identifications, updates)\n\t\telse:\n\t\t\tupdates = dict()\n\t\t\tupdates[\"status\"] = 3\n\t\t\treturn updateGame(identifications, updates)\t\t\n\treturn False\n\n","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":10510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
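The find-then-update pattern in updateGame above can be collapsed into a single round trip with update_one(..., upsert=True). A standalone sketch; the connection string, database and field names are invented, and a local mongod is assumed:

from pymongo import MongoClient

games = MongoClient("mongodb://localhost:27017")["testdb"]["games"]
result = games.update_one({"gameType": 1, "status": 0},
                          {"$set": {"status": 2}},
                          upsert=True)  # inserts the document if nothing matches
print(result.matched_count, result.upserted_id)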
+{"seq_id":"565018273","text":"from app.api import bp\nfrom flask import jsonify, request, url_for, g, abort\nfrom app.models import User\nfrom app import db\nfrom app.api.errors import bad_request\nfrom app.api.auth import token_auth\n\n\n@bp.route('/users/', methods=['GET'])\n@token_auth.login_required\ndef get_user(id):\n\tresponse = jsonify(User.query.get_or_404(id).to_dict())\n\tresponse.headers.add('Access-Control-Allow-Origin', '*')\n\treturn response\n\n@bp.route('/users/', methods=['GET'])\n@token_auth.login_required\ndef get_user_username(username):\n\tresponse = jsonify(User.query.filter_by(username=username).first().to_dict())\n\tresponse.headers.add('Access-Control-Allow-Origin', '*')\n\treturn response\n\n\n@bp.route('/users', methods=['GET'])\n@token_auth.login_required\ndef get_users():\n\tpage = request.args.get('page', 1, type=int)\n\tper_page = min(request.args.get('per_page', 10, type=int), 100)\n\tdata = User.to_collection_dict(User.query, page, per_page, 'api.get_users')\n\tresponse = jsonify(data)\n\tresponse.headers.add('Access-Control-Allow-Origin', '*')\n\treturn response\n\n\n@bp.route('/users//followers', methods=['GET'])\n@token_auth.login_required\ndef get_followers(id):\n\tuser = User.query.get_or_404(id)\n\tpage = request.args.get('page', 1, type=int)\n\tper_page = min(request.args.get('per_page', 10, type=int), 100)\n\tdata = User.to_collection_dict(user.followers, page, per_page,'api.get_followers', id=id)\n\tresponse = jsonify(data)\n\tresponse.headers.add('Access-Control-Allow-Origin', '*')\n\treturn response\n\n@bp.route('/users//followed', methods=['GET'])\n@token_auth.login_required\ndef get_followed(id):\n\tuser = User.query.get_or_404(id)\n\tpage = request.args.get('page', 1, type=int)\n\tper_page = min(request.args.get('per_page', 10, type=int), 100)\n\tdata = User.to_collection_dict(user.followed, page, per_page,'api.get_followed', id=id)\n\tresponse = jsonify(data)\n\tresponse.headers.add('Access-Control-Allow-Origin', '*')\n\treturn response\n\n\n@bp.route('/users', methods=['POST'])\ndef create_user():\n\tdata = request.get_json() or {}\n\tif 'username' not in data or 'email' not in data or 'password' not in data:\n\t\treturn bad_request('must include username, email and password fields')\n\tif User.query.filter_by(username=data['username']).first():\n\t\treturn bad_request('please use a different username')\n\tif User.query.filter_by(email=data['email']).first():\n\t\treturn bad_request('please use a different email address')\n\tuser = User()\n\tuser.from_dict(data, new_user=True)\n\tdb.session.add(user)\n\tdb.session.commit()\n\tresponse = jsonify(user.to_dict())\n\tresponse.status_code = 201\n\tresponse.headers['Location'] = url_for('api.get_user', id=user.id)\n\tresponse.headers.add('Access-Control-Allow-Origin', '*')\n\treturn response\n\n\n@bp.route('/users/', methods=['PUT'])\n@token_auth.login_required\ndef update_user(id):\n\tprint(id)\n\tif g.current_user.id != id:\n\t\tprint(\"not current user\")\n\t\tabort(403)\n\tuser = User.query.get_or_404(id)\n\tdata = request.get_json() or {}\n\tprint(data)\n\tif 'username' in data and data['username'] != user.username and User.query.filter_by(username=data['username']).first():\n\t\treturn bad_request('please use a different username')\n\tif 'email' in data and data['email'] != user.email and User.query.filter_by(email=data['email']).first():\n\t\treturn bad_request('please use a different email address')\n\tuser.from_dict(data, new_user=False)\n\tdb.session.commit()\n\tresponse = 
jsonify(user.to_dict())\n\tresponse.headers.add('Access-Control-Allow-Origin', '*')\n\treturn response\n","sub_path":"app/api/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
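to_collection_dict is not shown in this record; as a hedged sketch only, a paginated helper of that shape is commonly built on Flask-SQLAlchemy's paginate (pre-3.0 positional signature below). The field names are illustrative, not the project's actual implementation:

def to_collection_dict(query, page, per_page):
    resources = query.paginate(page, per_page, False)  # (page, per_page, error_out)
    return {
        "items": [item.to_dict() for item in resources.items],
        "_meta": {"page": page, "per_page": per_page,
                  "total_pages": resources.pages, "total_items": resources.total},
    }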
+{"seq_id":"95911217","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom .views import (\n home, \n ProfilePageView, \n submittedView,\n about,\n volunteer,\n TranslateFormView,\n NarrateFormView,\n NarrationStatus,\n TranslationStatus,\n NarrateUpdate,\n TranslateUpdate,\n)\n\nurlpatterns = [\n path('', home, name='home'),\n path('about/', about, name='about'),\n path('volunteers/', volunteer, name='volunteers'),\n path('profile/', ProfilePageView.as_view(), name='profile'),\n path('submitted-translation/', TranslateFormView.as_view(), name='translate'),\n path('submitted-narration/', NarrateFormView.as_view(), name='narrate'),\n path('narration-status/', NarrationStatus, name='narrate-status'),\n path('narration//update/', NarrateUpdate.as_view(), name='edit-narration'),\n path('translation//update/', TranslateUpdate.as_view(), name='edit-translation'),\n path('translation-status/', TranslationStatus, name='translate-status'),\n]\n","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"623515476","text":"import datetime\nfrom typing import List, Optional\n\nfrom pymongo import ReplaceOne, DESCENDING\nfrom pymongo.database import Database\n\nfrom pepy.domain.model import Project, ProjectDownloads, ProjectName, Downloads\nfrom pepy.domain.repository import ProjectRepository\n\n\nclass MongoProjectRepository(ProjectRepository):\n def __init__(self, client: Database):\n self._client = client\n self._client.projects.create_index([(\"name\", DESCENDING)])\n\n def get(self, project_name: str) -> Optional[Project]:\n project_data = self._client.projects.find_one({\"name\": project_name.strip().lower()})\n if project_data is None:\n return None\n project = Project(ProjectName(project_data[\"name\"]), Downloads(project_data[\"total_downloads\"]))\n downloads = sorted(project_data[\"downloads\"].items(), key=lambda x: x[0])\n for date, version_downloads in downloads:\n for r in version_downloads:\n project.add_downloads(datetime.date.fromisoformat(date), r[0], Downloads(r[1]))\n # Don't count the downloads twice\n project.total_downloads -= Downloads(r[1])\n return project\n\n def save(self, project: Project):\n data = self._convert_to_raw(project)\n query = {\"name\": project.name.name}\n self._client.projects.replace_one(query, data, upsert=True)\n\n def _convert_to_raw(self, project):\n data = {\n \"name\": project.name.name,\n \"total_downloads\": project.total_downloads.value,\n \"downloads\": {\n date.isoformat(): [(version, x.value) for version, x in list(versions.items())]\n for date, versions in project._latest_downloads.items()\n },\n }\n return data\n\n def save_projects(self, projects: List[Project]):\n requests = []\n for project in projects:\n requests.append(ReplaceOne({\"name\": project.name.name}, self._convert_to_raw(project), upsert=True))\n self._client.projects.bulk_write(requests)\n\n def update_downloads(self, projects_downloads: List[ProjectDownloads]):\n pass\n\n def save_day_downloads(self, project_downloads: List[ProjectDownloads]):\n pass\n","sub_path":"pepy/infrastructure/db_repository.py","file_name":"db_repository.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"12714397","text":"import numpy as np\nimport pandas as pd\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\n\nfrom ..spikesorter import SpikeSorter\nfrom .traceviewer import TraceViewer\nfrom .lists import PeakList, ClusterList\nfrom .ndscatter import NDScatter\n\nimport itertools\n\nclass SpikeSortingWindow(QtGui.QMainWindow):\n def __init__(self, spikesorter):\n QtGui.QMainWindow.__init__(self)\n \n self.spikesorter = spikesorter\n \n self.traceviewer = TraceViewer(spikesorter = spikesorter)\n self.peaklist = PeakList(spikesorter = spikesorter)\n self.clusterlist = ClusterList(spikesorter = spikesorter)\n self.ndscatter = NDScatter(spikesorter = spikesorter)\n \n all = [self.traceviewer, self.peaklist, self.ndscatter]\n \n for w1, w2 in itertools.combinations(all,2):\n w1.peak_selection_changed.connect(w2.on_peak_selection_changed)\n w2.peak_selection_changed.connect(w1.on_peak_selection_changed)\n \n docks = {}\n docks['traceviewer'] = QtGui.QDockWidget('traceviewer',self)\n docks['traceviewer'].setWidget(self.traceviewer)\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea, docks['traceviewer'])\n docks['peaklist'] = QtGui.QDockWidget('peaklist',self)\n docks['peaklist'].setWidget(self.peaklist)\n self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, docks['peaklist'])\n docks['ndscatter'] = QtGui.QDockWidget('ndscatter',self)\n docks['ndscatter'].setWidget(self.ndscatter)\n self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, docks['ndscatter'])\n\n docks['clusterlist'] = QtGui.QDockWidget('clusterlist',self)\n docks['clusterlist'].setWidget(self.clusterlist)\n self.splitDockWidget(docks['peaklist'], docks['clusterlist'], QtCore.Qt.Horizontal)\n \n self.spikesorter.refresh_colors()\n\n \n @classmethod\n def from_classes(cls, dataio, peakdetector, waveformextractor, clustering):\n spikesorter = SpikeSorter(dataio = dataio)\n \n spikesorter.all_peaks = pd.DataFrame(np.zeros(peakdetector.peak_index.size, dtype = 'int32'), columns = ['label'], index = peakdetector.peak_index)\n spikesorter.all_peaks['label'] = clustering.labels\n spikesorter.all_peaks['selected'] = False\n spikesorter.all_waveforms = waveformextractor.get_ajusted_waveforms()\n spikesorter.clustering = clustering\n \n spikesorter.refresh_colors()\n \n return SpikeSortingWindow(spikesorter)\n \n ","sub_path":"tridesclous/gui/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"546575699","text":"for i in range(10):\n for j in range(i):\n print(i)\n j += 1\nprint(\"\\n\")\ni += 1\nprint(\" \")\nprint(\"--\")\nprint(\"-\")\nn = int(input(\"entrer un nobre\"))\ncpt = 0\nfor i in range (1,n):\n if( n%i == 0 ):\n cpt = cpt + i\nif(cpt == n):\n print(\"le nombre \",n,\"est parfait\")\nelse:\n print(\"le nombre \",n,\"n'est pas parfait\")","sub_path":"Exo2S.py","file_name":"Exo2S.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"219108305","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.colors import LinearSegmentedColormap\n\nmina = -50.0\nmaxa = 1000.0\n\ncdict1 = {'red': ((0.0, 0.0, 0.0),\n (0.5, 0.0, 0.1),\n (1.0, 1.0, 1.0)),\n\n 'green': ((0.0, 0.0, 0.0),\n (1.0, 0.0, 0.0)),\n\n 'blue': ((0.0, 0.0, 1.0),\n (0.5, 0.1, 0.0),\n (1.0, 0.0, 0.0))\n }\nblue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)\nplt.register_cmap(cmap=blue_red1)\n\n\ndef viz_file(fname, c='k'):\n xx = []\n yy = []\n cc = []\n\n for idx, line in enumerate(open(fname)):\n #if idx == 0: continue\n\n x, y, z = line.rstrip(' \\n').split(' ')\n x = float(x)\n y = float(y)\n z = float(z)\n if not (90 < y < 135):\n continue\n\n xx.append(x)\n yy.append(y)\n\n if z < mina: z = mina\n if z > maxa: z = maxa\n #z = (z - mina) / (maxa - mina)\n cc.append(z)\n\n sc = plt.scatter(yy, xx, s=10, c=cc, cmap=blue_red1, vmin=mina, vmax=maxa, edgecolors='none', alpha=0.5)\n plt.grid(b=True, which='both')\n plt.colorbar(sc)\n plt.subplots_adjust(left=0.05, right=0.97, top=0.95, bottom=0.05)\n\n\n\ndef compare_viz():\n plt.figure(1, figsize=(12,8))\n plt.subplot(2,2,1)\n plt.ylim([0,50])\n plt.xlim([90,130])\n viz_file('pprand_alg_1e4.csv')\n\n plt.subplot(2,2,2)\n plt.ylim([0,50])\n plt.xlim([90,130])\n viz_file('rand_alt_2000.csv')\n\n plt.subplot(2,2,3)\n plt.ylim([0,50])\n plt.xlim([90,130])\n viz_file('stratified_alt_2000.csv')\n\n plt.subplot(2,2,4)\n plt.ylim([0,50])\n plt.xlim([90,130])\n viz_file('mcs_alt_2000.csv')\n\n plt.show()\n\n\ndef viz_single():\n viz_file('rand_alt_1e6.csv', c = 'k')\n #viz_file('pp_mcs_1e4.csv', c = 'k')\n plt.xlim([80, 135])\n plt.ylim([0, 60])\n plt.show()\n\n\nif __name__ == \"__main__\":\n viz_single()\n","sub_path":"dataset/geolife/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"373858535","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom app.models import CustomUser, Community, Post, Comment, Bookmark, DisLikePost, LikePost, ActivityStream, \\\n CommunityCategory\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\n\n\n# Register your models here.\n\nclass CustomUserAdmin(UserAdmin):\n add_form = CustomUserCreationForm\n form = CustomUserChangeForm\n model = CustomUser\n list_display = ['username', 'email']\n\n # For customizing the form in Abstract User\n\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('username', 'password1', 'password2', 'community','image')}\n ),\n )\n\n\nadmin.site.register(CustomUser, CustomUserAdmin)\n\n\nclass PostAdmin(admin.ModelAdmin):\n prepopulated_fields = {'post_slug': ('title',)}\n\n\nclass CommunityAdmin(admin.ModelAdmin):\n prepopulated_fields = {'name_slug': ('name',)}\n\n\n# Update the registration to include this customised interface\n\n# register community\nadmin.site.register(Community, CommunityAdmin)\nadmin.site.register(Post, PostAdmin)\nadmin.site.register(Comment)\nadmin.site.register(Bookmark)\nadmin.site.register(LikePost)\nadmin.site.register(DisLikePost)\nadmin.site.register(ActivityStream)\nadmin.site.register(CommunityCategory)\n","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"102634251","text":"# coding=utf-8\n\nimport json\nimport os\nimport sys\n\nbase_path = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\nsys.path.append(base_path)\n\n\ndef read_json(file_name=None):\n if file_name == None:\n file_path = base_path+\"/data/api_data.json\"\n else:\n file_path = base_path+file_name\n\n with open(file_path,encoding='UTF-8') as f:\n data = json.load(f)\n return data\n\n\ndef get_value(key, file_name=None):\n data = read_json(file_name)\n return data.get(key)\n\n\ndef write_value(data, file_name=None):\n data_value = json.dumps(data)\n if file_name == None:\n path = base_path+\"/Config/cookie.json\"\n else:\n path = base_path+file_name\n with open(path, \"w\") as f:\n f.write(data_value)\n","sub_path":"common/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"492918988","text":"#!/usr/local/bin/python\n# encoding: utf-8\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom jinja2 import Environment, FileSystemLoader\n\ndef create_article(name):\n _tmpArticle = open('./tmp/tmp.html')\n _title=None\n _sub_title=None\n _content=[]\n for i, line in enumerate(_tmpArticle):\n if i == 0:\n _title = line.decode('utf-8')\n elif i == 1:\n _sub_title = line.decode('utf-8')\n else:\n _content.append(line.decode('utf-8'))\n\n _tmpArticle.close()\n env = Environment(loader=FileSystemLoader('./templates'))\n template = env.get_template('article-template.html')\n _html = template.render(title=_title, sub_title=_sub_title, content=''.join(_content))\n _newArticle = open('articles-html/%s.html' % name, 'w+')\n _newArticle.write(_html.encode('utf-8'))\n _newArticle.close()\n\nif __name__=='__main__':\n create_article(sys.argv[1])\n","sub_path":"tools/generate_html.py","file_name":"generate_html.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"477035162","text":"import pandas as pd\n\ndef convert_dates(x):\n x['businessDate']=pd.to_datetime(x['businessDate'])\n x['month']=x['businessDate'].dt.month\n x['is_month_start']=x['businessDate'].dt.is_month_start\n x['is_month_end']=x['businessDate'].dt.is_month_end\n x['year']=x['businessDate'].dt.year\n x['dayofweek']=x['businessDate'].dt.dayofweek\n x['quarter'] = x['businessDate'].apply(lambda x: x.quarter)\n x['week_of_year'] = x['businessDate'].apply(lambda x: x.weekofyear)\n x['day_of_year'] = x['businessDate'].apply(lambda x: x.dayofyear)\n x['Is_Mon'] = (x.dayofweek == 0) *1\n x['Is_Tue'] = (x.dayofweek == 1) *1\n x['Is_Wed'] = (x.dayofweek == 2) *1\n x['Is_Thu'] = (x.dayofweek == 3) *1\n x['Is_Fri'] = (x.dayofweek == 4) *1\n x['Is_Sat'] = (x.dayofweek == 5) *1\n x['Is_Sun'] = (x.dayofweek == 6) *1\n x['Is_wknd'] = x.dayofweek // 4\n x.pop('businessDate')\n # x.pop('year')\n return x\n\ndef merge(x,y,col,col_name):\n x =pd.merge(x, y, how='left', on=None, left_on=col, right_on=col,\n left_index=False, right_index=False, sort=True,\n copy=True, indicator=False,validate=None)\n x=x.rename(columns={'sales':col_name})\n return x","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"64001656","text":"import json\nimport pandas as pd\nimport re\nimport random\nimport argparse\n\n\ndef get_post_titles(inp):\n \"\"\"\n (file) --> (list)\n this function takes as input a file with Reddit post collected with the reddit API\n returns a list of all the titles of each of the posts.\n \"\"\"\n list_of_titles = []\n file_in = open(inp,'r')\n for line in file_in:\n \n data = json.loads(line)\n list_of_titles.append(data['data']['title'])\n return list_of_titles\n \n\n\ndef get_titles_by_candidate(list_of_titles, candidate):\n \"\"\"\n (list, string (biden or trump in lower case)) --> list\n \n This takes as input a list of post titles and returns a list of post titles containing \n the name of the candidate.\n \"\"\"\n if candidate != 'trump' and candidate != 'biden':\n raise ValueError ('candidate must be equal to \"trump\" or \"biden\" ') \n \n titles_containing_the_candidate = []\n for title in list_of_titles:\n lower_title = title.lower()\n if re.search(f\"[^0-9a-zA-Z]{candidate}[^0-9a-zA-Z]\", lower_title) or re.search(f\"{candidate}[^0-9a-zA-Z]\", lower_title):\n titles_containing_the_candidate.append(title)\n \n return titles_containing_the_candidate\n\n \ndef choose_random_line(list_of_post, num_post):\n \"\"\"\n (list, int) --> list\n This function takes as input a list of titles posts and a number. \n It returns a list of list of length of that number containing radomly selected posts\n from list_of_posts.\n \"\"\"\n \n random_post = []\n \n while(len(random_post) < num_post):\n mytitle = list_of_post.pop(random.randint(0,len(list_of_post)-1))\n random_post.append(mytitle)\n return random_post\n\n\n \n \n \ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_file_d1', help = 'this is the file for one of your reddit post collection')\n parser.add_argument('input_file_d2')\n parser.add_argument('input_file_d3')\n parser.add_argument('-c','--candidate')\n parser.add_argument('-o','--output_file')\n args = parser.parse_args()\n \n ## We now get the list of titles for the 3 days we collected the reddit data from\n titles_day1 = get_post_titles(args.input_file_d1)\n titles_day2 = get_post_titles(args.input_file_d2)\n titles_day3 = get_post_titles(args.input_file_d3)\n \n # Now from the lists of titles from the 3 days we get the ones containing the name of \n # of the candidate of our choice\n candidate_titles_day1 = get_titles_by_candidate(titles_day1, args.candidate)\n candidate_titles_day2 = get_titles_by_candidate(titles_day2, args.candidate)\n candidate_titles_day3 = get_titles_by_candidate(titles_day3, args.candidate)\n \n \n # We now chose randomly for the three list of titles containign \n shortlist_day1 = choose_random_line(candidate_titles_day1,66)\n shortlist_day2 = choose_random_line(candidate_titles_day2,66)\n shortlist_day3 = choose_random_line(candidate_titles_day3,66)\n \n \n \n sample_titles = []\n larger_list = [shortlist_day1,shortlist_day2]#,shortlist_day3]\n for sublist in larger_list:\n for title in sublist:\n sample_titles.append(title)\n \n \n \n posts = {'titles': sample_titles}\n \n df = pd.DataFrame(posts,columns = ['titles'])\n \n df.to_csv(f'{args.output_file}.csv', index = False, encoding = 'utf-8')\n \nif __name__ == '__main__':\n main()\n","sub_path":"filtering_the_post.py","file_name":"filtering_the_post.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"590748294","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: Carbognin Alberto\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport json\nimport random\n\nenumDict = {'FuelStation':0,\n 'ParkingArea':1,\n 'RailwayStation':2,\n 'BusStation':3,\n 'CarSharingPark':4,\n 'BikeSharingPark':5,\n 'Campsite':6}\n\nif __name__ == \"__main__\":\n print(\"Loading dataSet...\")\n\n print('Start making datasets...')\n Facility = pd.DataFrame(columns=['FacilityID','RouteID','Price','Contact','FacilityEnum','Address','Calendar'])\n Price =pd.DataFrame(columns=['FacilityID','TicketID','AutonomousTransportID','Cost','CurrencyType'])\n Contact = pd.DataFrame(columns=['AgencyID','FacilityID','Phone','Email','Website'])\n Address = pd.DataFrame(columns=['FacilityID','RouteID','Province','City','Street','Number','Cap','Location'])\n Location = pd.DataFrame(columns=['AddressID','StopId','Latitude','Longitude','Altitude'])\n Calendar = pd.DataFrame(columns=['FacilityID','StopTimesID','ServiceID','Monday','Tuesday','Wednesday',\n 'Thursday','Friday','Saturday','Sunday','StartDate','EndDate','Exceptions'])\n CalendarDates = pd.DataFrame(columns=['CalendarId','ServiceId','Date','ExceptionType'])\n\n\n #Load the file\n\n json_file_path = '../../dataset/Formal Modeling/data/bikesharing_assured.json'\n\n with open(json_file_path, 'r') as j:\n bikesharing_areas = json.loads(j.read())\n #fuel_pumps = json.loads('../../dataset/Formal Modeling/data/fuel_pumps_assured.json')\n\n print(json.loads(bikesharing_areas[0]))\n\n print(\"Found: {} records in the dataset.\".format(len(bikesharing_areas)))\n \n for i in range(len(bikesharing_areas)):\n bikesharing_area = bikesharing_areas[i]\n \n if i%300 == 0:\n print(\"Processing {}/{} record.\".format(i, len(bikesharing_areas))) \n record = json.loads(bikesharing_area)\n\n address = record['address']\n #print(\"\\taddress:\", address)\n\n location = address['location']\n contact = record['contact']\n\n # ['AgencyID','FacilityID','Phone','Email','Website']\n Contact.loc[i] = [None, i, contact['phone'], contact['email'], contact['website']]\n\n # ['AddressID','StopId','Latitude','Longitude','Altitude']\n Location.loc[i] = [i, None, location['latitude'], location['longitude'], location['altitude']]\n\n # ['FacilityID','RouteID','Province','City','Street','Number','Cap','Location']\n Address.loc[i] = [None, None, address['province'], address['city'], address['street'], address['number'], address['cap'], i]\n\n # ['FacilityID','RouteID','Price','Contact','FacilityEnum','Address','Calendar']\n Facility.loc[i] = [i, None, None, i, 5, i, None]\n\n\n\n\n\n \n print('Start exporting datasets...')\n exportPath = '../../dataset/Data Integration/data/BikeSharingArea/'\n os.mkdir(exportPath)\n Facility.to_csv(exportPath+'Facility.csv')\n Price.to_csv(exportPath+'Price.csv')\n Contact.to_csv(exportPath+'Contact.csv')\n Address.to_csv(exportPath+'Address.csv')\n Location.to_csv(exportPath+'Location.csv')\n Calendar.to_csv(exportPath+'Calendar.csv')\n CalendarDates.to_csv(exportPath+'CalendarDates.csv')\n print('export done.')\n \n","sub_path":"code/Code/DataIntegration/BikeSharingKarma.py","file_name":"BikeSharingKarma.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"23226823","text":"in_parametrs = ''\nnum = \"{:{align}{width}.{precision}f}\"\nwhile in_parametrs != 'F':\n\n a = int(input(\"Введите результат первого дня - \"))\n b = int(input(\"введите общий километраж - \"))\n\n day = 0\n while a < b:\n day = day + 1\n if day == 1:\n a = a\n else:\n a = a * 1.1\n #print(num.format(a, align='<', width=8, precision=2))\n aa = num.format(a, align='<', width=8, precision=2)\n print(f'{day} день пройдено {aa}')\n print(f'Потребовалось {day} дней')\n\n in_parametrs = input('Введите F д��я окончания программы')\n","sub_path":"Урок1-6.py","file_name":"Урок1-6.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"260931266","text":"from hytest import *\nfrom lib.webapi import apimgr\n\nclass c2:\n name = '添加客户 - API-0152'\n\n #清除方法\n def teardown(self):\n apimgr.customer_del(self.addedCustomerId)\n\n def teststeps(self):\n\n STEP(1,'先列出客户')\n r = apimgr.customer_list()\n listRet1 = r.json()\n customerlist1 = listRet1[\"retlist\"]\n\n\n\n STEP(2, '添加一个客户')\n r = apimgr.customer_add('南京市鼓楼医院',\n '13345679934',\n \"南京市鼓楼北路\")\n\n addRet = r.json()\n\n self.addedCustomerId = addRet['id']\n\n CHECK_POINT('返回的ret值=0',\n addRet['ret'] == 0)\n\n\n STEP(3, '再次列出客户')\n\n r = apimgr.customer_list(11)\n\n listRet = r.json()\n\n expected = {\n \"ret\": 0,\n \"retlist\": [\n {\n \"address\": \"南京市鼓楼北路\",\n \"id\": addRet['id'],\n \"name\": \"南京市鼓楼医院\",\n \"phonenumber\": \"13345679934\"\n }\n ] + customerlist1,\n 'total': 11\n }\n\n CHECK_POINT('返回的消息体数据正确',\n expected == listRet)\n\n","sub_path":"hytest/autotest_bysms_03/cases/数据环境-空白/数据环境-10个客户/客户API/添加客户.py","file_name":"添加客户.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"643738200","text":"\"\"\"\nBy listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\n\nWhat is the 10001st prime number?\n\"\"\"\nfrom itertools import count\n\n\ndef main():\n generator = prime_generator(10001)\n p\n for p in generator:\n print(p)\n\n\ndef is_prime(n, primes):\n for i in primes:\n if n % i == 0:\n return False\n return True\n\n\ndef prime_generator(n):\n primes = []\n for i in count(2):\n if len(primes) >= n:\n break\n if is_prime(i, primes):\n primes.append(i)\n yield i\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/project-euler/problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"215543263","text":"import numpy\n\n# This file create the Hamiltonian trajectory for LH2 using the structure\n# from the 1kzu pdb structure file included.\n# To run the code use python 3.7 or newer and type:\n# python GenHam.py\n\nE0=12255 # cm-1 The B850 Chromophore average gap\nE1=240 # cm-1 The extra energy for the B800 Chromophore gap\nMU0=4.481 # Debye The transition dipole before the application of a scale factor\nsigma=320 # cm-1 The width of the energy distribution for B850\nsigma1=141 # cm-1 The width of the energy distribution for B800\ntau=150 # fs The correlation time for the overdamped Brownian oscillators\nfcs=1.25; # Factor to scale couplings (resulting in an effective dipole of 5.001 Debye)\ndt=3 # fs Time between generated snapshots\nunits=27 # The number of chromophores\nsteps=200000 # Number of timesteps\n\n# Set op constants for generating fluctuations\nalpha=numpy.exp(-dt/tau)\nbeta=numpy.sqrt(1-alpha**2)\ndsiatoicm=(0.393430307**2)*219474.631370515*(0.529177249**3) # Conv from deb^2/a^3 to cm-1\n\n# Initialize arrays \n# Positions of Mg, NB, and ND\nx=numpy.zeros((units,3))\ny=numpy.zeros((units,3))\nz=numpy.zeros((units,3))\n# Dipole moment\nmu=numpy.zeros((units,3))\n# Dye position\nr=numpy.zeros((units,3))\n# Hamiltonian\nH=numpy.zeros(int(units*(units+1)/2))\n# Dipole moments\nmu4bin=numpy.zeros((units*3))\n# Positions\npos4bin=numpy.zeros((units*3))\n# Helping arrays for Hamiltonian construction\nB800=numpy.zeros((units))\nssigma=numpy.ones((units))*sigma\n\n# Open files\nfile_input=open(\"1kzu.pdb\",\"r\")\nfile_H=open(\"Energy.bin\",\"wb\")\nfile_x=open(\"Positions.txt\",\"w\")\nfile_mu=open(\"Dipole.bin\",\"wb\")\nfile_pos=open(\"Positions.bin\",\"wb\")\nfile_HH=open(\"Ham.txt\",\"w\")\nfile_dp=open(\"Dipole.txt\",\"w\")\n\n# Symmetry constants to recover C3 symmetry operations\nsa=-0.5\nsb=0.866025\n\n# Read structure from pdb file\nindex=0\nwhile True:\n data=file_input.readline()\n if not data: break\n words=data.split()\n if len(words)>2:\n if words[2]==\"MG\":\n x[index][0]=float(words[6])\n y[index][0]=float(words[7])\n z[index][0]=float(words[8])\n x[index+9][0]=sa*x[index][0]-sb*y[index][0]\n y[index+9][0]=sb*x[index][0]+sa*y[index][0]\n z[index+9][0]=z[index][0]\n x[index+18][0]=sa*x[index][0]+sb*y[index][0]\n y[index+18][0]=-sb*x[index][0]+sa*y[index][0]\n z[index+18][0]=z[index][0]\n if words[2]==\"NB\":\n x[index][1]=float(words[6])\n y[index][1]=float(words[7])\n z[index][1]=float(words[8])\n x[index+9][1]=sa*x[index][1]-sb*y[index][1]\n y[index+9][1]=sb*x[index][1]+sa*y[index][1]\n z[index+9][1]=z[index][1]\n x[index+18][1]=sa*x[index][1]+sb*y[index][1]\n y[index+18][1]=-sb*x[index][1]+sa*y[index][1]\n z[index+18][1]=z[index][1]\n if words[2]==\"ND\":\n x[index][2]=float(words[6])\n y[index][2]=float(words[7])\n z[index][2]=float(words[8])\n x[index+9][2]=sa*x[index][2]-sb*y[index][2]\n y[index+9][2]=sb*x[index][2]+sa*y[index][2]\n z[index+9][2]=z[index][2]\n x[index+18][2]=sa*x[index][2]+sb*y[index][2]\n y[index+18][2]=-sb*x[index][2]+sa*y[index][2]\n z[index+18][2]=z[index][2]\n index=index+1\n\n# Verify that the correct number (9) of unique dye atoms were read from file\nprint(index)\n\n# Construct arrays with transition dipole moments and positions\nfor atom in range(27):\n # Write positions to human readable file\n file_x.write(str(x[atom][0]) + \" \" + str(y[atom][0]) + \" \" + str(z[atom][0]) + \"\\n\")\n mu[atom][0]=x[atom][2]-x[atom][1]\n mu[atom][1]=y[atom][2]-y[atom][1]\n mu[atom][2]=z[atom][2]-z[atom][1] \n 
mum=numpy.sqrt(mu[atom][0]**2+mu[atom][1]**2+mu[atom][2]**2)\n    mu[atom][0]=mu[atom][0]/mum\n    mu[atom][1]=mu[atom][1]/mum\n    mu[atom][2]=mu[atom][2]/mum\n    # Write transition dipole moments to human readable file\n    file_dp.write(str(mu[atom][0]) + \" \" + str(mu[atom][1]) + \" \" + str(mu[atom][2]) + \"\\n\")\n    r[atom][0]=x[atom][0]\n    r[atom][1]=y[atom][0]\n    r[atom][2]=z[atom][0]\n    # Store data in arrays for saving in binary files\n    mu4bin[atom]=mu[atom][0]\n    mu4bin[units+atom]=mu[atom][1]\n    mu4bin[2*units+atom]=mu[atom][2]\n    pos4bin[atom]=r[atom][0]\n    pos4bin[units+atom]=r[atom][1]\n    pos4bin[2*units+atom]=r[atom][2]\n\n# Generate helping arrays for Hamiltonian construction.\n# Accounting for difference between B850 and B800 chromophores\nfor atom in range(9):\n    B800[3*atom+1]=E1\n    ssigma[3*atom+1]=sigma1\n\n# Create Hamiltonian first off-diagonal part\nfor ai in range(27):\n    for aj in range(ai):\n        if ai!=aj:\n            rr=r[ai,:]-r[aj,:]\n            rd=numpy.sqrt(rr[0]**2+rr[1]**2+rr[2]**2)\n            # Equation for transition-dipole coupling\n            J=sum(mu[ai,:]*mu[aj,:])/(rd**3)-3*sum(mu[ai,:]*rr[:])*sum(mu[aj,:]*rr[:])/(rd**5)\n            # Convert to cm-1\n            J=J*dsiatoicm*MU0*MU0*fcs\n            # Do indexing for the packed triangular matrix\n            ind=int(ai+units*aj-(aj*(aj+1)/2))\n            # Print information to screen\n            print(ai)\n            print(aj)\n            print(ind)\n            H[ind]=J\n            print(J)\n\n# Create initial random numbers\ndiag=numpy.random.randn(27)\n# Create diagonal elements\nfor st in range(steps):\n    for ai in range(27):\n        ind=int(ai+units*ai-(ai*(ai+1)/2))\n        # Find energy gap including shift for B800 chromophores\n        H[ind]=diag[ai]*ssigma[ai]+E0+B800[ai]\n    # Update random numbers according to J. Chem. Phys. 127:084507 (2007) \n    diag=diag*alpha+numpy.random.randn(27)*beta\n    # Save Hamiltonian, dipoles, and positions to binary files\n    Hf=numpy.array(H,'float32')\n    step=numpy.array([0],'float32')\n    step.tofile(file_H)\n    Hf.tofile(file_H)\n    step.tofile(file_mu)\n    muf=numpy.array(mu4bin,'float32')\n    muf.tofile(file_mu)\n    step.tofile(file_pos)\n    puf=numpy.array(pos4bin,'float32')\n    puf.tofile(file_pos)\n\n# Write square Hamiltonian to human readable file\nfor ai in range(27):\n    for aj in range(27):\n        if aj<ai:\n            ind=int(ai+units*aj-(aj*(aj+1)/2))\n        if aj>ai:\n            ind=int(aj+units*ai-(ai*(ai+1)/2))\n        if ai==aj:\n            file_HH.write(str(E0+B800[ai]) + \" \")\n        if ai!=aj:\n            file_HH.write(str(H[ind]) + \" \")\n    file_HH.write(\"\\n\")\n\n# Close all files\nfile_input.close()\nfile_H.close()\nfile_x.close()\nfile_mu.close()\nfile_pos.close()\nfile_dp.close()\nfile_HH.close()\n","sub_path":"GenHam.py","file_name":"GenHam.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
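The index arithmetic in GenHam.py packs the lower triangle (ai >= aj) of the 27x27 Hamiltonian into a flat array of length units*(units+1)/2; a small stand-alone check that the mapping covers every slot exactly once:

units = 27
seen = set()
for aj in range(units):
    for ai in range(aj, units):  # lower triangle, ai >= aj
        ind = int(ai + units*aj - (aj*(aj+1)/2))
        seen.add(ind)
# Bijection onto 0 .. units*(units+1)/2 - 1, so no H entry is ever clobbered.
assert seen == set(range(units*(units+1)//2))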
+{"seq_id":"489456130","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import DBSCAN\nfrom collections import Counter\nimport glob\nimport re\nimport pandas as pd\nimport sys\nimport pyfits\nfrom astropy.table import Table\n\nCCD = sys.argv[1]\ndir_in_CCD = '/home/student01/out_team1/'+CCD\ndir_in_CCD_image = '/home/student01/out_team1/'+CCD\ndir_out_CCD = '/home/student02/dic/' + CCD +'object'\ndir_out_CCD_moving = '/home/student02/dic/' + CCD +'moving'\n\ndef get_column_name(filename):\n column_names = []\n with open(filename) as f:\n for line in f:\n if line[0] == '#':\n column_names.append(line.split()[2])\n else:\n break\n \n return column_names\n\ndef file_number(file_name):\n #_index = [m.start() for m in re.finditer('_', file_name)][-1]\n #ext_index = [m.start() for m in re.finditer('\\.', file_name)][-1]\n pieces = file_name.split('/')[-1]\n #print pieces\n numbers = pieces.split('_')[3]\n #print numbers\n return int(numbers)\n #return int(file_name[_index+1:ext_index])\n\n\nfiles = glob.glob(dir_in_CCD+\"*.fits.cat\")\n#print files\nfiles = sorted(files, key=lambda f: file_number(f))\n\n#print files\n#images = glob.glob(dir_in_CCD_image+\"*.fz\")\n\n#print files\n#print images\n#columns = [0,55,56,174,11,10,180]\ncolumn_names = get_column_name(files[0])\nData = None\n#start = 0\n#data_by_epochs = []\n\ninfo_name = ['NUMBER','X_WORLD','Y_WORLD','FLAGS','FLUXERR_AUTO','FLUX_AUTO']\nfor i, file_ in enumerate(files):\n \n #f = np.loadtxt(file_)[:,columns]\n #hdulist = pyfits.open(images[i])\n #epoc_time = hdulist[0].header['MJD-OBS']i\n #epoc_time = i\n #f = np.loadtxt(file_)\n #f = pd.DataFrame(data=f, columns = column_names)\n t = Table.read(file_, table_id=0)\n f = pd.DataFrame(np.array(t))\n f = f[info_name].values\n #print f.shape\n #f = np.hstack((f,np.zeros(f.shape[0], dtype=float).reshape((f.shape[0],1))+epoc_time))\n f = np.hstack((f,np.zeros(f.shape[0], dtype=int).reshape((f.shape[0],1))+i))\n f = np.hstack((f,np.zeros(f.shape[0], dtype=int).reshape((f.shape[0],1))+file_number(file_)))\n #f = np.hstack((f,np.array(f.shape[0]*[file_]).reshape((f.shape[0],1))))\n\n #print f[0,:]\n if Data is None:\n Data = f\n else:\n Data = np.vstack((Data,f))\n\nflag_threshold = 0\nfilt = np.ones(Data.shape[0], dtype=bool)\nfilt[Data[:,3]>flag_threshold] = False\nData = Data[filt,:]\n\nsnr_threshold = 0.15\nfilt2 = np.ones(Data.shape[0], dtype=bool)\nsnr = Data[:,4]/Data[:,5]\nfilt2[snr>snr_threshold] = False\nData = Data[filt2,:]\n\nX = Data[:,[1,2]]\n\ndb = DBSCAN(eps=0.0003, min_samples=3).fit(X)\n\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = np.asarray(db.labels_, dtype=int)\n\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nunique_labels = set(labels)\n\nn_stat = labels[labels>-1].size\n\nstatic = np.zeros(n_clusters_*len(files)).reshape((n_clusters_,len(files))) - 1\n#print Data.shape\n#print len(labels)\n\nfor i in xrange(len(labels)):\n#for i in xrange(len(labels)):\n if labels[i]>-1:\n static[labels[i],Data[i,6]] = Data[i, 0]\n#print labels\npdstatic= pd.DataFrame(data=static, columns = files)\n\nmoving = Data[:, [0,1,2,7]]\n\nmoving = moving[labels==-1,:]\n\n#for i, file_ in enumerate(files):\n#\tData[Data[:,-1]==i,-1] = file_\n\npdstatic.to_pickle(dir_out_CCD)\n\npdmoving = pd.DataFrame(data=moving, columns = ['NUMBER','X','Y','EPOCH'])\npdmoving.to_pickle(dir_out_CCD_moving)\n","sub_path":"code group 
2/backup/step1.py","file_name":"step1.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
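A minimal, self-contained illustration of the DBSCAN call used in step1.py: detections that recur at (nearly) the same sky position across epochs share a cluster label (static sources), while isolated detections come back as -1 and feed the 'moving' table. The coordinates are invented:

import numpy as np
from sklearn.cluster import DBSCAN

X = np.array([[10.0000, 20.0000],   # three detections of one static source
              [10.0001, 20.0001],
              [10.0002, 19.9999],
              [10.5000, 20.5000]])  # one stray detection
labels = DBSCAN(eps=0.0003, min_samples=3).fit(X).labels_
print(labels)  # [0 0 0 -1]: the stray point is noise, i.e. a moving candidate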
+{"seq_id":"459933414","text":"# Time Complexity : O(n + m) \n# Space Complexity : O(1) (We are running algorithm in place)\n# Did this code successfully run on Leetcode : Yes\n# Three line explanation of solution in plain english:\n# - This is similer to merging two array in divide and conqure for sorting but in reverse.\n \nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n# Initialize m, n and k. m and n is already given, we have to just reduce it by 1 because array starts from 0. K is sum of m and n or length of first array which also has exact space for merging second array.\n m = m - 1\n n = n - 1\n k = len(nums1) - 1\n# We will run this loop until m or n reach less than zero.\n while(n > -1 and m > -1):\n# If number in nums1 is greater than number in nums2we will append it to k index and reduce m index\n if nums1[m] > nums2[n]:\n nums1[k] = nums1[m]\n m -= 1\n# If number in nums2 is greater than number in nums1 we will append it to k index and reduce n index\n elif nums1[m] < nums2[n]:\n nums1[k] = nums2[n]\n n -= 1\n# If both numer are same we can append both of them to the k. M and n will be reduced once but k needs to reduce twice.\n else:\n nums1[k] = nums1[m]\n k -= 1\n m -= 1\n nums1[k] = nums2[n]\n n -= 1\n# Every time we are reducing k\n k -= 1\n \n# because we are stopping our while loop if m or n any one of them reach to -1. The other one might not have reached to -1. \n# Checking if m reached to -1.(If all element from nums1 are appended or not)\n if m > -1:\n# If some elements remained in nums1 we will append it to k\n while( m > -1):\n nums1[k] = nums1[m]\n k -= 1\n m -= 1\n \n# Checking if n reached to -1. \n if n > -1:\n# If some elements remained in nums2 we will append it to k\n while (n > -1):\n nums1[k] = nums2[n]\n k -= 1\n n -= 1\n","sub_path":"Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"411620130","text":"\"\"\"\nThis code is to implement the IndRNN (only the recurrent part). The code is based on the implementation from \nhttps://github.com/StefOe/indrnn-pytorch/blob/master/indrnn.py.\nSince this only contains the recurrent part of IndRNN, fully connected layers or convolutional layers are needed before it.\nPlease cite the following paper if you find it useful.\nShuai Li, Wanqing Li, Chris Cook, Ce Zhu, and Yanbo Gao. \"Independently Recurrent Neural Network (IndRNN): Building A Longer and Deeper RNN,\" \nIn Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5457-5466. 2018.\n@inproceedings{li2018independently,\n title={Independently recurrent neural network (indrnn): Building A longer and deeper RNN},\n author={Li, Shuai and Li, Wanqing and Cook, Chris and Zhu, Ce and Gao, Yanbo},\n booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},\n pages={5457--5466},\n year={2018}\n}\n\"\"\"\n\n\nimport torch\nfrom torch.nn import Parameter\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\n\n\nclass IndRNNCell_onlyrecurrent(nn.Module):\n r\"\"\"An IndRNN cell with ReLU non-linearity. This is only the recurrent part where the input is already processed with w_{ih} * x + b_{ih}.\n\n .. math::\n input=w_{ih} * x + b_{ih}\n h' = \\relu(input + w_{hh} (*) h)\n With (*) being element-wise vector multiplication.\n\n Args:\n hidden_size: The number of features in the hidden state h\n\n Inputs: input, hidden\n - **input** (batch, input_size): tensor containing input features\n - **hidden** (batch, hidden_size): tensor containing the initial hidden\n state for each element in the batch.\n\n Outputs: h'\n - **h'** (batch, hidden_size): tensor containing the next hidden state\n for each element in the batch\n \"\"\"\n\n def __init__(self, hidden_size, \n hidden_max_abs=None, recurrent_init=None):\n super(IndRNNCell_onlyrecurrent, self).__init__()\n self.hidden_size = hidden_size\n self.recurrent_init = recurrent_init\n self.weight_hh = Parameter(torch.Tensor(hidden_size)) \n self.reset_parameters()\n\n def reset_parameters(self):\n for name, weight in self.named_parameters():\n if \"weight_hh\" in name:\n if self.recurrent_init is None:\n nn.init.uniform(weight, a=0, b=1)\n else:\n self.recurrent_init(weight)\n\n def forward(self, input, hx):\n return F.relu(input + hx * self.weight_hh.unsqueeze(0).expand(hx.size(0), len(self.weight_hh)))\n\n\nclass IndRNN_onlyrecurrent(nn.Module):\n r\"\"\"Applies an IndRNN with `ReLU` non-linearity to an input sequence. \n This is only the recurrent part where the input is already processed with w_{ih} * x + b_{ih}.\n\n\n For each element in the input sequence, each layer computes the following\n function:\n\n .. math::\n\n h_t = \\relu(input_t + w_{hh} (*) h_{(t-1)})\n\n where :math:`h_t` is the hidden state at time `t`, and :math:`input_t`\n is the input at time `t`. (*) is element-wise multiplication.\n\n Args:\n hidden_size: The number of features in the hidden state `h` \n\n Inputs: input, h_0\n - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features\n of the input sequence. The input can also be a packed variable length\n sequence. 
See :func:`torch.nn.utils.rnn.pack_padded_sequence`\n or :func:`torch.nn.utils.rnn.pack_sequence`\n for details.\n - **h_0** of shape `( batch, hidden_size)`: tensor\n containing the initial hidden state for each element in the batch.\n Defaults to zero if not provided.\n\n Outputs: output \n - **output** of shape `(seq_len, batch, hidden_size)`\n \"\"\"\n\n def __init__(self, hidden_size,recurrent_init=None, **kwargs):\n super(IndRNN_onlyrecurrent, self).__init__()\n self.hidden_size = hidden_size\n self.indrnn_cell=IndRNNCell_onlyrecurrent(hidden_size, **kwargs)\n\n if recurrent_init is not None:\n kwargs[\"recurrent_init\"] = recurrent_init\n self.recurrent_init=recurrent_init\n # h0 = torch.zeros(hidden_size * num_directions)\n # self.register_buffer('h0', torch.autograd.Variable(h0))\n self.reset_parameters()\n\n def reset_parameters(self):\n for name, weight in self.named_parameters():\n if \"weight_hh\" in name:\n if self.recurrent_init is None:\n nn.init.uniform(weight, a=0, b=1)\n else:\n self.recurrent_init(weight)\n\n def forward(self, input, h0=None):\n assert input.dim() == 2 or input.dim() == 3 \n if h0 is None:\n h0 = input.data.new(input.size(-2),input.size(-1)).zero_().contiguous()\n elif (h0.size(-1)!=input.size(-1)) or (h0.size(-2)!=input.size(-2)):\n raise RuntimeError(\n 'The initial hidden size must be equal to input_size. Expected {}, got {}'.format(\n h0.size(), input.size()))\n outputs=[]\n hx_cell=h0\n for input_t in input:\n hx_cell = self.indrnn_cell(input_t, hx_cell)\n outputs.append(hx_cell)\n out_put = torch.stack(outputs, 0)\n return out_put\n","sub_path":"IndRNN_onlyrecurrent.py","file_name":"IndRNN_onlyrecurrent.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
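Since the module above implements only the recurrent part, its input must already be w_ih * x + b_ih; a minimal usage sketch with an ordinary Linear layer in front (shapes follow the docstring: seq_len, batch, hidden):

import torch
import torch.nn as nn

seq_len, batch, input_size, hidden_size = 12, 4, 8, 16
fc = nn.Linear(input_size, hidden_size)  # produces w_ih * x + b_ih per step
rnn = IndRNN_onlyrecurrent(hidden_size)  # class defined in the record above

x = torch.randn(seq_len, batch, input_size)
out = rnn(fc(x))         # Linear applies over the last dim, i.e. per time step
print(out.shape)         # torch.Size([12, 4, 16])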
+{"seq_id":"378389856","text":"# https://github.com/agdelma/IntroCompPhysics/blob/master/Notebooks/16_SimpleHarmonicMotion.ipynb\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.constants import pi as π\nfrom scipy.constants import g\n\nℓ = 0.25\nΔt = 0.01\n\nt = np.arange(0.0, 4.0, Δt)\nθ, ω = np.zeros_like(t), np.zeros_like(t)\nθ[0] = π/12.0\n\nbetterθ, betterω = np.zeros_like(t), np.zeros_like(t)\nbetterθ[0] = π/4\n\nfor n in range(t.size - 1):\n# θ[n + 1] = θ[n] + ω[n] * Δt\n# ω[n + 1] = ω[n] - (g / ℓ) * np.sin(θ[n]) * Δt\n ω[n + 1] = ω[n] - (g / ℓ) * np.sin(θ[n]) * Δt\n θ[n + 1] = θ[n] + ω[n + 1] * Δt\n\ndef nonlinearθ(ℓ, θ0, t):\n '''Special function for non-linear pendulum'''\n from scipy import special\n k = np.sin(θ0 / 2)\n K = special.ellipk(k*k)\n (sn, cn, dn, ph) = special.ellipj(K - np.sqrt(g/l) * t, k * k)\n return 2 * np.arcsin(k * sn)\n\n# Small angle solution\n#plt.plot(t, θ[0]*np.cos(np.sqrt(g/ℓ)*t), label='Small angle solution')\nplt.plot(t, nonlinearθ(ℓ, θ[0], t), label = \"Exact\")\n\n# the Euler method\nplt.plot(t,θ, label='Euler method')\n\nplt.legend(loc='lower left')\n\nplt.xlabel('Time [s]')\nplt.ylabel('θ(t) [rad]')\n\nplt.show()\n","sub_path":"InClass/10-14-16.py","file_name":"10-14-16.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"79947433","text":"from sensor.co2 import mhz19b\nimport argparse\nimport subprocess\n\n\nclass Mhz19bCtrl:\n SERIAL_DEV = '/dev/ttyS0'\n SERIAL_START = 'sudo systemctl start serial-getty@ttyS0.service'\n SERIAL_STOP = 'sudo systemctl stop serial-getty@ttyS0.service'\n\n def __init__(self):\n self.p = subprocess.call(self.SERIAL_STOP, stdout=subprocess.PIPE, shell=True)\n self.sensor = mhz19b.Mhz19b(self.SERIAL_DEV)\n\n def __del__(self):\n self.p = subprocess.call(self.SERIAL_START, stdout=subprocess.PIPE, shell=True)\n\n def calibration(self, status):\n if status == 'on':\n return self.sensor.ABC_logic_ON()\n elif status == 'off':\n return self.sensor.ABC_logic_OFF()\n elif status == 'zero':\n return self.sensor.zero_calibration()\n\n def read_co2(self):\n return self.sensor.read_co2()\n","sub_path":"homeiot/sensor/co2/mhz19bCtrl.py","file_name":"mhz19bCtrl.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"182701663","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport time\nimport datetime\nimport json\nimport linecache\nimport collections\nimport markdown2html\n\nimport sys\nreload(sys) \nsys.setdefaultencoding('utf8')\n\nauthor='Jet Wang'\njson_file=r''\nmarkdown_file=r''\n\ndef getFileModifyTime(filename): \n '''\n time.localtime(os.stat(file).st_ctime) #判断文件的创建时间\n time.localtime(os.stat(file).st_mtime) #判断文件的最后修改时间\n '''\n filemt = time.localtime(os.stat(filename).st_mtime) #判断文件的最后修改时间\n filetime = datetime.datetime(filemt[0] , filemt[1] , filemt[2], filemt[3], filemt[4], filemt[5])\n return filetime\n\ndef convertDateFormat(dt):\n date_str = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n return date_str[:10]+'T'+date_str[11:]+'.000Z'\n \ndef buildJsonFromMarkdownFile(filename, fileid):\n '''\n jekyll博客markdown文件的格式例子\n \n ---\n layout: post\n title: \"文章标题\"\n description: \"\"\n category: 技术\n tags: [Linux]\n ---\n 正文部分\n \n '''\n \n posts = collections.OrderedDict()\n \n posts['id'] = str(fileid)\n posts['uuid'] = str(uuidGen())\n \n #获取post中文标题,去掉引号\n posts['title'] = linecache.getline(filename, 3)[len('title:'):].strip()[1:-1]\n #获取post链接地址,根据markdown文件名拼接,去掉.md后缀\n posts['slug'] = filename.split('\\\\')[-1][:-3]\n #获取post正文内容\n text = []\n lines = open(filename, 'r').readlines()[9:]\n for line in lines:\n# text.append(line.strip('\\n').decode('utf-8'))\n text.append(line)\n post_content = ''.join(text)\n posts['markdown'] = post_content\n \n posts['mobiledoc'] = 'null'\n posts['html'] = markdown2html.convertMarkdownText2HtmlText(filename)\n posts['image'] = 'null'\n posts['featured'] = '0'\n posts['page'] = '0'\n posts['status'] = 'published'\n posts['language'] = 'en_US'\n posts['visibility'] = 'public'\n posts['meta_title'] = posts['title']\n posts['meta_description'] = 'null'\n posts['author_id'] = 1\n posts['created_at'] = convertDateFormat(getFileModifyTime(filename))\n posts['created_by'] = 1\n posts['updated_at'] = convertDateFormat(getFileModifyTime(filename))\n posts['updated_by'] = 1\n posts['published_at'] = convertDateFormat(getFileModifyTime(filename))\n posts['published_by'] = 1\n\n return posts\n \ndef uuidGen():\n import uuid\n return uuid.uuid1()\n\ndef dict2JsonFile(dict, filename):\n j = json.dumps(dict,ensure_ascii=False,indent=4)\n f = open(filename, 'w+')\n print >> f, j\n f.close()\n \ndef getContentFromJsonFile(filename):\n '''\n Ghost博客导出的post格式\n \n \"posts\": [{\n \"id\": 2,\n \"uuid\": \"4150734c-4fd6-437c-9a05-d4d28d3bb986\",\n \"title\": \"Hello Ghost\",\n \"slug\": \"hello-ghost\",\n \"markdown\": \"This is the begining\",\n \"mobiledoc\": null,\n \"html\": \"This is the begining
\",\n \"image\": null,\n \"featured\": 0,\n \"page\": 0,\n \"status\": \"published\",\n \"language\": \"en_US\",\n \"visibility\": \"public\",\n \"meta_title\": null,\n \"meta_description\": null,\n \"author_id\": 1,\n \"created_at\": \"2016-07-06T08:30:55.000Z\",\n \"created_by\": 1,\n \"updated_at\": \"2016-07-06T08:31:28.000Z\",\n \"updated_by\": 1,\n \"published_at\": \"2016-07-06T08:31:28.000Z\",\n \"published_by\": 1\n },\n '''\n \n json_post = {}\n \n with open(filename) as json_data:\n d = json.load(json_data)\n for d1 in d['db']:\n for d2 in d1['data']['posts']:\n id = d2['id']\n uuid = d2['uuid']\n title = d2['title']\n slug = d2['slug']\n markdown = d2['markdown']\n mobiledoc = d2['mobiledoc']\n html = d2['html']\n image = d2['image']\n featured = d2['featured']\n page = d2['page']\n status = d2['status']\n language = d2['language']\n visibility = d2['visibility']\n meta_title = d2['meta_title']\n meta_description = d2['meta_description']\n author_id = d2['author_id']\n created_at = d2['created_at']\n created_by = d2['created_by']\n updated_at = d2['updated_at']\n updated_by = d2['updated_by']\n published_at = d2['published_at']\n published_by = d2['published_by']\n\n json_post[uuid] = [id,title,slug,markdown,mobiledoc,html,image,featured,\n page,status,language,visibility,meta_title,meta_description,\n author_id,created_at,created_by,updated_at,updated_by,published_at,\n published_by]\n \n return json_post\n\ndef setContentToJsonFile():\n '''\n 提取jekyll博客markdown文件的内容,按照Ghost博客的格式构造出可以供导入的json文件\n '''\n i = 3\n targetFile = r'C:\\temp\\ghost\\2016-08-15-all-post-contents.json'\n folder = r'D:\\code\\github\\hexo\\myhexoblog\\source\\_posts'\n posts = []\n for dirpath, dirnames, filenames in os.walk(folder):\n for filename in filenames:\n i = i + 1\n filename = os.path.join(dirpath, filename)\n post = buildJsonFromMarkdownFile(filename, i)\n posts.append(post)\n \n dict2JsonFile(posts, targetFile)\n \nif __name__ == '__main__':\n setContentToJsonFile()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"35698425","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models\nfrom odoo.tools.translate import _\nfrom odoo.tools.misc import formatLang, format_date\n\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nLINE_FILLER = '*'\nINV_LINES_PER_STUB = 9\n\nclass report_print_check(models.Model):\n _inherit = 'account.payment'\n\n def make_stub_line(self, invoice):\n result = super(report_print_check,self).make_stub_line(invoice)\n discount = 0.0\n for line in invoice.invoice_line_ids:\n discount += line.price_unit *(line.discount or 0.00) / 100.0\n result['discount'] = formatLang(self.env, discount, currency_obj=invoice.currency_id)\n _logger.info('========dis==%r',discount)\n return result","sub_path":"prime_doors_check_printing_update/models/print_check.py","file_name":"print_check.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"581267285","text":"# determine how long it will take you to save enough money to make housing down payment\n\n# annual salary\nannual_salary = int(input(\"Enter your annual salary: \"))\n# dedicate a certain amount of your salary each month to saving for the down payment\nportion_saved = float(input(\"Enter the percent of your salary to save, as a decimal: \"))\n# cost of dream house\ntotal_cost = int(input(\"Enter the cost of your dream house: \"))\n\n\ndef calc():\n # portion of cost needed for down payment\n portion_down_payment = .25\n # monthly return on investment rate\n r = 0.04 / 12\n # down payment for dream house\n down_payment = portion_down_payment * total_cost\n monthly_savings = annual_salary / 12 * portion_saved\n current_savings = 0\n return_on_invest = 0\n month_count = 0\n\n while current_savings != down_payment:\n month_count += 1\n return_on_invest = current_savings * r\n current_savings = current_savings + monthly_savings + return_on_invest\n # print(month_count, current_savings, monthly_savings, return_on_invest)\n\n if current_savings > down_payment:\n print(\"Number of months: {}\".format(month_count))\n break\n\n\ncalc()\n","sub_path":"MIT_Python/ps1a.py","file_name":"ps1a.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"349618597","text":"# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.\n#\n# This file is part of Navitia,\n# the software to build cool stuff with public transport.\n#\n# Hope you'll enjoy and contribute to this project,\n# powered by Canal TP (www.canaltp.fr).\n# Help us simplify mobility and open public transport:\n# a non ending quest to the responsive locomotion way of traveling!\n#\n# LICENCE: This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n# Stay tuned using\n# twitter @navitia\n# IRC #navitia on freenode\n# https://groups.google.com/d/forum/navitia\n# www.navitia.io\nfrom flask_restful import abort\nimport flask_restful\nfrom pymongo.errors import PyMongoError\nfrom tartare import app\nfrom tartare.core import models\nimport logging\nfrom tartare.interfaces import schema\nfrom marshmallow import ValidationError\nfrom flask import request\n\n\nclass Coverage(flask_restful.Resource):\n def post(self):\n coverage_schema = schema.CoverageSchema(strict=True)\n try:\n coverage = coverage_schema.load(request.json).data\n except ValidationError as err:\n return {'error': err.messages}, 400\n\n try:\n coverage.save()\n except PyMongoError as e:\n logging.getLogger(__name__).exception('impossible to add coverage {}'.format(coverage))\n return {'error': str(e)}, 500\n\n return {'coverage': coverage_schema.dump(coverage).data}, 201\n\n def get(self, coverage_id=None):\n if coverage_id:\n c = models.Coverage.get(coverage_id)\n if c is None:\n abort(404)\n\n result = schema.CoverageSchema().dump(c)\n return {'coverage': result.data}, 200\n\n coverages = models.Coverage.all()\n\n return {'coverages': schema.CoverageSchema(many=True).dump(coverages).data}, 200\n\n def delete(self, coverage_id):\n c = models.Coverage.delete(coverage_id)\n if c == 0:\n abort(404)\n return {'coverage': None}, 204\n\n def patch(self, coverage_id):\n coverage = models.Coverage.get(coverage_id)\n if coverage is None:\n abort(404)\n if 'id' in request.json and coverage.id != request.json['id']:\n return {'error': 'The modification of the id is not possible'}, 400\n coverage_schema = schema.CoverageSchema(partial=True)\n errors = coverage_schema.validate(request.json, partial=True)\n if errors:\n return {'error': errors}, 400\n\n logging.debug(request.json)\n try:\n coverage = models.Coverage.update(coverage_id, request.json)\n except PyMongoError as e:\n logging.getLogger(__name__).exception('impossible to update coverage with dataset {}'.format(request.json))\n return {'error': str(e)}, 500\n\n return {'coverage': schema.CoverageSchema().dump(coverage).data}, 200\n","sub_path":"tartare/interfaces/coverages.py","file_name":"coverages.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"586633630","text":"from __future__ import division, print_function, absolute_import\n\nimport codecs\nimport json\nimport logging\nimport os\nimport urllib\n\nimport numpy as np\nimport pandas as pd\nimport requests\n\n\nfrom config import pathDict, api_call\n\nlogging.basicConfig(level=logging.DEBUG, filename=\"logfile.log\", filemode=\"w\",\n format=\"%(asctime)-15s %(levelname)-8s %(message)s\")\n\n\n\nmap_size = [400,400]\nzoom_lvl = 40\nmetaURL_head = 'https://maps.googleapis.com/maps/api/geocode/json?address='\naerialURL_head = 'https://maps.googleapis.com/maps/api/staticmap?center='\nmetaURL_tail = '&key=%s'%(api_call['google_meta_key'])\naerialURL_tail = '&maptype=satellite&key=%s'%(api_call['google_aerial_key'])\nreader = codecs.getreader(\"utf-8\")\n\n# Statistic and Image Dump paths\ngoogle_aaerial_stats_path = pathDict['google_aerial_stats_path']\ngoogle_house_dump_path = os.path.join(pathDict['google_aerial_image_path'], 'house')\ngoogle_land_dump_path = os.path.join(pathDict['google_aerial_image_path'], 'land')\ngoogle_unknown_dump_path = os.path.join(pathDict['google_aerial_image_path'], 'unknown')\n\nfor dir in [google_aaerial_stats_path, google_house_dump_path, google_land_dump_path, google_unknown_dump_path]:\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n\ndef metadata_prep(metadata):\n metadata.columns = ['row_id', 'removed', 'property_id', 'state', 'county_name', 'pin',\n 'address_line1', 'address_line2', 'address_city', 'address_zip',\n 'zoning', 'improvement_level', 'type', 'exterior',\n 'last_reviewed_timestamp', 'gone_timestamp', 'indicator',\n 'assessor_photo']\n\n metadata['state'] = metadata['state'].astype('str')\n metadata['county_name'] = metadata['county_name'].astype('str')\n metadata['pin'] = metadata['pin'].astype('str')\n metadata['address_line1'] = metadata['address_line1'].astype('str')\n metadata['address_line2'] = metadata['address_line2'].astype('str')\n metadata['address_city'] = metadata['address_city'].astype('str')\n metadata['address_zip'] = metadata['address_zip'].astype('str')\n metadata['zoning'] = metadata['zoning'].astype('str')\n metadata['improvement_level'] = metadata['improvement_level'].astype('str')\n metadata['type'] = metadata['type'].astype('str')\n metadata['exterior'] = metadata['exterior'].astype('str')\n metadata['last_reviewed_timestamp'] = metadata['last_reviewed_timestamp'].astype('str') # .astype('str')\n metadata['gone_timestamp'] = metadata['gone_timestamp'].astype('str')\n metadata['indicator'] = metadata['indicator'].astype('str')\n metadata['assessor_photo'] = metadata['assessor_photo'].astype('str')\n \n return metadata\n\n\n\nclass GoogleFetch_AerialMap():\n def __init__(self, params):\n pass\n \n @staticmethod\n def get_latlon_locationtype(address_line, city=None, state=None):\n '''\n :param address_line : '555E, 33rd Place'\n :param city: 'chicago'\n :param state: 'IL'\n :return:\n lat: latitude of the property\n lon: longitude of the property\n location_type = ROOFTOP\n url : The URL used to fetch the meta data information\n \n '''\n address_string = '+'.join([add for add in address_line.split(' ')])\n if city:\n address_string = address_string + '+' + '+'.join([add for add in city.split(' ')])\n if state:\n address_string = address_string + '+' + state\n \n # print (address_string)\n url = metaURL_head + address_string + metaURL_tail\n r = urllib.request.urlopen(url)\n res_body = r.read()\n content = json.loads(res_body.decode(\"utf-8\"))\n\n if content['status'] == 'OK':\n # try:\n lat = 
content['results'][0]['geometry']['location']['lat']\n lon = content['results'][0]['geometry']['location']['lng']\n location_type = content['results'][0]['geometry']['location_type']\n return lat, lon, location_type, url\n elif content['status'] == 'OVER_QUERY_LIMIT':\n return 'EXCEED', 'EXCEED', 'EXCEED', 'EXCEED'\n else:\n # except KeyError:\n logging.info('GET_LATLON: Content lat lon not found')\n return None, None, None, None\n \n @staticmethod\n def get_aerial_image_given_latlon(lat, lon, zoom=19, map_size='400x400'):\n '''\n :param lat: The input latitude\n :param lon: The input Longitude\n :param zoom: The input zoom level\n :param map_size: The input mapSize\n :return:\n : image_data : The image to be saved\n : location_url: The url used to fetch the image\n '''\n location_url = aerialURL_head + str(lat) + ' ' + str(lon) + '&zoom=' + str(zoom) + '&size=' + map_size + \\\n aerialURL_tail\n try:\n img_data = requests.get(location_url).content\n return img_data, location_url\n\n except:\n logging.info('GET_AERIAL_IMAGE: Response error')\n return None, None, None\n\n\n\ndef fetch_google_aerial_images(dataIN, batch_size, get_stats=False):\n data_arr = np.array(dataIN[['pin', 'address_line1', 'address_city', 'indicator']], dtype='str')\n\n statistics = []\n prev = 0\n state = 'IL'\n zoom = 19\n map_size = '400x400'\n for num, (pin, add1, city, indicator) in enumerate(data_arr):\n # if num\n lat = 'nan'\n lon = 'nan'\n meta_url = 'nan'\n img_url = 'nan'\n location_type = 'nan'\n if str(add1) != 'nan':\n\n lat, lon, location_type, meta_url = GoogleFetch_AerialMap.get_latlon_locationtype(address_line=add1,\n city=city, state=state)\n if lat ==None or lon == None or meta_url == None:\n lat = 'nan'\n lon = 'nan'\n meta_url = 'nan'\n elif lat == 'EXCEED':\n logging.info('Total extraction quota for today EXCEEDS the Free Quota LIMIT')\n else:\n image_data, img_url = GoogleFetch_AerialMap.get_aerial_image_given_latlon(lat=lat, lon=lon,\n zoom=zoom, map_size=map_size)\n \n if indicator == \"Likely House\":\n with open(os.path.join(google_house_dump_path, '%s.jpg' % str(pin)), 'wb') as handler:\n handler.write(image_data)\n elif indicator == 'Likely Land':\n with open(os.path.join(google_land_dump_path, '%s.jpg' % str(pin)),'wb') as handler:\n handler.write(image_data)\n else:\n with open(os.path.join(google_unknown_dump_path, '%s.jpg' % str(pin)), 'wb') as handler:\n handler.write(image_data)\n \n b = \"TOTAL RECORDS PARSED: IMAGES DONE ======== %s\"\n print(b % (num), end=\"\\r\")\n \n if get_stats:\n statistics.append([pin, add1, city, lat, lon, location_type, indicator, meta_url, img_url, ])\n \n if ((num + 1) % batch_size) == 0 or num == len(data_arr) - 1:\n if get_stats:\n file_path = os.path.join(google_aaerial_stats_path, '%s_%s.csv' % (prev, num))\n statistics = pd.DataFrame(statistics,\n columns=['pin', 'address', 'city', 'lat','lon','loc_type',\n 'indicator','meta_url','img_url'])\n statistics.to_csv(file_path, index=None)\n prev = num + 1\n \n statistics = []\n \n \n \n\n\n\ndebugg1 = False\nif debugg1:\n lat, lon, location_type, url = GoogleFetch_AerialMap.get_latlon_locationtype(address_line='555E 33rd place',\n city='chicago', state='IL')\n img, location_url = GoogleFetch_AerialMap.get_aerial_image_given_latlon(lat=lat, lon=lon, zoom=19,\n map_size='400x400')\n\ndebugg = False\nif debugg:\n input_path = os.path.join(pathDict['parent_path'], 'house_metadata_nw.csv')\n print (input_path)\n metadata = pd.read_csv(input_path)\n logging.info('Metadata shape: %s', str(metadata.shape))\n 
metadata = metadata_prep(metadata)\n \n # Remove Test Data set\n metadata = metadata[metadata['removed'] == 0]\n logging.info('Metadata after removing test data set shape: %s', str(metadata.shape))\n\n # Remove data where the last_reviewed_timestamp column doesn't have a valid timestamp\n metadata = metadata[metadata['last_reviewed_timestamp'] != 'nan']\n logging.info('Metadata after retaining last_reviewed_timestamp: %s', str(metadata.shape))\n logging.info('Metadata Head: \\n %s', str(metadata.head()))\n\n metadata = pd.concat([metadata[metadata['indicator'] == 'Likely Land'].head(50),\n metadata[metadata['indicator'] == 'Likely House'].head(50)])\n\n fetch_google_aerial_images(metadata, batch_size=50, get_stats=True)\n","sub_path":"external_data/google_aerial.py","file_name":"google_aerial.py","file_ext":"py","file_size_in_byte":9262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"253678339","text":"\"\"\"\n------------------------------------------------------------------------\n_THREAD.PY\n\nAUTHOR(S): Peter Walker pwalker@csumb.edu\n\nPURPOSE- This object will represent a single thread, or pipe, in a network\n speed test. It will hold an array of Measurement objects, and has\n some basic object information\n------------------------------------------------------------------------\n\"\"\"\nif __name__==\"__main__\":\n raise SystemExit\n\n# IMPORTS\nfrom __Base import Formatting\nfrom _Measurement import Measurement as Msmt\nfrom _Measurement import Final_Measurement as FMsmt\n#END IMPORTS\n\n\nclass Thread(Formatting):\n\n \"\"\"\n An abstract Thread class that takes care of most parsing\n\n ATTRIBUTES\n ThreadNumber Integer, the number identifier associated with this thread\n DataDirection String, the direction of data travel ('UP' or 'DOWN')\n LocalIP String, the IP address of the device conducting the test\n LocalPort Integer, the port this test is using\n ServerIP String, the IP address of the server this device is connected to\n ServerPort Integer, the port this device is sending information to\n Measurements List of Measurement objects\n FinalMsmt Measurement object, the final summation measurement\n \"\"\"\n\n def __init__(self, dataArr=None, threadNum=0, direction=\"UP\", units=(\"KBytes\", \"Kbits/sec\")):\n \"\"\"\n Used to initialize an object of this class\n ARGS:\n dataArr List of Strings, each String is a measurement that will be parsed and stored in this object\n ThreadNum Integer, the number that this thread is (generally between 3 and 6)\n direction String, the direction of the data in this thread (UP or DOWN)\n units Tuple of two Strings, the units being used by the measurements\n \"\"\"\n #Setting up the whitespace padding that this class will need\n Formatting.__init__(self)\n self.StringPadding = self.StringPadding*3\n #Class variables\n self.Measurements = []\n self.ThreadNumber = threadNum\n self.DataDirection = direction\n #This function assumes that the array of strings (dataArr) is not in order\n #This takes the given data String and parses the object information\n for line in dataArr:\n if \"connected with\" in line:\n line = line.split(\"local\", 1)[1].strip()\n self.LocalIP = line.split(\"port\")[0].strip()\n line = line.split(\"port\", 1)[1].strip()\n self.LocalPort = line.split(\"connected\")[0].strip()\n line = line.split(\"connected with\", 1)[1].strip()\n self.ServerIP = line.split(\"port\", 1)[0].strip()\n line = line.split(\"port\", 1)[1].strip()\n self.ServerPort = line.split(\"\\n\")[0].strip()\n break\n #END IF\n #END FOR\n #Removing the line from the array of pings that contains the connection info\n # and then creating all of the pings from the remaining strings\n allMeasurements = [dataLine for dataLine in dataArr\n if \"connected with\" not in dataLine]\n self.FinalMsmt = None\n for line in allMeasurements:\n #We do a quick check for the string stored in units[1]. If that string is\n # present in a line, then it must be a measurement that we want to parse\n if (units[1] in line) and (\"%\" not in line):\n #Make a measurement object out of the line.\n newMsmt = Msmt(data=line, units=units)\n #If the measurement's start time is one second behind it's end time, then\n # we can assume that this is one of interval measurements. 
Otherwise, it is\n # the final summary measurement, and we put the object in self.FinalMsmt\n if (newMsmt.TimeStart == newMsmt.TimeEnd-1):\n #This is for the UDP 1 second tests, where this only 1 regular\n # measurement, and then a final measurement. We delete the old object,\n # and create a new one of type Final_Measurement\n if (newMsmt.TimeStart == 0) and (len(self.Measurements) == 1):\n FinalMsmt = FMsmt(data=line, units=units)\n self.FinalMsmt = FinalMsmt\n break\n else:\n self.Measurements.insert(int(newMsmt.TimeStart), newMsmt)\n #END IF/ELSE\n else:\n FinalMsmt = FMsmt(data=line, units=units)\n self.FinalMsmt = FinalMsmt\n #END IF/ELSE\n #END IF\n #END FOR\n #END DEF\n\n def arrayOfMsmts(self, attribute=\"Speed\"):\n \"\"\"\n Will return an array of the Measurements in self.Measurements as an array\n of Numbers. Can be given the attribute of the measurement that needs to be arraytized\n ARGS:\n attribute String, can be \"speed\" or \"size\" (attribute of Measurment)\n RETURNS:\n list, containing all of the values in the Measurement object of myMeasurements\n \"\"\"\n if attribute not in [\"Speed\", \"Size\"]:\n print(\"The attribute specified must be either 'Speed' or 'Size'. Using 'Speed'\")\n attribute = \"Speed\"\n #END IF\n #This uses list comprehension to return a list of all of the measurement's attribute,\n # specified by the variable attribute\n return [msmt.__dict__[attribute] for msmt in self.Measurements]\n #END DEF\n\n\n# STRING PRINTOUT --------------------------------------------------------------\n\n def __str__(self):\n \"\"\"Returns a string representation of the object\"\"\"\n string = (self.StringPadding +\n \"Thread Number: {}\\n\".format(self.ThreadNumber) +\n self.StringPadding +\n \"Data Direction: {}\\n\".format(self.DataDirection) +\n self.StringPadding +\n \"Local: {}:{}\\n\".format(self.LocalIP,self.LocalPort) +\n self.StringPadding +\n \"Server: {}:{}\\n\".format(self.ServerIP,self.ServerPort)\n )\n for msmt in self.Measurements:\n string += str(msmt) + \"\\n\"\n string += str(self.FinalMsmt) + \"\\n\"\n return string\n #END DEF\n#END CLASS\n","sub_path":"PyFiles/FileParser/_Thread.py","file_name":"_Thread.py","file_ext":"py","file_size_in_byte":6525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"362270007","text":"from django.db import transaction\nfrom django.utils import timezone\nfrom rest_framework import viewsets, permissions, status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom island.exceptions import PermissionDenied, ServiceUnavailable, APIException\nfrom twitter_image.models import Task, TweetData, ImageData, TaskTweet\nfrom twitter_image.serializers import TaskSerializer, TaskTweetSerializer, ImageDataSerializer\nfrom aiohttp.client_exceptions import ClientError\nfrom asyncio import TimeoutError\nimport logging\n\nlogger = logging.getLogger('twitter_image')\n\n\nclass TaskViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = TaskSerializer\n permission_classes = [permissions.IsAuthenticated]\n filterset_fields = ['username', 'tag']\n search_fields = ['tag']\n ordering_fields = ['last_update']\n ordering = ['-last_update']\n\n def get_queryset(self):\n return Task.objects.filter(owner=self.request.user)\n\n @action(detail=True, methods=['patch'])\n def refresh(self, request, pk=None):\n try:\n with transaction.atomic():\n task = Task.objects.select_for_update(skip_locked=True).get(pk=pk)\n if task.owner != request.user:\n raise PermissionDenied()\n with timezone.override(None):\n task.update()\n task.last_update = timezone.now()\n task.save()\n serializer = TaskSerializer(task, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (ClientError, TimeoutError) as e:\n logger.error('[Proxy Down]', exc_info=e)\n raise ServiceUnavailable()\n except BaseException as e:\n logger.fatal('[Unknow Error]', exc_info=e)\n raise APIException()\n\n\nclass TaskTweetViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = TaskTweetSerializer\n permission_classes = [permissions.IsAuthenticated]\n filterset_fields = ['new', 'task']\n search_fields = ['tweet__tweet']\n ordering_fields = ['tweet__time']\n ordering = ['-tweet__time']\n\n def get_queryset(self):\n return TaskTweet.objects.filter(task__owner=self.request.user)\n","sub_path":"twitter_image/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"199323041","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport traceback\nimport json\nfrom pprint import pprint\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.forms.formsets import formset_factory\n\nfrom kerotan.forms import AddressForm\n\nfrom .ekitan.ekitan_api import Ekitan\nfrom .gmaps_geocoder.gmaps_geocoder import GoogleMapsGeocoder\nfrom .bing_news.bing_api import Bing\nfrom .wikipedia.wikipedia_api import get_overview\n\nAPI_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), u'..', u'..', u'API')\nsys.path.append(API_DIR)\nfrom get_API_key import get_API_key\n\n\ndef display_google_map(request):\n if request.method == 'POST':\n form = AddressForm(request.POST)\n company_address = {\n \"TIS\": \"東京都新宿区 西新宿8丁目17−1\",\n \"幕張メッセ\": \"〒261-0023 千葉市美浜区中瀬2-1\",\n \"新宿駅\": \"〒160-0023 東京都新宿区西新宿1丁目1−3\",\n \"東京駅\": \"〒100-0005 東京都千代田区丸の内1 丁目 呑んき1丸の内北口店 9\",\n \"大阪駅\": \"大阪府大阪市北区梅田3丁目1−1\",\n \"立命館大学\": \"〒525-8577 滋賀県草津市野路東1丁目1-1\",\n }\n company_overview = {\n \"TIS\": \"TIS株式会社(ティーアイエス)は国内大手システムインテグレーター。傘下にインテック、アグレックス、クオリカ、AJSなどを擁するTISインテックグループの中核企業である。\",\n \"幕張メッセ\": \"幕張メッセ(まくはりメッセ、Makuhari Messe)は、千葉県千葉市美浜区にある大型の会議・展示施設である。また、株式会社幕張メッセは、これを運営する企業である。\",\n \"新宿駅\": \"新宿駅(しんじゅくえき)は、東京都新宿区・渋谷区にある、東日本旅客鉄道(JR東日本)・京王電鉄・小田急電鉄・東京地下鉄(東京メトロ)・東京都交通局(都営地下鉄)の駅である。\",\n \"東京駅\": \"東京駅(とうきょうえき)は、東京都千代田区丸の内一丁目にある、東日本旅客鉄道(JR東日本)・東海旅客鉄道(JR東海)・東京地下鉄(東京メトロ)の駅である。\",\n \"大阪駅\": \"大阪駅(おおさかえき)は、大阪府大阪市北区梅田三丁目にある、西日本旅客鉄道(JR西日本)の駅である。\",\n \"立命館大学\": \"立命館大学(りつめいかんだいがく、英語: Ritsumeikan University)は、京都府京都市中京区西ノ京朱雀町1に本部を置く日本の私立大学である。1922年に設置された。大学の略称は立命、立命館、立命大。近畿圏では立大も使用される[1]。\"\n }\n# \t\tcompany_overview={\n# \t\t\t\"TIS\":\"TIS株式会社(初代)は、1971年(昭和46年)4月、三和銀行および三和グループを中心に大阪市東区(現在の大阪市中央区)に\\\n# 株式会社東洋情報システム(資本金6億円)として設立された。現在の法人は、2016年(平成28年)7月にITホールディングス株式会社がTIS株式会社(初代)を\\\n# 吸収合併し、商号変更したものである。三和グループに属する企業で構成されるみどり会の会員企業でもある。\\\n# JCBを中心としたクレジットカード会社の基幹システムなどに強みを持ち、国内シェアは5割弱で首位。\",\n# \t\t\t\"幕張メッセ\":\"幕張メッセのメッセとは、ドイツ語の'見本市'の意味を指す'Messe'に由来する。1989年(平成元年)10月9日に開業した。\\\n# 現在は、東京国際展示場(東京ビッグサイト、東西合計)に次ぐ国内2番目の規模となっている。運営会社は、1986年(昭和61年)4月30日に設立され、\\\n# 2005年(平成17年)7月1日に社名を株式会社日本コンベンションセンター(Nippon Convention Center)から株式会社幕張メッセに変更した。\\\n# 当館の整備費は増設分を合わせると約558億円を投じており、年間の維持管理費が約20億円掛かるとされており、千葉県と千葉市の公費による一部負担が続いている。\\\n# しかし、都心から近い東京国際展示場開業の影響を受け、2011年(平成23年)に東京モーターショーが東京ビッグサイトへ移るなど利用は伸び悩み、\\\n# 2013年(平成23年)度の稼働率は約40%となり、横浜市のパシフィコ横浜の稼働率の約70%を大きく下回っている。\",\n# \t\t\t\"新宿駅\":\"東京の副都心・新宿に位置するターミナル駅である。1885年(明治18年)に日本鉄道により現在の山手線が開業したのが当駅の始まりである。\\\n# 4年後の1889年(明治22年)には南豊島郡淀橋町となる。開業時から新宿を副都心にする計画が発��されるまでは当駅周辺はまだ街の外れであり利用客は少ないものだったが、\\\n# 大正期に入り次第に市街地が拡大するにつれ、多くの私鉄が乗り入れるようになる。ターミナルとなって周辺が発展するにつれて利用客は増え続け、1931年には私鉄や国鉄などを合わせた\\\n# 利用者数で日本一になった[1]。そして、1966年(昭和41年)の乗車人数では、国鉄池袋駅の41万67人を抜いて、当駅が41万69人と日本一になっている。\\\n# さらに1960年代から当駅西側一帯で進められた新宿副都心計画によって、70年代には多くの超高層ビルが建てられ利用者の増加に拍車がかかった。\\\n# 現在ではJR・私鉄・地下鉄の多くの路線が周辺地域のベッドタウンとを結んでおり、多くのビジネス客が利用する。さらに、当駅周辺は日本最大の繁華街・歓楽街となっており、\\\n# 昼夜を問わず人の流れが絶えない。JRの駅を中心に東・西・南口、周辺の各地下鉄駅、商業施設などが通路や地下街などで広範囲に連絡している。\\\n# 一日平均乗降者数は約335万人(2013年)[3]と世界一(ギネス世界記録認定)多い駅であり、地下道などで接続する西武新宿駅まで含めると約358万人(2013年)となり、\\\n# この数字は横浜市の人口に匹敵する。年間の乗降客数に直すと約13億人となりインドの人口をも上回る規模となる。\",\n# \t\t\t\"東京駅\":\"東京の表玄関とも言うべきターミナル駅で、特に東海道新幹線と東北新幹線の起点となっており、全国の新幹線網における最大の拠点となっている。\\\n# また、東海道本線や東北本線など主要幹線の起点駅でもある。当駅から乗り換えなしで実に33都道府県[1]と結んでおり、1日当たりの列車発着本数は約3000本という日本を代表する\\\n# 
ターミナル駅の一つである。プラットホームの数は日本一多く、在来線が地上5面10線と地下4面8線の合計9面18線、新幹線が地上5面10線、地下鉄は地下1面2線を有しており、\\\n# 面積は東京ドーム約3.6個分に相当する。赤レンガ造りの丸の内口駅舎は辰野金吾らが設計したもので、1914年に竣工、2003年に国の重要文化財に指定されている。\\\n# 「関東の駅百選」認定駅でもある。\",\n# \t\t\t\"大阪駅\":\"大阪府の代表駅(府庁所在地駅)として第1回近畿の駅百選にも選定されている西日本最大の駅。駅長が配置された直営駅であり、\\\n# 管理駅として東海道本線の塚本駅を管轄している。JRの特定都区市内制度における「大阪市内」に属する駅であり、運賃計算の中心駅となる。また、アーバンネットワークの運行の要衝となる駅で、\\\n# 運行系統の軸をなしている。大阪市街の北玄関である梅田に位置し、駅前や駅の東側・南側を中心に繁華街が広がっている。\\\n# 東京・山陽・九州方面への長距離列車については、1964年開業の新大阪駅を発着する東海道・山陽新幹線に地位を譲ったものの、当駅は現在でも北陸方面との特急の始発・終着駅であり、\\\n# 新快速を始めとする京阪神の都市間連絡列車や、北近畿・山陰方面との特急、東京駅発着の寝台特急などの在来線特急も発着している。\\\n# かつては東北・北海道方面に向かう夜行列車も発着していたが、2015年3月のダイヤ改正で寝台特急トワイライトエクスプレスが廃止されたことで東北・北海道方面を行き来する夜行列車は全て消滅した。\\\n# これによって大阪駅を起点、終点とする夜行列車は全て消滅した。ただし、トワイライトエクスプレスは同年5月16日(土)に山陽方面のツアー列車として復活した。\\\n# 貨物列車は北方貨物線および梅田貨物線(いずれも通称)を利用するため大阪駅を通過しない。\"}\n\n if form.is_valid():\n print(\"form.is_valid() ok\")\n # # 出発フォームの入力内容が、辞書に登録されているかどうか確認。\n # try:\n # # 登録されていれば、辞書から住所を取得し、出発住所とする\n # start_company = company_address[form.cleaned_data[\"start_address\"]]\n # except:\n # # 入力内容が、辞書に登録されていなければ、入力内容を出発住所とする。\n # start_company = form.cleaned_data[\"start_address\"]\n\n # # 到着フォームでも同様の処理。\n # try:\n # arriv_company = company_address[form.cleaned_data[\"arriv_address\"]]\n # # 到着の方が辞書に含まれるなら、概要とニュースと取得する。ので、フラグを立てる。\n # FLAG_getOverviewNews = 1\n # except:\n # # 入力内容が、辞書に登録されていなかった。\n # arriv_company = form.cleaned_data[\"arriv_address\"]\n # FLAG_getOverviewNews = 0\n\n # ---------------------------------------------------------------------------------\n # ---------------------------------------------------------------------------------\n # geocode,会社概要、ニュースを取得する。\n try:\n # Get geocode by Google Maps API.\n try:\n print(\"try get_geocode : \", end=\"\")\n # gmg = GoogleMapsGeocoder()\n geocode = {}\n # geocode.update({\"start\": gmg.get_geocode(start_company)[\"location\"]})\n # geocode.update({\"arriv\": gmg.get_geocode(arriv_company)[\"location\"]})\n start_location_latitude = float(form.cleaned_data[\"start_location\"][1:-1].split(\", \")[0])\n start_location_latitude = round(start_location_latitude, 9)\n start_location_longitude = float(form.cleaned_data[\"start_location\"][1:-1].split(\", \")[1])\n start_location_longitude = round(start_location_longitude, 8)\n start_location = {\"lat\": start_location_latitude, \"lng\": start_location_longitude}\n print(form.cleaned_data[\"start_location\"])\n # print(start_location_latitude)\n # print(start_location_longitude)\n print(start_location)\n arriv_location_latitude = float(form.cleaned_data[\"arriv_location\"][1:-1].split(\", \")[0])\n arriv_location_latitude = round(arriv_location_latitude, 9)\n arriv_location_longitude = float(form.cleaned_data[\"arriv_location\"][1:-1].split(\", \")[1])\n arriv_location_longitude = round(arriv_location_longitude, 8)\n arriv_location = {\"lat\": arriv_location_latitude, \"lng\": arriv_location_longitude}\n print(form.cleaned_data[\"arriv_location\"])\n # print(arriv_location_latitude)\n # print(arriv_location_longitude)\n print(arriv_location)\n \n geocode.update({\"start\": start_location})\n geocode.update({\"arriv\": arriv_location})\n\n # geocode.update({\"start\": form.cleaned_data[\"start_location\"]})\n # geocode.update({\"arriv\": form.cleaned_data[\"arriv_location\"]})\n\n print(\"finished.\")\n except Exception as e:\n # 入力された住所から、geocodeを特定できない。\n # 再入力させるために、入力画面に戻す。\n print(\"--------------------------------------------\")\n print(type(e))\n print(e)\n print(traceback.print_exc())\n 
print(\"--------------------------------------------\")\n raise\n except:\n # 謎のエラー発生時\n print(\"--------------------------------------------\")\n print(\"謎Error in GoogleMaps.\")\n print(traceback.print_exc())\n print(\"--------------------------------------------\")\n raise\n\n # ---------------------------------------------------------------------------------\n # 会社概要を取得\n # if FLAG_getOverviewNews == 1:\n try:\n # wikipedia APIから取得\n print(\"try company_overview : \", end=\"\")\n # overview = company_overview[form.cleaned_data[\"arriv_address\"]]\n overview = get_overview(form.cleaned_data[\"arriv_address\"])\n image_company_chart = \"/static/img/TIS_chart.png\"\n image_company_building = \"/static/img/TIS_building.png\"\n print(\"finished.\")\n except:\n # エラー発生時.\n print(\"--------------------------------------------\")\n print(\"Error in 会社概要.\")\n print(traceback.print_exc())\n print(\"--------------------------------------------\")\n raise\n # elif FLAG_getOverviewNews == 0:\n # 概要はとってこない\n # print(\"getting overview was passed.\")\n # overview = \"\"\n # image_company_chart = \"\"\n # image_company_building = \"\"\n\n # ---------------------------------------------------------------------------------\n # ニュースを取得\n try:\n # bing search APIで、関連ニュースをとってくる\n print(\"try bing.web_search : \", end=\"\")\n bing = Bing()\n keys = [\"Title\", \"Url\", \"Source\", \"Description\", \"Date\"]\n query = form.cleaned_data[\"arriv_address\"]\n news = bing.web_search(query, 5, keys)\n print(\"finished.\")\n print(\"news\", json.dumps(news, indent=2))\n except (TypeError, ConnectionAbortedError, ConnectionResetError, MaxRetryError, requests.packages.urllib3.exceptions.MaxRetryError, AttributeError, TransportError, googlemaps.exceptions.TransportError, ConnectionError, requests.exceptions.ConnectionError, NewConnectionError):\n # 一分間に連続してリクエスト送ると、回数制限に引っかかってエラー。たぶん。その場合、空のニュースを返すことにする。\n # raiseはしない。\n print(\"--------------------------------------------\")\n print(\"BingSearchAPI回数制限に引っかかりました。時間を置きましょう。\")\n print(traceback.print_exc())\n print(\"--------------------------------------------\")\n news = []\n except FileNotFoundError:\n # 検索結果のファイル出力時のエラー。\n # raiseはしない。\n print(\"--------------------------------------------\")\n print(\"BingSearchAPIの結果出力時のエラー。\")\n print(traceback.print_exc())\n print(\"--------------------------------------------\")\n news = []\n except:\n # 謎のエラー発生時。\n print(\"--------------------------------------------\")\n print(\"謎Error in ニュース.\")\n print(traceback.print_exc())\n print(\"--------------------------------------------\")\n # raise #Exception補足できないから、もうraiseしない。\n news = []\n\n\n # raiseしたら、Top画面に戻す。\n except:\n print(\"raised.\")\n return render_to_response('kerotan/main_page.html', {'form': form}, RequestContext(request))\n\n\n # ---------------------------------------------------------------------------------\n # ---------------------------------------------------------------------------------\n # Get route infomation by Ekitan API.\n try:\n print(\"try ekitan search : \", end=\"\")\n ekitan = Ekitan()\n _, results_filtered = ekitan.norikae_search(s_ido=geocode[\"start\"][\"lat\"], s_keido=geocode[\"start\"][\"lng\"], t_ido=geocode[\"arriv\"][\"lat\"], t_keido=geocode[\"arriv\"][\"lng\"], )\n print(\"finished.\")\n # print(\"results_filtered\",results_filtered)\n return render_to_response('kerotan/main_page.html', {\n 'form': form, 'route': results_filtered, \\\n 'start_latitude': geocode[\"start\"][\"lat\"], 'start_longitude': 
geocode[\"start\"][\"lng\"],\\\n 'arriv_latitude': geocode[\"arriv\"][\"lat\"], 'arriv_longitude': geocode[\"arriv\"][\"lng\"],\\\n 'news': news, 'overview': overview, 'image_company_chart': image_company_chart, 'image_company_building': image_company_building\\\n }, RequestContext(request))\n\n except FileNotFoundError:\n # APIキーが記述されたファイルの読み込みエラー\n print(\"--------------------------------------------\")\n print(\"APIキーが記述されたファイルの読み込みエラー\")\n print(traceback.print_exc())\n print(\"--------------------------------------------\")\n # raise\n except:\n print(\"--------------------------------------------\")\n print(traceback.print_exc())\n print(\"--------------------------------------------\")\n return render_to_response('kerotan/main_page.html', {'form': form}, RequestContext(request))\n # return render_to_response('kerotan/main_page.html', {\n # 'form': form, 'route': results_filtered, \\\n # 'start_latitude': geocode[\"start\"][\"lat\"], 'start_longitude': geocode[\"start\"][\"lng\"],\\\n # 'arriv_latitude': geocode[\"arriv\"][\"lat\"], 'arriv_longitude': geocode[\"arriv\"][\"lng\"],\\\n # 'news': news, 'overview': overview, 'image_company_chart': image_company_chart, 'image_company_building': image_company_building\\\n # }, RequestContext(request))\n\n\n # form.is_valid()を満たさない場合\n else:\n return render_to_response('kerotan/main_page.html', {'form': form}, RequestContext(request))\n # return render_to_response('kerotan/main_page.html', {'formset':formset}, RequestContext(request))\n\n # request.methodがPOSTじゃない場合\n else:\n form = AddressForm()\n return render_to_response('kerotan/main_page.html', {'form': form}, RequestContext(request))\n # return renderrender(RequestContext(request), 'kerotan/main_page.html')\n","sub_path":"kerotan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"311544446","text":"from flask import Flask, request, jsonify\nimport json\nimport os\nimport urllib.request\nfrom flask_cors import CORS\nimport pymysql\n\n# Constant For BASE URL\nBASE_URL = \"/api/v1/\"\nAUTH_CODE = \"Sanketnaik@1999\"\n\n# Connect to MySQL database\ndb = pymysql.connect(\"remotemysql.com\", \"a0EjWgNg3d\", \"xgrm2qldV8\", \"a0EjWgNg3d\", port=3306, autocommit=True)\ncursor = db.cursor()\n\n# Initialize Flask App\napp = Flask(__name__)\nCORS(app, resources={r\"/api/v1/*\": {\"origins\": \"*\"}})\n\n#\n# CREATE DATABASE\n#\n@app.route(BASE_URL + 'create_db', methods=['POST'])\ndef create_db():\n data = request.form\n\n authentication = data['auth']\n\n if authentication == AUTH_CODE:\n try:\n cursor.execute(\"CREATE DATABASE IF NOT EXISTS personal_diary;\")\n cursor.execute('USE personal_diary')\n return jsonify({\"result\": \"SUCCESS\"})\n except:\n return jsonify({\"result\": \"ERROR\"})\n else:\n return jsonify({\"result\": \"ERROR\"})\n\n\n#\n# START OF USER ACTIONS\n#\napp.route(BASE_URL + 'add-user', methods=['POST'])\ndef add_user():\n data = request.form\n\n name = data['display_name']\n email = data['email']\n photoURL = data['photoURL']\n uid = data['uid']\n try:\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS users (ID int NOT NULL AUTO_INCREMENT ,display_name VARCHAR(100), email VARCHAR(200), photoURL VARCHAR(1000), uid VARCHAR(100), PRIMARY KEY (ID));\")\n\n cursor.execute(f'INSERT INTO users (display_name, email, photoURL, uid) VALUES (\"{name}\", \"{email}\", \"{photoURL}\", \"{uid}\");');\n return jsonify({\"result\": \"SUCCESS\"})\n\n except:\n return jsonify({\"result\": \"ERROR\"})\n\n\n@app.route(BASE_URL + 'get-user-data', methods=['POST'])\ndef get_user_data():\n\n data = request.form\n email = data['email']\n\n try:\n cursor.execute(f\"select * from users where `email` = \\\"{email}\\\"\")\n data = cursor.fetchone()\n return jsonify({\"result\": \"SUCCESS\", \"data\": data})\n except:\n return jsonify({\"result\": \"ERROR\"})\n\n#\n# END OF USER ACTIONS\n#\n\n#\n# START OF DIARY ACTIONS\n#\n@app.route(BASE_URL + 'add-entry', methods=['POST'])\ndef add_entry():\n\n formData = request.form\n uid = formData[\"uid\"]\n id = formData[\"id\"]\n data = formData[\"data\"]\n month = formData[\"month\"]\n year = formData[\"year\"]\n date = formData[\"date\"]\n day = formData[\"day\"]\n\n try:\n cursor.execute(f\"CREATE TABLE IF NOT EXISTS {uid}_diary (UID int NOT NULL AUTO_INCREMENT, id VARCHAR(20), date VARCHAR(10), month VARCHAR(20), year VARCHAR(10), day VARCHAR(10), data VARCHAR(2000), PRIMARY KEY (UID));\")\n cursor.execute(f\"insert into {uid}_diary (id, date, month, year, day, data) VALUES (\\\"{id}\\\", \\\"{date}\\\", \\\"{month}\\\", \\\"{year}\\\", \\\"{day}\\\", \\\"{data}\\\");\")\n\n return jsonify({\"result\": \"SUCCESS\"})\n except:\n return jsonify({\"result\": \"ERROR\"})\n\n\n@app.route(BASE_URL + 'get-data', methods=['POST'])\ndef get_diary_data():\n\n data = request.form\n uid = data['uid']\n\n try:\n cursor.execute(f\"select * from {uid}_diary ORDER BY id DESC;\")\n result = cursor.fetchall()\n return jsonify({\"result\": \"SUCCESS\", \"data\": result})\n except:\n return jsonify({\"result\": \"ERROR\"})\n\n\n@app.route(BASE_URL + 'update-entry', methods=['POST'])\ndef update_entry():\n\n formData = request.form\n uid = formData['uid']\n data = formData['data']\n id = formData['id']\n\n try:\n cursor.execute(f'update {uid}_diary SET `data` = \"{data}\" where id = \"{id}\";')\n return jsonify({\"result\": \"SUCCESS\"})\n 
except:\n return jsonify({\"result\": \"ERROR\"})\n\n#\n# END OF DIARY ACTIONS\n#\n\n# Run Flask App\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
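A note on app.py above: its handlers interpolate request fields straight into SQL with f-strings, which is open to SQL injection. A hedged sketch of the parameterized form for the add-user INSERT, using pymysql's standard `%s` placeholders; only the table and column names come from the record, the function name is illustrative. The per-user `{uid}_diary` table names would still need separate validation, since identifiers cannot be bound as parameters.

```python
import pymysql

def add_user_row(cursor, name, email, photo_url, uid):
    # %s placeholders let pymysql escape the values, unlike the f-string queries above.
    cursor.execute(
        "INSERT INTO users (display_name, email, photoURL, uid) VALUES (%s, %s, %s, %s);",
        (name, email, photo_url, uid),
    )
```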
+{"seq_id":"143613713","text":"\n# Change log\n# 20200308 Soumya Added feed for space.com\n# 20200308 Soumya Added BeautifulSoup to scrap summary from space.com \n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse,HttpResponse\nimport feedparser\nimport re\nimport requests \nfrom bs4 import BeautifulSoup\n# Import rest_framework\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .serializers import PostSerializer, NASASerializer, SPACEDOTCOMSerializer\nfrom .models import Post, NASA, SPACEDOTCOM\n\n\nclass TestView(APIView):\n def get(self, request, *args, **kwargs):\n qs = Post.objects.all()\n serializer = PostSerializer(qs, many=True)\n return Response(serializer.data)\n\n def post(self, request, *args, **kwargs):\n serializer = PostSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n\n\nclass NASAView(APIView):\n def get(self, request, *args, **kwargs):\n feed = feedparser.parse('https://www.nasa.gov/rss/dyn/breaking_news.rss')\n for entry in feed.entries:\n obj, created = NASA.objects.get_or_create(\n link=entry['link'],\n title=entry['title'],\n author=feed['feed']['author'],\n summary=entry['summary'],\n published=entry['published'],\n article_id=entry['dc_identifier'],\n author_img_url='',\n article_img_url=entry['links'][1]['href']\n )\n\n qs = NASA.objects.all()\n serializer = NASASerializer(qs, many=True)\n return Response(serializer.data) \n\n\nclass SPACEDOTCOMView(APIView):\n def get(self, request, *args, **kwargs):\n article_img_url = ''\n feed = feedparser.parse('https://www.space.com/feeds/all')\n for entry in feed.entries:\n for link in entry['links']:\n if ('image' in link.type):\n article_img_url = link.href\n # Space.com doesnt provide the summary content\n # So, use beautiful soup to scrap the article\n # We are fetching the 'complete' sentences within 500char limit\n r = requests.get(entry['link'])\n htmlContent = r.content\n soup = BeautifulSoup(htmlContent, 'html.parser')\n summary = str(soup.find(id=\"article-body\").get_text())[0:500].split(\".\")\n summary.pop()\n summary = \".\".join(summary)\n\n\n obj, created = SPACEDOTCOM.objects.get_or_create(\n link=entry['link'],\n title=entry['title'],\n author=entry['author'],\n summary=summary,\n published=entry['published'],\n published_parsed=entry['published_parsed'],\n article_id='',\n author_img_url='',\n article_img_url=article_img_url\n )\n\n qs = SPACEDOTCOM.objects.all()\n serializer = SPACEDOTCOMSerializer(qs, many=True)\n return Response(serializer.data) \n\n\ndef dummy(request):\n return JsonResponse({\n 'author': 'Soumya',\n 'NASA': '/NASA',\n 'SPACEDOTCOM': '/SPACEDOTCOM'\n }, safe=False)\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"174338359","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import lognorm\n\nx,y,r = np.loadtxt('dist2', unpack = True)\n\n# Do some stats\nmean = np.average(r)\nstd = np.nanstd(r)\nr = r[np.nonzero(r)]\n\nprint(f'mean: {mean}, std/mean: {std/mean}')\n\n# Create circles\ncircles = (plt.Circle((xi,yi),ri,fill=False) for xi, yi, ri in zip(x, y, r))\n\n# Create figure and axis, then add circles\nfig, ax = plt.subplots()\nfor circle in circles:\n ax.add_patch(circle)\n\n# Neaten up\nplt.axis('scaled')\nplt.xlabel('x (nm)')\nplt.ylabel('y (nm)')\nplt.show()\n","sub_path":"data_analysis/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"526903375","text":"from django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nurlpatterns = [\n path('', include('apps.landing.urls')),\n path('api/v1/ingredients/', include('apps.ingredients.urls')),\n path('api/v1/recipes/', include('apps.recipes.urls')),\n path('accounts/', include('apps.accounts.urls')),\n path('admin/', admin.site.urls),\n path('api-auth/', include('rest_framework.urls'))\n]\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n path('__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns","sub_path":"project/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"651537675","text":"from .base import *\nfrom .mgr import CoreManager as Mgr\n\n\nclass CreationPhaseManager(object):\n\n _id_generator = id_generator()\n\n def __init__(self, obj_type, has_color=False, add_to_hist=False):\n\n self._obj = None\n self._obj_type = obj_type\n self._has_color = has_color\n self._add_to_hist = add_to_hist\n self._custom_obj_name = \"\"\n\n self._origin_pos = Point3()\n self._creation_handlers = []\n self._current_creation_phase = 0\n\n if has_color:\n self.set_next_object_color()\n else:\n GlobalData[\"next_%s_color\" % obj_type] = None\n\n Mgr.expose(\"custom_%s_name\" % obj_type, lambda: self._custom_obj_name)\n Mgr.accept(\"set_custom_%s_name\" % obj_type, self.__set_custom_object_name)\n\n def setup(self, creation_phases, status_text):\n\n creation_status = {}\n mode_text = \"Create %s\" % status_text[\"obj_type\"]\n info_text = \"LMB-drag to start creation\"\n creation_status[\"idle\"] = {\"mode\": mode_text, \"info\": info_text}\n\n add_state = Mgr.add_state\n bind = Mgr.bind_state\n state_persistence = -12\n\n for i, phase_data in enumerate(creation_phases):\n\n main_starter, main_handler = phase_data\n\n if i == 0:\n creation_starter = self.__get_creation_starter(main_starter)\n Mgr.accept(\"start_%s_creation\" % self._obj_type, creation_starter)\n on_enter_state = None\n else:\n on_enter_state = self.__get_creation_phase_starter(main_starter)\n\n state_id = \"%s_creation_phase_%s\" % (self._obj_type, i + 1)\n add_state(state_id, state_persistence, on_enter_state)\n\n self._creation_handlers.append(self.__get_creation_phase_handler(main_handler))\n\n binding_id = \"quit %s creation\" % self._obj_type\n bind(state_id, binding_id, \"escape\", self.__end_creation)\n binding_id = \"cancel %s creation\" % self._obj_type\n bind(state_id, binding_id, \"mouse3-up\", self.__end_creation)\n\n info_text = \"move mouse to %s;\" % status_text[\"phase%s\" % (i + 1)]\n get_command = lambda state_id: lambda: Mgr.enter_state(state_id)\n\n if i == len(creation_phases) - 1:\n binding_id = \"finalize %s creation\" % self._obj_type\n bind(state_id, binding_id, \"mouse1-up\",\n lambda: self.__end_creation(cancel=False))\n info_text += \" release LMB to finalize;\"\n else:\n binding_id = \"start %s creation phase %s\" % (self._obj_type, i + 2)\n next_state_id = \"%s_creation_phase_%s\" % (self._obj_type, i + 2)\n bind(state_id, binding_id, \"mouse1-up\", get_command(next_state_id))\n info_text += \" release LMB to set;\"\n\n info_text += \" RMB to cancel\"\n creation_status[\"phase%s\" % (i + 1)] = {\"mode\": mode_text, \"info\": info_text}\n\n status_data = GlobalData[\"status_data\"][\"create\"]\n status_data[self._obj_type] = creation_status\n\n return True\n\n def __get_creation_starter(self, main_creation_func):\n\n def start_creation(origin_pos):\n\n self._origin_pos = origin_pos\n main_creation_func()\n\n Mgr.enter_state(\"%s_creation_phase_1\" % self._obj_type)\n Mgr.add_task(self._creation_handlers[0], \"draw_object\", sort=3)\n Mgr.update_app(\"status\", \"create\", self._obj_type, \"phase1\")\n\n return start_creation\n\n def __get_creation_phase_starter(self, main_start_func):\n\n def start_creation_phase(prev_state_id, is_active):\n\n Mgr.remove_task(\"draw_object\")\n main_start_func()\n self._current_creation_phase += 1\n creation_handler = self._creation_handlers[self._current_creation_phase]\n Mgr.add_task(creation_handler, \"draw_object\", sort=3)\n phase_id = self._current_creation_phase + 1\n Mgr.update_app(\"status\", \"create\", 
self._obj_type, \"phase%s\" % phase_id)\n\n return start_creation_phase\n\n def __get_creation_phase_handler(self, main_handler_func):\n\n def handle_creation_phase(task):\n\n main_handler_func()\n\n return task.cont\n\n return handle_creation_phase\n\n def __set_custom_object_name(self, custom_name):\n\n self._custom_obj_name = custom_name\n\n def init_object(self, obj):\n\n self._obj = obj\n\n def get_object(self):\n\n return self._obj\n\n def get_object_type(self):\n\n return self._obj_type\n\n def generate_object_id(self):\n\n obj_id = (self._obj_type,) + self._id_generator.next()\n\n return obj_id\n\n def set_next_object_color(self):\n\n color_values = tuple(random.random() * .4 + .5 for i in range(3))\n GlobalData[\"next_%s_color\" % self._obj_type] = color_values\n\n def get_next_object_color(self):\n\n r, g, b = GlobalData[\"next_%s_color\" % self._obj_type]\n color = VBase4(r, g, b, 1.)\n\n return color\n\n def get_origin_pos(self):\n\n return self._origin_pos\n\n def add_history(self, toplevel_obj):\n\n Mgr.do(\"update_history_time\")\n name = toplevel_obj.get_name()\n event_descr = 'Create \"%s\"' % name\n obj_id = toplevel_obj.get_id()\n obj_data = {obj_id: toplevel_obj.get_data_to_store(\"creation\")}\n event_data = {\"objects\": obj_data}\n event_data[\"object_ids\"] = set(Mgr.get(\"object_ids\"))\n Mgr.do(\"add_history\", event_descr, event_data, update_time_id=False)\n\n def __end_creation(self, cancel=True):\n\n Mgr.remove_task(\"draw_object\")\n process = None\n\n if cancel or not self._obj.is_valid():\n\n self._obj.destroy()\n\n else:\n\n finalization = self._obj.finalize()\n\n if finalization:\n\n def finalize():\n\n finalization.next()\n\n for step in finalization:\n yield True\n\n obj_type = self._obj_type\n name = Mgr.get(\"next_obj_name\", obj_type)\n Mgr.update_remotely(\"next_obj_name\", name)\n\n if self._add_to_hist:\n self.add_history(self._obj.get_toplevel_object())\n\n yield False\n\n process = finalize()\n\n else:\n\n obj_type = self._obj_type\n name = Mgr.get(\"next_obj_name\", obj_type)\n Mgr.update_remotely(\"next_obj_name\", name)\n\n if self._has_color:\n self.set_next_object_color()\n\n if self._add_to_hist:\n self.add_history(self._obj.get_toplevel_object())\n\n self._obj = None\n self._current_creation_phase = 0\n\n Mgr.notify(\"creation_ended\")\n Mgr.enter_state(\"creation_mode\")\n\n if process and process.next():\n Mgr.show_screenshot()\n descr = \"Creating %s...\" % self._obj_type\n Mgr.do_gradually(process, \"creation\", descr, cancellable=True)\n","sub_path":"src/core/base/creation_mgr.py","file_name":"creation_mgr.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"203839692","text":"def rickroll(cycle, message, regex):\n cycle.log(cycle.bot.sendMessage(message.chatID, \"Tsk. Rickrolls are so outdated.\", replyID=message.messageID))\n\ndef reddit(cycle, message, regex):\n cycle.log(cycle.bot.sendMessage(message.chatID, 'Pfft, redditfag. Try voat.co or just remove yourself.', replyID=message.messageID))\n\nplugin={\n 'name': 'anti-rickroll',\n 'functions': {\n r'dQw4w9WgXcQ': rickroll,\n r'reddit': reddit\n },\n 'description':\n \"\"\"Detects and snuffs rickrolls and other things\"\"\",\n 'summary':\n None #undocumented\n }\n","sub_path":"plugins/rickroll.py","file_name":"rickroll.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"114854232","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport subprocess \nimport os\nimport time\nimport shlex\n\nclass MakeProcess(QObject):\n\tdef __init__(self, server, directory):\n\t\tsuper(type(self),self).__init__()\n\t\tself.notice=SIGNAL(\"progress(QString)\")\n\t\tself.server=server\n\t\tself.settings=QSettings(\"Gogo, Inc.\", \"Batsview\")\n\t\tself.directory=directory\n\n\tdef make(self,target):\n\t\tself.target=target\n\n\tdef lru(self, lru):\n\t\tself.lru = lru\n\n\tdef work(self):\n\t\tif self.lru:\n\t\t\tmakecmd='make %s BORG=%s'%(self.target, self.lru)\n\t\telse:\n\t\t\tmakecmd='make %s'%(self.target)\n\t\tuser=str(self.settings.value(\"username\").toString())\n\t\tif len(user)>0:\n\t\t\tuser=user+\"@\"\n\t\tshell=str(self.settings.value(\"shell\").toString())\n\t\tif shell==\"xterm\":\n\t\t\tcmd='xterm -T \"%s\" -e \"ssh %s%s \\'bash -c \\\\\"cd %s; if ! %s; then echo FAIL; sleep 3000; fi\\\\\"\\'\"'%(makecmd,user,self.server,self.directory, makecmd)\n\t\telse:\n\t\t\tcmd='gnome-terminal -e \"ssh %s%s \\'bash -c \\\\\"cd %s; if ! %s; then echo FAIL; sleep 3000; fi\\\\\"\\'\"'%(user,self.server,self.directory, makecmd)\n\t\tos.system(cmd)\n\t\tself.emit(self.notice, \"DONE\")\n\n\tdef old(self):\n\t\tcmd='xterm -e \"ssh %s \\'bash -c \\\\\"cd %s; if ! make %s BORG=%s; then echo FAIL; sleep 3000; fi\\\\\"\\'\"'%(self.server,self.directory, self.target, self.lru)\n\t\tproc=subprocess.Popen(shlex.split(cmd),stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n\t\t\n\t\tline=\" \"\n\t\twhile line != '':\n\t\t\tline=proc.stdout.readline()\n\t\t\tself.emit(self.notice, line[0:-1])\n\t\t\t\n\t\t\ttime.sleep(1)\n\t\tresult=proc.wait()\n\t\tself.emit(self.notice, \"DONE\")\n\nclass Monitor(QObject):\n\tdef __init__(self, server, directory):\n\t\tsuper(type(self),self).__init__()\n\t\tself.proc=MakeProcess(server,directory)\n\t\tself.thr=QThread(self)\n\t\tself.proc.moveToThread(self.thr)\n\n\t\tself.connect(self.thr, SIGNAL(\"started()\"),self.proc.work)\n\t\tself.connect(self.proc, SIGNAL(\"progress(QString)\"),self.progress)\n\n\tdef run(self, cmd=\"help\", lru=None):\n\t\tself.proc.make(cmd)\n\t\tself.proc.lru(lru)\n\t\tself.start_time=time.time()\n\t\tself.thr.start()\n\t\n\tdef progress(self,title):\n\t\tif title==\"DONE\":\n\t\t\tself.stop_time=time.time()\n\t\t\tself.thr.quit()\n\n\tdef wait(self):\n\t\tself.thr.wait()\n\tdef duration(self):\n\t\tif self.stop_time and self.start_time:\n\t\t\treturn self.stop_time - self.start_time\n\n\t\nif __name__ == \"__main__\":\n\timport sys\n\timport signal\n\tsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\tapp=QApplication(sys.argv)\n\twin=Monitor()\n\twin.run(\"rfs\")\n\twin.wait()\n\tapp.exec_()\t\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"297063968","text":"\"\"\"Contains the scoring algorithms used in the model.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom .models import OPC\nfrom .utils import k_kohler, ri_eff\nfrom .mie import cscat\n\n\ndef compute_bin_assessment(opc, refr, kappa, rh_values=[0., 35., 95.]):\n \"\"\"Assess the ability of an OPC to assign particles to their correct bin.\n\n Parameters\n ----------\n opc: opcsim.OPC\n refr: complex\n The complex refractive index of the material to assess\n kappa: float\n The kappa value to use for hygroscopic growth\n rh_values: list-like\n A list of relative humidities to assess the OPC at.\n\n Returns\n -------\n rv: pd.DataFrame\n A dataframe containing the results with self-explanatory columns.\n\n Examples\n --------\n\n \"\"\"\n assert(isinstance(opc, OPC)), \"opc must be an instance of the opcsim.OPC class\"\n\n # init the dataframe to hold our results\n rv = pd.DataFrame()\n\n for rh in rh_values:\n for i, _bins in enumerate(opc.bins):\n # compute the wet diameter\n wet_diam_lo = k_kohler(diam_dry=_bins[0], kappa=kappa, rh=rh)\n wet_diam_hi = k_kohler(diam_dry=_bins[-1], kappa=kappa, rh=rh)\n\n # compute the pct_dry\n pct_dry = (_bins[0]**3) / (wet_diam_lo**3)\n\n # compute the effective RI\n ri = ri_eff(species=[refr, complex(1.333, 0)], weights=[pct_dry, 1-pct_dry])\n\n # compute the scattering cross-section\n cscat_lo_exp = cscat(\n dp=_bins[0], wl=opc.wl, refr=refr, theta1=opc.theta[0], theta2=opc.theta[1])\n cscat_hi_exp = cscat(\n dp=_bins[-1], wl=opc.wl, refr=refr, theta1=opc.theta[0], theta2=opc.theta[1])\n\n cscat_lo = cscat(\n dp=wet_diam_lo, wl=opc.wl, refr=ri, theta1=opc.theta[0], theta2=opc.theta[1])\n cscat_hi = cscat(\n dp=wet_diam_hi, wl=opc.wl, refr=ri, theta1=opc.theta[0], theta2=opc.theta[1])\n\n # assign bins\n bin_assign_lo = opc.calibration_function(values=[cscat_lo])\n bin_assign_hi = opc.calibration_function(values=[cscat_hi])\n\n # add results to the dataframe\n rv = rv.append({\n \"bin_true\": i,\n \"bin_lo\": bin_assign_lo[0] if len(bin_assign_lo) > 0 else -99,\n \"bin_hi\": bin_assign_hi[0] if len(bin_assign_hi) > 0 else -99,\n \"refr_eff\": ri,\n \"rh\": rh,\n \"cscat_hi_ratio\": cscat_hi / cscat_hi_exp,\n \"cscat_lo_ratio\": cscat_lo / cscat_lo_exp,\n }, ignore_index=True)\n \n # force datatypes to be correct\n rv[\"bin_true\"] = rv[\"bin_true\"].astype(int)\n rv[\"bin_lo\"] = rv[\"bin_lo\"].astype(int)\n rv[\"bin_hi\"] = rv[\"bin_hi\"].astype(int)\n rv[\"rh\"] = rv[\"rh\"].astype(float)\n rv[\"cscat_hi_ratio\"] = rv[\"cscat_hi_ratio\"].astype(float)\n rv[\"cscat_lo_ratio\"] = rv[\"cscat_lo_ratio\"].astype(float)\n\n return rv\n\n# def nv_score(model, distribution, dmin=0.0, dmax=2.5, **kwargs):\n# \"\"\"Calculate and return the number-to-volume ratio.\n\n# The total number of particles is calculated by calculating the total number\n# of particles in each individual bin, and then summing them. The total volume\n# in the distribution is calculated by integrating the Volume-weighted CDF\n# between 0 and `dmax` microns.\n\n# Parameters\n# ----------\n# model : OPC\n# A valid OPC model describing an OPC that can be evaluated.\n# distribution : AerosolDistribution\n# A valid AerosolDistribution instance that can be evaluated.\n# dmin : float\n# The minimum particle size to integrate the CDF under. Default is 0.0\n# microns.\n# dmax : float\n# The maximum particle size to integrate the CDF under. 
Default is 2.5\n# microns.\n\n# Returns\n# -------\n# N/V : float\n# Returns the number-to-volume ratio as a single float.\n\n# Examples\n# --------\n\n# Compute the number-to-volume ratio for a 2-bin OPC on the Urban distribution\n\n# >>> opc = opcsim.OPC(n_bins=2)\n# >>> urban = opcsim.load_distribution(\"Urban\")\n# >>> n_v = opcsim.metrics.nv_score(opc, urban)\n\n# \"\"\"\n# # evaluate the total number of particles in each bin (then sum)\n# total_number = model.number(distribution, **kwargs).sum()\n\n# # evaluate the total volume in the distribution < dmax\n# total_volume = distribution.cdf(weight='volume', dmax=dmax)\n\n# return total_number / total_volume\n\n# def vv_score(model, distribution, dmin=0.0, dmax=2.5, **kwargs):\n# \"\"\"Calculate and return the volume-to-volume ratio.\n\n# The total volume of particles per the OPC is calculated by calculating the\n# total number of particles in each individual bin, and then multiplying each\n# bin by a 'volume-factor'. The sum of individual bin volumes is then used.\n# The total volume in the distribution is calculated by integrating the\n# Volume-weighted CDF between 0 and `dmax` microns.\n\n# Parameters\n# ----------\n# model : OPC\n# A valid OPC model describing an OPC that can be evaluated.\n# distribution : AerosolDistribution\n# A valid AerosolDistribution instance that can be evaluated.\n# dmin : float\n# The minimum particle size to integrate the CDF under. Default is 0.0\n# microns.\n# dmax : float\n# The maximum particle size to integrate the CDF under. Default is 2.5\n# microns.\n\n# Returns\n# -------\n# V/V : float\n# Returns the volume-to-volume ratio as a single float.\n\n# Examples\n# --------\n\n# Compute the number-to-volume ratio for a 2-bin OPC on the Urban distribution\n\n# >>> opc = opcsim.OPC(n_bins=2)\n# >>> urban = opcsim.load_distribution(\"Urban\")\n# >>> v_v = opcsim.metrics.vv_score(opc, urban)\n\n# \"\"\"\n# # evaluate the total number of particles in each bin (then sum)\n# measured_volume = model.volume(distribution, **kwargs).sum()\n\n# # evaluate the total volume in the distribution < dmax\n# total_volume = distribution.cdf(weight='volume', dmin=dmin, dmax=dmax)\n\n# return measured_volume / total_volume\n","sub_path":"opcsim/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
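The `compute_bin_assessment` function in opcsim/metrics.py above leans on `opcsim.utils.k_kohler` for hygroscopic growth. As context, here is a self-contained sketch of the usual kappa-Kohler growth relation with the Kelvin term neglected; this is a plausible reading of what that utility computes given the call site, not its actual implementation.

```python
def kappa_kohler_wet_diameter(diam_dry, kappa, rh):
    """D_wet = D_dry * (1 + kappa * aw / (1 - aw))**(1/3), with aw = rh/100
    (equilibrium growth factor, Kelvin effect ignored)."""
    aw = rh / 100.0
    if aw <= 0.0:
        return diam_dry
    return diam_dry * (1.0 + kappa * aw / (1.0 - aw)) ** (1.0 / 3.0)

# A 0.1 um particle with kappa=0.6 swells to roughly 0.23 um at 95% RH.
print(kappa_kohler_wet_diameter(0.1, 0.6, 95.0))
```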
+{"seq_id":"321371836","text":"import discord\r\nimport asyncio\r\nfrom discord.ext.commands import Bot\r\nimport platform\r\nimport random\r\nfrom egg_assets import greet_txt, tulku_memes, bw_text, fortune_list\r\nimport re\r\n\r\ncommands = dict()\r\nvars = dict()\r\nqueue = []\r\n\r\n'''\r\nscript = 'eggscript\\n' \\\r\n '0 for 0:3\\n' \\\r\n '1 print hello\\n' \\\r\n '2 end\\n' \\\r\n '3 number i 0\\n' \\\r\n '4 number j 2 \\n' \\\r\n '5 print true\\n' \\\r\n '6 ++ i\\n' \\\r\n '7 if < $i $j\\n' \\\r\n '8 print $i\\n' \\\r\n '9 goto 4\\n' \\\r\n '10 end\\n' \\\r\n '11 print $i\\n' \\\r\n '12 -- i\\n' \\\r\n '13 print $i'\r\n'''\r\ndef for_loop(start, end):\r\n param = commands[start][4:]\r\n x = int(param[0:param.find(':')])\r\n y = int(param[param.find(':') + 1:])\r\n\r\n for i in range(x, y):\r\n for j, cmd in enumerate(range(start + 1, end)):\r\n queue.insert(j, cmd)\r\n\r\nasync def prnt(start, client, channel):\r\n param = commands[start][6:]\r\n if param.startswith('$'):\r\n print(vars.keys())\r\n var_name = param[1:]\r\n await client.send_message(channel, str(vars[var_name]))\r\n else:\r\n await client.send_message(channel, commands[start][6:])\r\n\r\ndef number(start):\r\n param = commands[start][7:]\r\n var_name = param[0:param.find(' ')]\r\n var_val = int(param[param.find(' ') + 1:])\r\n\r\n vars[var_name] = var_val\r\n\r\ndef incr(start):\r\n param = commands[start][3:]\r\n new_value = vars[param] + 1\r\n vars[param] = new_value\r\n\r\ndef decr(start):\r\n param = commands[start][3:]\r\n new_value = vars[param] - 1\r\n vars[param] = new_value\r\n\r\ndef boolean_operation(boolean_op, x, y):\r\n if boolean_op == '>':\r\n return x > y\r\n elif boolean_op == '<':\r\n return x < y\r\n elif boolean_op == '=':\r\n return x == y\r\n\r\n#TODO: work on this\r\ndef if_statement(start, end):\r\n param = commands[start][3:]\r\n boolean_op = param[0:1]\r\n values = param[2:].split(' ')\r\n x = int(vars[values[0][1:]] if values[0].startswith('$') else values[0])\r\n y = int(vars[values[1][1:]] if values[1].startswith('$') else values[1])\r\n if boolean_operation(boolean_op, x, y):\r\n for j, cmd in enumerate(range(start + 1, end)):\r\n queue.insert(j, cmd)\r\n else:\r\n for j, cmd in enumerate(range(start, end)):\r\n queue.pop(0)\r\n\r\n\r\ndef goto(start):\r\n param = int(commands[start][5:])\r\n for j, cmd in enumerate(range(param, start + 1)):\r\n queue.insert(j, cmd)\r\n\r\nasync def parse(client, channel):\r\n while (len(queue) > 0):\r\n i = queue.pop(0)\r\n cmd = commands[i]\r\n print('current line: ' + str(i))\r\n print('current cmd: ' + cmd)\r\n print('current queue: ' + str(queue))\r\n\r\n if cmd.startswith('STOP'):\r\n while (len(queue) > 0):\r\n queue.pop()\r\n elif cmd.startswith('for'):\r\n end_for = -1\r\n for j in range(i, len(commands.keys())):\r\n if commands[j].startswith('end'):\r\n end_for = j\r\n break\r\n for_loop(i, end_for)\r\n elif cmd.startswith('print'):\r\n await prnt(i, client, channel)\r\n elif cmd.startswith('number'):\r\n number(i)\r\n elif cmd.startswith('++'):\r\n incr(i)\r\n elif cmd.startswith('--'):\r\n decr(i)\r\n elif cmd.startswith('if'):\r\n end_if = -1\r\n for j in range(i, len(commands.keys())):\r\n if commands[j].startswith('end'):\r\n end_if = j\r\n break\r\n if_statement(i, end_if)\r\n elif cmd.startswith('goto'):\r\n goto(i)\r\n\r\nasync def go(script, client, channel):\r\n for line in script.split(';'):\r\n if line.startswith('eggscript;') or len(line) == 0:\r\n continue\r\n lineInd = int(line[0:line.find(' ')])\r\n lineCommand = 
line[line.find(' ') + 1:]\r\n\r\n commands[lineInd] = lineCommand\r\n\r\n queue.append(lineInd)\r\n\r\n await parse(client, channel)\r\n\r\n","sub_path":"eggscript.py","file_name":"eggscript.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
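A usage note on eggscript.py above: `go()` splits its input on ';', while the commented-out sample script at the top of the file joins its lines with '\n'. Here is that same sample in the ';'-separated form the parser actually accepts; `client` and `channel` would come from discord.py at runtime.

```python
script = ("eggscript;0 for 0:3;1 print hello;2 end;"
          "3 number i 0;4 number j 2;5 print true;6 ++ i;"
          "7 if < $i $j;8 print $i;9 goto 4;10 end;"
          "11 print $i;12 -- i;13 print $i")
# await go(script, client, channel)  # inside an async discord.py event handler
```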
+{"seq_id":"453871149","text":"# coding: utf8\n\"\"\" Tests:\n\n- :class:`MarginalProbaDescentProcesses` check that the marginal probability of selecting any integer is indeed given by the ``_bernoulli_param`` attribute\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nimport sys\nsys.path.append('..')\n\nfrom dppy.exotic_dpps import CarriesProcess, DescentProcess, VirtualDescentProcess\n\n\nclass MarginalProbaDescentProcesses(unittest.TestCase):\n \"\"\" Check that the marginal probability of selecting any integer is indeed given by the ``_bernoulli_param`` attribute\n \"\"\"\n\n size = 10000\n tol = 1e-2\n\n def test_carries_process(self):\n\n cp = CarriesProcess(base=10)\n cp.sample(size=self.size)\n\n estim = len(cp.list_of_samples[-1]) / self.size\n\n self.assertTrue(np.abs(estim - cp._bernoulli_param) < self.tol)\n\n def test_descent_process(self):\n\n dp = DescentProcess()\n dp.sample(size=self.size)\n\n estim = len(dp.list_of_samples[-1]) / self.size\n\n self.assertTrue(np.abs(estim - dp._bernoulli_param) < self.tol)\n\n def test_virtual_descent_process(self):\n\n vdp = VirtualDescentProcess(x_0=0.5)\n vdp.sample(size=self.size)\n\n estim = len(vdp.list_of_samples[-1]) / self.size\n\n self.assertTrue(np.abs(estim - vdp._bernoulli_param) < self.tol)\n\n\ndef main():\n\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/test_descent_processes.py","file_name":"test_descent_processes.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"292870974","text":"import numpy as np\n\nfrom scipy.optimize import curve_fit\nfrom scipy.special import erf\n\n#=================================================================================================\n\ndef extractLineProfile( img, line=[ 1., 0. ] ):\n '''\n extractLineProfile( img, line ):\n Returns a line profile of a given image 'img' along the line=[ m, c ]\n where m is the slope and c is the y-intercept.\n\n Params: \n img: 2D Numpy array\n line: list containing slope and y-intercept\n\n Returns:\n intens: 1D Numpy array of intensities along the line\n '''\n x, y = np.meshgrid( np.arange( img.shape[0] ), np.arange( img.shape[1] ) )\n y = np.flipud( y )\n here = np.where( np.absolute( y - ( line[0]*x + line[1] ) ) < 1. )\n return img[ here ], here\n\n#=================================================================================================\n\ndef myEdge( x, mu, sig, amp=1. ):\n '''\n myEdge: \n Models a rising edge by the following function:\n f(x) = \\frac{A}{2} \\left[ 1 + \\text{erf}\\left(\\frac{x-\\mu}{\\sigma}\\right) \\right ]\n '''\n return( amp / 2. ) * ( 1. + erf( ( x - mu ) / sig ) )\n\n#=================================================================================================\n\nif __name__=='__main__':\n import spatialResolution as sr\n from argparse import Namespace\n import scipy.io as sio\n\n filename = '/home/smaddali/ANL/BeamRuns/Feb2018/reconstructions/stdSample_solution.mat'\n calfilename = '/home/smaddali/ANL/Manuscripts/HEBCDI/data/bases_FINAL.mat'\n dat = Namespace( **sio.loadmat( filename ) )\n cal = Namespace( **sio.loadmat( calfilename ) )\n print( 'Array shape = ', dat.rho.shape )\n intens, here = sr.extractLineProfile( \n np.absolute( dat.rho[:,:,33] ), \n line=[ 1., 0. ]\n )\n\n # fitting edge\n data = intens[50:66] \n fspace_steps = cal.real_stdSample[:,0].reshape(-1,1)@here[0].reshape(1,-1) + cal.real_stdSample[:,1].reshape(-1,1)@here[1].reshape(1,-1)\n fspace_steps = fspace_steps - fspace_steps[:,0].reshape( -1, 1 ).repeat( fspace_steps.shape[1], axis=1 )\n my_x = np.sqrt( ( fspace_steps**2 ).sum( axis=0 ) )[50:66]\n seg = np.array(\n [ \n [ here[1][50], here[1][66] ], \n [ here[0][50], here[0][66] ]\n ]\n )\n popt, pcov = curve_fit( \n myEdge, \n my_x, data, \n p0=[ 1750., 100., 0.09 ]\n )\n \n # plotting\n plt.figure( 1 )\n plt.clf()\n plt.imshow( np.absolute( dat.rho[:,:,33] ) )\n plt.xlim( [ 44, 84 ] )\n plt.ylim( [ 44, 84 ] )\n plt.colorbar()\n plt.plot( seg[0], seg[1], 'c' )\n plt.set_cmap( 'inferno' )\n plt.xticks( [] )\n plt.yticks( [] )\n \n plt.figure( 2 )\n plt.clf()\n plt.plot( my_x, data, '-o', label='Line profile' )\n plt.plot( my_x, myEdge( my_x, popt[0], popt[1], popt[2] ), label='Fitted edge' )\n plt.grid()\n plt.legend( loc='best' )\n plt.xlabel( 'nm', fontsize=18, fontweight='bold' )\n plt.ylabel( '$\\\\left|\\\\rho\\\\right|$', fontsize=18, fontweight='bold' )\n","sub_path":"Python/spatialResolution.py","file_name":"spatialResolution.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"605488469","text":"from re import split\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.db.models.aggregates import Avg\nfrom rest_framework import permissions\nfrom rest_framework.decorators import action, api_view, permission_classes\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.filters import SearchFilter\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.mixins import (CreateModelMixin, DestroyModelMixin,\n ListModelMixin)\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.views import Response, status\nfrom rest_framework.viewsets import GenericViewSet, ModelViewSet\nfrom rest_framework_simplejwt.tokens import AccessToken\n\nfrom .filters import TitleFilter\nfrom .models import Category, Genre, Review, Title\nfrom .permissions import IsAdmin, IsModerator, IsOwnerOrReadOnly, ReadOnly\nfrom .serializers import (CategorySerializer, CommentsSerializer,\n ConfirmTokenSerializer, CreateUserSerializer,\n GenreSerializer, ReviewsSerializer,\n TitleSerializerRead, TitleSerializerWrite,\n UserSerializer)\n\nUser = get_user_model()\n\n\nclass CreateListDestroyView(CreateModelMixin, ListModelMixin,\n DestroyModelMixin, GenericViewSet):\n pass\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef create_user(request):\n serialized = CreateUserSerializer(data=request.data)\n serialized.is_valid()\n username = split(r'_', serialized.data['email'])[0]\n user, created = User.objects.get_or_create(\n email=serialized.data['email'],\n defaults={'username': username})\n if created:\n user.set_unusable_password()\n user.save()\n confirmation_code = default_token_generator.make_token(user)\n user.email_user(\n subject='Confirmation code',\n message='Код подтверждения - {}'.format(confirmation_code)\n )\n return Response(serialized.data, status=status.HTTP_201_CREATED)\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef get_token(request):\n serialized = ConfirmTokenSerializer(data=request.data)\n serialized.is_valid(raise_exception=True)\n user = User.objects.get(email=serialized.validated_data['email'])\n if not default_token_generator.check_token(\n user,\n serialized.validated_data['confirmation_code']\n ):\n raise ValidationError('Data is not valid')\n access = AccessToken.for_user(user)\n return Response(\n {\"token\": str(access)}\n )\n\n\nclass UsersListCreateViewSet(ModelViewSet):\n lookup_field = 'username'\n queryset = User.objects.all()\n serializer_class = UserSerializer\n permission_classes = [IsAuthenticated, IsAdmin]\n filter_backends = [SearchFilter]\n search_fields = ('username',)\n\n @action(\n methods=['GET', 'PATCH'],\n detail=False,\n permission_classes=[IsAuthenticated]\n )\n def me(self, request, pk=None):\n user = self.request.user\n if request.method == 'PATCH':\n serializer = self.get_serializer(\n user,\n data=request.data,\n partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save(role=user.role)\n serializer = self.get_serializer(user)\n return Response(serializer.data)\n\n\nclass ReviewsViewSet(ModelViewSet):\n serializer_class = ReviewsSerializer\n permission_classes = [IsOwnerOrReadOnly | IsAdmin | IsModerator]\n\n def get_queryset(self):\n title = get_object_or_404(\n Title,\n id=self.kwargs['title_id']\n )\n return title.reviews.all()\n\n def perform_create(self, serializer):\n title = 
get_object_or_404(Title, id=self.kwargs.get('title_id'))\n serializer.save(author=self.request.user, title=title)\n\n\nclass CommentsViewSet(ModelViewSet):\n serializer_class = CommentsSerializer\n permission_classes = [IsOwnerOrReadOnly | IsAdmin | IsModerator]\n pagination_class = PageNumberPagination\n\n def get_queryset(self):\n review = get_object_or_404(\n Review,\n title__id=self.kwargs['title_id'],\n id=self.kwargs['review_id']\n )\n return review.comments.all()\n\n def perform_create(self, serializer):\n review = get_object_or_404(\n Review,\n title__id=self.kwargs['title_id'],\n id=self.kwargs.get('review_id')\n )\n serializer.save(author=self.request.user, review=review)\n\n\nclass CategoryViewSet(CreateListDestroyView):\n queryset = Category.objects.all()\n permission_classes = [ReadOnly | IsAdmin]\n serializer_class = CategorySerializer\n lookup_field = 'slug'\n filter_backends = [SearchFilter]\n search_fields = ['=name']\n\n\nclass GenreViewSet(CreateListDestroyView):\n queryset = Genre.objects.all()\n permission_classes = [ReadOnly | IsAdmin]\n serializer_class = GenreSerializer\n lookup_field = 'slug'\n filter_backends = [SearchFilter]\n search_fields = ['=name']\n\n\nclass TitleViewSet(ModelViewSet):\n queryset = Title.objects.annotate(\n rating=Avg('reviews__score')\n ).order_by('-rating')\n permission_classes = [ReadOnly | IsAdmin]\n filterset_class = TitleFilter\n\n def get_serializer_class(self):\n if self.request.method in permissions.SAFE_METHODS:\n return TitleSerializerRead\n return TitleSerializerWrite\n","sub_path":"api_v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"207503841","text":"#Archivo: Tuplas.py\n#Descripcion: Ejemplo en el que se muestra la utilizacion de las tuplas\n#\n\n#Declaracion de una tupla\nconjunto = (1,2,\"tres\",\"cuatro\")\n\n#Recorrido de una tupla como secuencia\ni = 0\nfor elemento in conjunto:\n print(f'conjunto[{i}]={elemento}')\n i+=1\n\n#Modificacion de un elemento de una tupla - No inmutable\n#conjunto[0] = 0\n#print('Despues de modificar, la lista quedo',conjunto)\n\n#Eliminacion de un elemento de una tupla - No inmutable\n#del(conjunto[0])\n\n#Agregado de un elemento de una lista\n#conjunto.append(100)\n#print('Despues de agregar el elemento al final, la lista quedo',conjunto)\n\n#Verificacion de a pertenencia\n\nif 'cuatro' in conjunto:\n print(\"El cuatro esta en el conjunto\")\n\n#Eliminacion de la tupla entera\ndel conjunto\n\n#Genera una excepcion - Manejo de excepciones\ntry:\n print('Despues de eliminar la el conjunto quedo',conjunto)\nexcept:\n print('Ya no existe el conjunto')\n\n","sub_path":"Tuplas.py","file_name":"Tuplas.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"62715222","text":"import uiScriptLocale\n\nROOT = \"d:/ymir work/ui/minimap/\"\n\nwindow = {\n\t\"name\" : \"AtlasWindow\",\n\t\"style\" : (\"movable\", \"float\",),\n\n\t\"x\" : SCREEN_WIDTH - 136 - 256 - 10,\n\t\"y\" : 0,\n\n\t\"width\" : 256 + 15,\n\t\"height\" : 256 + 38+50,\n\n\t\"children\" :\n\t(\n\t\t{\n\t\t\t\"name\" : \"TitleBar\",\n\t\t\t\"type\" : \"roofbar\",\n\t\t\t\"style\" : (\"attach\",),\n\n\t\t\t\"x\" : -8,\n\t\t\t\"y\" : 7,\n\n\t\t\t\"width\" : 256+30+15,\n\t\t\t\"color\" : \"red\",\n\t\t\t\n\t\t\t\"children\" : (\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"enableWarpWindowButton\",\n\t\t\t\t\t\"type\" : \"button\",\n\t\t\t\t\t\n\t\t\t\t\t\"x\" : 35,\n\t\t\t\t\t\"y\" : 20,\n\t\t\t\t\t\n\t\t\t\t\t\"default_image\" : \"yamato_button/button_small_n.tga\",\n\t\t\t\t\t\"over_image\" : \"yamato_button/button_small_h.tga\",\n\t\t\t\t\t\"down_image\" : \"yamato_button/button_small_p.tga\",\t\t\n\t\t\t\t\t\"text\" : \"GM: Warp\",\n\t\t\t\t\n\t\t\t\t},\n\t\t\t\n\t\t\t),\n\n\t\t},\n\t\t## BOARD\n\t\t{\n\t\t\t\"name\" : \"board\",\n\t\t\t\"type\" : \"board\",\n\t\t\t\n\t\t\t\"style\" : (\"attach\",),\n\n\t\t\t\"x\" : 0,\n\t\t\t\"y\" : 50,\n\n\t\t\t\"width\" : 256 + 15,\n\t\t\t\"height\" : 256 + 38,\n\t\t\t\n\t\t\t# \"children\" : (\n\t\t\t\t# {\n\t\t\t\t\t# \"name\" : \"warpWindow\",\n\t\t\t\t\t# \"type\" : \"window\",\n\t\t\t\t\t# \"style\" : (\"attach\",),\n\t\t\t\t\t\n\t\t\t\t\t# \"x\" : 0,\n\t\t\t\t\t# \"y\" : 50,\n\n\t\t\t\t\t# \"width\" : 256 + 15,\n\t\t\t\t\t# \"height\" : 256 + 38,\n\t\t\t\t# },\n\t\t\t\n\t\t\t# ),\n\n\t\t},\n\t),\n}\n\n# import uiScriptLocale\n\n# ROOT = \"d:/ymir work/ui/minimap/\"\n\n# window = {\n\t# \"name\" : \"AtlasWindow\",\n\t# \"style\" : (\"movable\", \"float\",),\n\n\t# \"x\" : SCREEN_WIDTH - 136 - 256 - 10,\n\t# \"y\" : 0,\n\n\t# \"width\" : 256 + 15,\n\t# \"height\" : 256 + 38,\n\n\t# \"children\" :\n\t# (\n\t\t# ## BOARD\n\t\t# {\n\t\t\t# \"name\" : \"board\",\n\t\t\t# \"type\" : \"board_with_titlebar\",\n\n\t\t\t# \"x\" : 0,\n\t\t\t# \"y\" : 0,\n\n\t\t\t# \"width\" : 256 + 15,\n\t\t\t# \"height\" : 256 + 38,\n\n\t\t\t# \"title\" : uiScriptLocale.ZONE_MAP,\n\t\t# },\n\t# ),\n# }\n","sub_path":"uiscript/uiscript/atlaswindow.py","file_name":"atlaswindow.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"359707694","text":"from selenium.webdriver.common.by import By\nfrom .base_page import BasePage\nfrom .locators import MainLocators\n\nclass MainPage(BasePage):\n\n def start_bnt_click(self):\n self.browser.execute_script(\"window.scrollBy(0, 600);\")\n start_button = self.browser.find_element(*MainLocators.START_GAME_BNT)\n start_button.click()\n self.browser.execute_script(\"window.scrollBy(0, -300);\")\n\n def close_cookies(self):\n close_button = self.browser.find_element(*MainLocators.CLOSE_COOKIES_BTN)\n close_button.click()\n","sub_path":"pages/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"472934245","text":"\nimport csv\nfrom os.path import expanduser\nhome = expanduser(\"~\")\n\nvars = [\"ACEDEPRS\", \"ACEDRINK\", \"ACEDRUGS\", \"ACEPRISN\", \"ACEDIVRC\", \"ACEPUNCH\", \"ACEHURT\", \"ACESWEAR\", \"ACETOUCH\", \"ACETTHEM\", \"ACEHVSEX\"]\n\nparsed_file_path = home + \"/Downloads/ICGE_course/project/project_ace_python/ace_2012_project/\"\nparsed_info_file = \"parsed_ace_data_2012.csv\"\nstate_age_csv_file = \"state_age_distribution_ace_data_2012.csv\"\nstate_age_ace_csv_file = \"state_age_ace_statistic_2012.csv\"\n\nvariables_nbr = 359\nage_variable_idx = 50\nace_var_start_idx, ace_var_end_idx = 229, 239\n\nfieldnames = [i for i in range(1, variables_nbr + 1)]\n\nwith open(state_age_csv_file, 'w') as csvfile1, open(state_age_ace_csv_file, 'w') as csvfile2:\n writer_age = csv.writer(csvfile1, delimiter=',')\n writer_ace = csv.writer(csvfile2, delimiter=',')\n\n age_categories = [\"state_id\", \"people\", \"Don't know/Not sure\", \"Refused\", \"18-24\", \"25-34\", \"35-44\", \"45-54\",\n \"55-64\", \"65-99\"]\n writer_age.writerow(age_categories)\n\n states = [i for i in range(1, 57)] # no states: 52 43 14 7 3\n states.append(66) # Guam\n states.append(72) # Puerto Rico\n other_states = set()\n\n for state_id in states:\n\n state_content = []\n with open(parsed_file_path + parsed_info_file, \"r\") as content:\n\n for idx, line in enumerate(content):\n if idx == 0:\n continue\n\n line = line.split(\",\")\n if line[variables_nbr - 1] != \"nan\":\n line[variables_nbr - 1] = line[variables_nbr - 1][:len(line[variables_nbr - 1]) - 1]\n\n if int(line[0]) == state_id: # state data\n state_content.append(line)\n\n if 56 < int(line[0]) < 1:\n other_states.add(int(line[0]))\n\n#####################################################################################################\n # getting state and age based distribution for 2012 dataset\n age_distribution = {i: 0 for i in age_categories[2:]}\n\n # getting ace statistics by states and ages for 2012 dataset\n general_age_ace_data = {}\n\n for age_group_idx in age_distribution:\n general_ace_distributions = []\n\n # 22.1-4 and 22.5 -- combined with \"Parents not married\", idx = 8\n ace_answers1 = [\"yes\", \"no\", \"Don’t know/Not Sure\", \"Parents not married\", \"Refused\", \"Not asked or Missing\"]\n ace_answers_values1 = [\"1\", \"2\", \"7\", \"8\", \"9\", \"nan\"]\n for j in range(0, 5): # five of ace questions with these set of answers\n ace_set1_distribution = {i: 0 for i in ace_answers_values1}\n general_ace_distributions.append(ace_set1_distribution)\n # writer_ace.writerow(age_categories)\n\n # 22.6-11\n ace_answers2 = [\"Never\", \"Once\", \"More than once\", \"Don’t know/Not Sure\", \"Refused\", \"Not asked or Missing\"]\n ace_answers_values2 = [\"1\", \"2\", \"3\", \"7\", \"9\", \"nan\"]\n for j in range(0, 6): # six of ace questions with these set of answers\n ace_set2_distribution = {i: 0 for i in ace_answers_values2}\n general_ace_distributions.append(ace_set2_distribution)\n\n general_age_ace_data[age_group_idx] = general_ace_distributions\n\n ace_columns = [\"state_id\", \"state sample population\", \"age category\", \"age category population\",\n \"ace question\", \"\"]\n for i in range(0, len(ace_answers1)):\n ace_columns.append(ace_answers1[i] + \" / \" + ace_answers2[i] + \" -- number of responses\")\n writer_ace.writerow(ace_columns)\n\n for idx, line in enumerate(state_content):\n if idx == 0:\n continue\n\n age_nbr = int(line[age_variable_idx])\n\n ace_values = [line[i] for i in 
range(ace_var_start_idx, ace_var_end_idx + 1)]\n # age_category = age_categories[2]\n # for var_idx in range(len(ace_values)):\n # # age_ace_data = general_age_ace_data[age_category]\n # # ace_data_distribution = age_ace_data[age_category][var_idx]\n # # ace_data_distribution[ace_values[var_idx]] += 1\n # general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n\n if age_nbr == 7:\n age_distribution[age_categories[2]] += 1\n age_category = age_categories[2]\n for var_idx in range(len(ace_values)):\n general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n elif age_nbr == 9:\n age_distribution[age_categories[3]] += 1\n age_category = age_categories[3]\n for var_idx in range(len(ace_values)):\n general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n elif 25 > age_nbr > 17:\n age_distribution[age_categories[4]] += 1\n age_category = age_categories[4]\n for var_idx in range(len(ace_values)):\n general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n elif 35 > age_nbr > 24:\n age_distribution[age_categories[5]] += 1\n age_category = age_categories[5]\n for var_idx in range(len(ace_values)):\n general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n elif 45 > age_nbr > 34:\n age_distribution[age_categories[6]] += 1\n age_category = age_categories[6]\n for var_idx in range(len(ace_values)):\n general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n elif 55 > age_nbr > 44:\n age_distribution[age_categories[7]] += 1\n age_category = age_categories[7]\n for var_idx in range(len(ace_values)):\n general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n elif 65 > age_nbr > 54:\n age_distribution[age_categories[8]] += 1\n age_category = age_categories[8]\n for var_idx in range(len(ace_values)):\n general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n elif 100 > age_nbr > 64:\n age_distribution[age_categories[9]] += 1\n age_category = age_categories[9]\n for var_idx in range(len(ace_values)):\n general_age_ace_data[age_category][var_idx][ace_values[var_idx]] += 1\n\n#####################################################################################################\n # outputting rows into csv file\n\n print(state_id)\n age_data = [state_id, len(state_content)]\n [age_data.append(j) for i, j in age_distribution.items()]\n writer_age.writerow(age_data)\n\n no_ace_status = 0\n age_ace_line = [state_id, len(state_content)]\n writer_ace.writerow(age_ace_line)\n\n age_idx = 0\n for i, j in age_distribution.items():\n age_ace_line = [\"\\t\", str(i), str(j)]\n writer_ace.writerow(age_ace_line)\n\n ace_data = general_age_ace_data[i]\n for var_id in range(len(vars)):\n age_ace_line = [\"\\t\", \"\\t\", vars[var_id]]\n writer_ace.writerow(age_ace_line)\n\n age_ace_line = [\"\\t\", \"\\t\", \"\\t\"]\n responses = ace_data[var_id]\n [age_ace_line.append(str(k) + \" -- \" + str(l)) for k, l in responses.items()]\n writer_ace.writerow(age_ace_line)\n\n cc = 0\n for k, l in responses.items():\n if cc == 5:\n continue\n no_ace_status += l\n cc += 1\n\n age_idx += 1\n if no_ace_status != 0:\n print(\"Stat exists in :\" + str(state_id))\n print(\"other states: \")\n 
print(other_states)\n\n\n#####################################################################################################\n","sub_path":"ace_2012_project/ace_need_data_2012.py","file_name":"ace_need_data_2012.py","file_ext":"py","file_size_in_byte":8108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"540766730","text":"\"\"\"\nSplit each videos frame by frame.\n\"\"\"\n\nimport os\nimport imageio\nfrom scipy import misc\nfrom tqdm import tqdm\n\nnames = {\n '1_1': (0, 624),\n '1_2': (625, 1452),\n '2_1': (1453, 2001),\n '2_2': (2002, 2686),\n '2_3': (2687, 3454),\n '2_4': (3455, 4033),\n '2_5': (4034, 4928),\n '2_6': (4929, 5595),\n '3_1': (5596, 6253),\n '3_2': (6254, 6930),\n '3_3': (6931, 7738)\n}\n\nif not os.path.exists('UMN_frames'):\n os.makedirs('UMN_frames')\nv = imageio.get_reader('UMN/Crowd-Activity-All.avi', 'ffmpeg')\nnb_frames = v.get_meta_data()['nframes']\nfor f in tqdm(range(nb_frames)):\n try:\n f_data = v.get_data(f)\n misc.imsave('UMN_frames/frame_{}.png'.format(f), misc.imresize(f_data, (224, 224)))\n except RuntimeError:\n pass\nfor k in tqdm(list(names.keys())):\n for i in range(names[k][0], names[k][1] + 1):\n os.rename('UMN_frames/frame_{}.png'.format(i),\n 'UMN_frames/frame_{}-{}.png'.format(k, i))\n","sub_path":"data/UMN_split_to_frames.py","file_name":"UMN_split_to_frames.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"5050313","text":"# -*- coding: utf-8 -*-\n\nfrom backoff import expo\nfrom backoff import on_exception\nfrom google.cloud import error_reporting\nfrom google.cloud import logging\nfrom logging import basicConfig\nfrom logging import getLogger\nfrom logging import NOTSET\n\n# The format for local logs.\nLOGS_FORMAT = (\"%(asctime)s \"\n \"%(name)s \"\n \"%(process)d \"\n \"%(thread)d \"\n \"%(levelname)s \"\n \"%(message)s\")\n\n# The path to the log file for local logging.\nLOG_FILE = \"/trump2cash/temp/trump2cash.log\"\n\n\nclass Logs:\n \"\"\"A helper for logging locally or in the cloud.\"\"\"\n\n def __init__(self, name, to_cloud=True):\n self.to_cloud = to_cloud\n\n # Initialize the local file logger.\n self.local_logger = getLogger(name)\n basicConfig(format=LOGS_FORMAT, level=NOTSET, filename=LOG_FILE)\n\n # If requested, also initialize the Stackdriver logging and error\n # reporting clients.\n if self.to_cloud:\n self.cloud_logger = logging.Client().logger(name)\n self.error_client = error_reporting.Client()\n\n def debug(self, text):\n \"\"\"Logs at the DEBUG level.\"\"\"\n\n if self.to_cloud:\n self.safe_cloud_log_text(text, severity=\"DEBUG\")\n else:\n self.local_logger.debug(text)\n\n def info(self, text):\n \"\"\"Logs at the INFO level.\"\"\"\n\n if self.to_cloud:\n self.safe_cloud_log_text(text, severity=\"INFO\")\n else:\n self.local_logger.info(text)\n\n def warn(self, text):\n \"\"\"Logs at the WARNING level.\"\"\"\n\n if self.to_cloud:\n self.safe_cloud_log_text(text, severity=\"WARNING\")\n else:\n self.local_logger.warning(text)\n\n def error(self, text):\n \"\"\"Logs at the ERROR level.\"\"\"\n\n if self.to_cloud:\n self.safe_cloud_log_text(text, severity=\"ERROR\")\n else:\n self.local_logger.error(text)\n\n def catch(self, exception):\n \"\"\"Logs an exception.\"\"\"\n\n if self.to_cloud:\n self.safe_report_exception()\n self.safe_cloud_log_text(str(exception), severity=\"CRITICAL\")\n else:\n self.local_logger.critical(str(exception))\n\n def safe_cloud_log_text(self, text, severity):\n \"\"\"Logs to the cloud, retries if necessary, and eventually fails over\n to local logs.\n \"\"\"\n\n try:\n self.retry_cloud_log_text(text, severity)\n except BaseException as exception:\n self.local_logger.error(\"Failed to log to cloud: %s %s %s\" %\n (exception, severity, text))\n\n @on_exception(expo, BaseException, max_tries=7)\n def retry_cloud_log_text(self, text, severity):\n \"\"\"Logs to the cloud and retries up to 7 times with exponential backoff\n if the upload fails.\n \"\"\"\n\n self.cloud_logger.log_text(text, severity=severity)\n\n def safe_report_exception(self):\n \"\"\"Reports the latest exception, retries if necessary, and eventually\n fails over to local logs.\n \"\"\"\n\n try:\n self.retry_report_exception()\n except BaseException as exception:\n self.local_logger.error(\"Failed to report exception: %s\" %\n exception)\n\n @on_exception(expo, BaseException, max_tries=7)\n def retry_report_exception(self):\n \"\"\"Reports the latest exception and retries up to 7 times with\n exponential backoff if the upload fails.\n \"\"\"\n\n self.error_client.report_exception()\n","sub_path":"logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"215772360","text":"import os\nimport numpy as np\nimport cvxpy as cp\nimport scipy.io as sio\nimport scipy.misc\n\nfrom utils import setup_logger\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Test LADMM with synthetic data')\nparser.add_argument('-c', '--cols', type=int, default=0, help='number of columns in A to be replaced')\nparser.add_argument('-p', '--p', type=float, default=0.2, help='p in the Bernoulli distribution')\nparser.add_argument('-m', '--mu', type=float, default=0.0, help='mu of Gaussian dist')\nparser.add_argument('-s', '--sigma', type=float, default=2.0, help='sigma of Gaussian dist')\nparser.add_argument('--data-type', type=str, default='gaussian', help='data type')\nparser.add_argument('-a', '--alpha', type=float, default=0.01, help='hyper-param in the objective')\nparser.add_argument('--split', type=str, default='test', help='calculate train or test split')\nparser.add_argument('--batch-size', type=int, default=20, help='batch size')\n\ndef loss_l1(X):\n return cp.sum(cp.abs(X))\n\ndef objective_fn(Z, X, A, alpha):\n residual_l1 = loss_l1(cp.matmul(A, Z) - X)\n regularizer_l1 = loss_l1(Z)\n return (residual_l1 + alpha * regularizer_l1) / X.shape[1]\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n alpha = args.alpha\n split = args.split\n batch_size = args.batch_size\n\n # test data file\n test_file = 'syn_data'\n test_file += '_cols{}'.format(args.cols) if args.cols > 0 else ''\n test_file += '_p{}_mu{}_s{}'.format(args.p, args.mu, args.sigma)\n test_file += '_{}'.format(args.data_type) if args.data_type != 'gaussian' else ''\n test_file += '.mat'\n print('using testing data file {}'.format(test_file))\n\n # logger file\n if not os.path.isdir('cvx-solutions'):\n os.makedirs('cvx-solutions')\n if not os.path.isdir('cvx-solutions/logs'):\n os.makedirs('cvx-solutions/logs')\n save_file = os.path.join('cvx-solutions', '{}-alpha{}-{}.npy'.format(test_file[:-4], alpha, split))\n log_file = os.path.join('cvx-solutions/logs', '{}-alpha{}-{}.log'.format(test_file[:-4], alpha, split))\n print = setup_logger(log_file)\n\n syn_data = sio.loadmat(test_file)\n A = syn_data['A'].astype(np.float32)\n m, n = A.shape\n\n X = syn_data[split + '_x'].astype(np.float32).T # (m, #samples)\n Z = syn_data[split + '_z'].astype(np.float32).T # (n, #samples)\n E = syn_data[split + '_e'].astype(np.float32).T # (m, #samples)\n n_samples = X.shape[1]\n\n Z_var = cp.Variable((n,batch_size))\n X_param = cp.Parameter((m,batch_size))\n objective = cp.Minimize(objective_fn(Z_var, X_param, A, alpha))\n problem = cp.Problem(objective)\n\n Z_sol = np.zeros(Z.shape, dtype=np.float32)\n\n for i in range(n_samples // batch_size):\n\n X_param.value = X[:, i*batch_size:(i+1)*batch_size]\n out = problem.solve()\n print('[{:2d}/{:2d}]\\t{}'.format(i+1, n_samples//batch_size, out))\n Z_sol[:, i*batch_size:(i+1)*batch_size] = Z_var.value\n\n np.save(save_file, Z_sol)\n print('Solutions saved to file {}'.format(save_file))\n\n","sub_path":"compute_cvx_solutions.py","file_name":"compute_cvx_solutions.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"74164527","text":"'''\n Maan Qraitem \n Hateful Meme Classification \n'''\n\nfrom __future__ import print_function, division\nimport os\nimport os.path as osp \nimport torch\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport json \nfrom vocab import Vocab\nfrom tqdm import tqdm\nimport pickle\n#import cv2\n\nclass FbDataset(Dataset): \n def __init__(self, dataset, root_dir, vocab, mode='train'):\n self.root_dir = root_dir\n self.meme_data = pd.read_json(osp.join(root_dir, dataset+'.jsonl'), lines=True) \n self.img_features = np.load(osp.join(root_dir, 'img_features_%s.npy'%dataset))\n self.mode = mode \n with open(osp.join(root_dir, 'img2idx_%s.pkl'%dataset), 'rb') as handle:\n self.img2idx = pickle.load(handle)\n \n self.entries = [] \n self.vocab = vocab\n self.max_length = 15\n \n\n self.load_data() \n self.tokenize() \n\n def add_entry(self, meme_entry):\n image_id = meme_entry['img'].split(\".\")[0].split(\"/\")[1]\n meme_text = meme_entry['text'] \n meme_img_feature = self.img_features[self.img2idx[image_id]] \n meme_id = meme_entry['id'] \n\n entry = { \n 'id':meme_id,\n 'imag_id': image_id,\n 'text': meme_text, \n 'img_feature': meme_img_feature\n }\n \n if self.mode == 'train': \n meme_label = np.array(meme_entry['label']).astype(np.float)\n entry['label'] = meme_label \n\n self.entries.append(entry)\n\n def load_data(self): \n for index, meme_entry in tqdm(self.meme_data.iterrows(), total=self.meme_data.shape[0]):\n self.add_entry(meme_entry) \n\n \n def tokenize(self): \n for entry in self.entries: \n meme_text = entry['text']\n meme_tokens = self.vocab.tokenize(meme_text)\n meme_tokens = [self.vocab.word2idx[token] for token in meme_tokens] \n meme_tokens = meme_tokens[:self.max_length] \n if len(meme_tokens) < self.max_length: \n padding = [self.vocab.padding_word_idx()] * (self.max_length - len(meme_tokens)) \n meme_tokens = meme_tokens + padding\n \n assert len(meme_tokens) == self.max_length, \"meme text size is not %d\"%self.max_length\n entry['text_tokens'] = np.array(meme_tokens) \n\n\n def __len__(self): \n return len(self.entries) \n\n def __getitem__(self, index):\n entry = self.entries[index] \n entry_id = str(entry['id'])\n entry_tokens = entry['text_tokens']\n entry_text = entry['text'] \n entry_img_feature = entry['img_feature'] \n \n sample = { \n 'id' : entry_id, \n 'tokens': entry_tokens, \n 'text' : entry_text, \n 'img_feature' : entry_img_feature, \n } \n\n if self.mode == 'train': \n entry_label = entry['label'] \n sample['label'] = entry_label \n\n return sample \n\ndef main():\n root_path = '../../data'\n vocab = Vocab(root_path) \n vocab.loadfiles('vocab_data.pkl') \n fbdataset = FbDataset('train.jsonl', root_path, vocab) \n for i in range(len(fbdataset)): \n sample = fbdataset[i]\n #print(sample['id'])\n #print(sample['tokens']) \n #print(sample['text']) \n #cv2.imshow('image',sample['img'])\n #cv2.waitKey(0)\n #cv2.destroyAllWindows() \n\nif __name__ == \"__main__\":\n main()\n \n\n","sub_path":"fbhm_text_only/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"90287916","text":"\r\n#Basketball Court Code adapted from Savvas Tjortjoglou\r\nfrom matplotlib.patches import Circle, Rectangle, Arc\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pandas\r\nimport math \r\n\r\nCATEGORIES = ['GameID', 'Game Date', 'Team ID', 'Team Name', 'Shot Period', 'Shot Number', 'Made/Missed', \r\n 'LeftValue', 'TopValue', 'Shot Info', 'Player Name', 'Player ID', 'ycoord', 'xcoord']\r\n\r\nDATAFRAME = pandas.ExcelFile('C:/Users/Gautam Goel/Documents/Documents/Projects/NCAA Project/New Code/dataframe18.xlsx')\r\nDATAFRAME.sheet_names\r\n\r\nDATA = DATAFRAME.parse('Sheet1')\r\nDATALIST = DATA.values.tolist()\r\nPLAYERDATAMAKE, PLAYERDATAMISS = [], []\r\ncurrTeam = 'duke-blue-devils'\r\ncurrPlayer = 'trae young'\r\n\r\n#Collect player shot data\r\ndef collect_shots():\r\n for data in DATALIST: \r\n if data[11] == currPlayer and data[7] == 'made': PLAYERDATAMAKE.append(data[1::])\r\n elif data[11] == currPlayer: PLAYERDATAMISS.append(data[1::])\r\n #if data[4] == currTeam: TEAMDATA.append(data[1::])\r\n \r\n for val in PLAYERDATAMAKE:\r\n left_value, top_value = val[7], val[8]\r\n \r\n if left_value < 50: val.append(float(left_value * 0.94))\r\n else: val.append(float((100 - left_value) * 0.94))\r\n \r\n if left_value <= 47: val.append(float(top_value * 0.5))\r\n else: val.append(float(50 - (top_value * 0.5)))\r\n \r\n val[13] = (val[13] - 25) * 10\r\n val[12] = val[12] * 10\r\n \r\n for val in PLAYERDATAMISS:\r\n left_value, top_value = val[7], val[8]\r\n \r\n if left_value < 50: val.append(float(left_value * 0.94))\r\n else: val.append(float((100 - left_value) * 0.94))\r\n \r\n if left_value <= 47: val.append(float(top_value * 0.5))\r\n else: val.append(float(50 - (top_value * 0.5)))\r\n \r\n val[13] = (val[13] - 25) * 10\r\n val[12] = val[12] * 10\r\n\r\n#Create NCAA basketball court \r\ndef create_court(ax = None, color = 'black', lw = 1):\r\n if ax is None: ax = plt.gca()\r\n \r\n hoop = Circle((0, 52.5), radius = 7.5, linewidth=lw, color=color, fill=False)\r\n backboard = Rectangle((-30, 40), 60, -1, linewidth=lw, color=color)\r\n paint = Rectangle((-60, 0), 120, 190, linewidth=lw, color=color, fill=False)\r\n \r\n free_throw_arc = Arc((0, 190), 120, 120, theta1=0, theta2=180, linewidth=lw, color=color, fill=False)\r\n three_point_arc = Arc((0, 0), 415, 500, theta1=0, theta2=180, linewidth=lw, color=color)\r\n center_arc = Arc((0, 470), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color)\r\n \r\n line1 = Rectangle((-250, 100), 500, -1, linewidth=lw, color=color)\r\n line2 = Rectangle((-250, 250), 500, -1, linewidth=lw, color=color)\r\n\r\n court_elements = [hoop, backboard, paint, free_throw_arc, three_point_arc, center_arc, line1, line2]\r\n \r\n for element in court_elements: ax.add_patch(element)\r\n return ax\r\n \r\ndef main():\r\n #collect_shots()\r\n playermake_dataframe = pandas.DataFrame(PLAYERDATAMAKE, columns = CATEGORIES)\r\n playermiss_dataframe = pandas.DataFrame(PLAYERDATAMISS, columns = CATEGORIES)\r\n \r\n sns.set_style(\"white\")\r\n sns.set_color_codes()\r\n \r\n plt.figure(figsize=(12,11))\r\n create_court()\r\n plt.scatter(playermake_dataframe.xcoord, playermake_dataframe.ycoord, c='blue')\r\n plt.scatter(playermiss_dataframe.xcoord, playermiss_dataframe.ycoord, c='red')\r\n\r\n plt.xlim(-250, 250)\r\n plt.ylim(0, 500)\r\n \r\n \"\"\"\r\n cmap = plt.cm.YlOrRd_r\r\n cmap = plt.cm.gist_heat_r\r\n joint_chart = sns.jointplot(player_dataframe.xcoord, player_dataframe.ycoord, stat_func=None, \r\n 
kind='scatter', space=0, alpha=0.5)\r\n joint_chart = sns.jointplot(player_dataframe.xcoord, player_dataframe.ycoord, stat_func=None, \r\n kind='kde', space=0, color=cmap(0.1), cmap=cmap, n_levels=50)\r\n joint_chart = sns.jointplot(player_dataframe.xcoord, player_dataframe.ycoord, stat_func=None, \r\n kind='hex', space=0, color=cmap(.2), cmap=cmap)\r\n joint_chart.fig.set_size_inches(12, 11)\r\n \r\n ax = joint_chart.ax_joint\r\n create_court(ax)\r\n ax.set_xlim(-250, 250)\r\n ax.set_ylim(500, 0) \r\n ax.set_xlabel('')\r\n ax.set_ylabel('')\r\n ax.tick_params(labelbottom='off', labelleft='off')\r\n ax.set_title('Trae Young FGA 2017-2018 Reg. Season', y=1.2, fontsize=16)\r\n ax.text(-250, 500, 'Data Source ESPN.com', fontsize=12) \r\n \"\"\"\r\n \r\n plt.show()\r\n\r\nmain()\r\n","sub_path":"Shot Chart Visualization.py","file_name":"Shot Chart Visualization.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"423621569","text":"#!/usr/bin/env python\n\"\"\"Strip frame types from an i3 file.\"\"\"\n\nimport sys\nimport os\n\ntry:\n from icecube import icetray,dataio\nexcept ImportError:\n print('You must be inside an IceCube metaproject environment.')\n sys.exit(1)\n\ndef files(args):\n \"\"\"A frame generator that can continue over multiple files\"\"\"\n for a in args:\n for f in dataio.I3File(a):\n yield f\n\ndef main():\n from optparse import OptionParser,OptionGroup\n \n usage = 'usage: %prog [options] input_file [input_file2 ...] output_file\\n\\n'\n usage += 'Strip frame types from an i3 file.'\n parser = OptionParser(usage=usage)\n parser.add_option('-g','--geometry',default=False,action='store_true',\n help='Strip Geometry frames')\n parser.add_option('-c','--calibration',default=False,action='store_true',\n help='Strip Calibration frames')\n parser.add_option('-d','--detector',default=False,action='store_true',\n help='Strip Detector frames')\n parser.add_option('--no-trayinfo',default=True,action='store_false',\n dest='trayinfo',help='Do not strip TrayInfo frames')\n \n (options,args) = parser.parse_args()\n \n if len(args) < 2:\n print('ERROR: require an input and output file')\n print('')\n parser.print_help()\n \n elif os.path.exists(args[-1]):\n print('ERROR: output file',args[-1],'already exists!')\n print('')\n parser.print_help()\n \n else:\n count = 0\n outfile = dataio.I3File(args[-1],'w')\n try:\n for frame in files(args[:-1]):\n if options.geometry and frame.Stop == icetray.I3Frame.Geometry:\n continue\n elif options.calibration and frame.Stop == icetray.I3Frame.Calibration:\n continue\n elif options.detector and frame.Stop == icetray.I3Frame.Detector:\n continue\n elif options.trayinfo and frame.Stop == icetray.I3Frame.TrayInfo:\n continue\n outfile.push(frame)\n finally:\n outfile.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"dataio/resources/examples/strip.py","file_name":"strip.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"81834460","text":"import FWCore.ParameterSet.Config as cms\nfrom RecoLocalCalo.HGCalRecProducers.HGCalRecHit_cfi import dEdX_weights as dEdX\n\nHGCalPhotonIDValueMap = cms.EDProducer(\"HGCalPhotonIDValueMapProducer\",\n photons = cms.InputTag(\"photonsFromMultiCl\"),\n pcaRadius = cms.double(3.),\n EERecHits = cms.InputTag('HGCalRecHit:HGCEERecHits'),\n FHRecHits = cms.InputTag('HGCalRecHit:HGCHEFRecHits'),\n BHRecHits = cms.InputTag('HGCalRecHit:HGCHEBRecHits'),\n dEdXWeights = dEdX,\n)\n","sub_path":"EgammaAnalysis/python/HGCalPhotonIDValueMap_cfi.py","file_name":"HGCalPhotonIDValueMap_cfi.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"285735845","text":"str=\"Mr John Smith \"\nnewstr=str.strip()\nn=len(newstr)\nres=[0]*n\nprint(res)\nfor i in range(0,n):\n if ' ' in newstr[i] :\n res[i]='%20'\n else:\n res[i] = newstr[i]\nprint(''.join(res))\n","sub_path":"Strings/replaceWith%20.py","file_name":"replaceWith%20.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"152646669","text":"import click\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.ciphers import (\n Cipher, algorithms, modes\n)\nfrom passlib.hash import bcrypt\n\n\ndef encrypt(key, plaintext, associated_data):\n # Generate a random 96-bit IV.\n iv = b'HQ\\xd9\\xb3Kz\\n\\xcc\\xb224Q\\xdb\\xc7u\\xb7' # os.urandom(12)\n\n # Construct an AES-GCM Cipher object with the given key and a\n # randomly generated IV.\n encryptor = Cipher(\n algorithms.AES(key),\n modes.GCM(iv),\n backend=default_backend()\n ).encryptor()\n\n # associated_data will be authenticated but not encrypted,\n # it must also be passed in on decryption.\n encryptor.authenticate_additional_data(associated_data)\n\n # Encrypt the plaintext and get the associated ciphertext.\n # GCM does not require padding.\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n\n return iv, ciphertext, encryptor.tag\n\n\ndef decrypt(key, associated_data, iv, ciphertext, tag):\n # Construct a Cipher object, with the key, iv, and additionally the\n # GCM tag used for authenticating the message.\n decryptor = Cipher(\n algorithms.AES(key),\n modes.GCM(iv, tag),\n backend=default_backend()\n ).decryptor()\n\n # We put associated_data back in or the tag will fail to verify\n # when we finalize the decryptor.\n decryptor.authenticate_additional_data(associated_data)\n\n # Decryption gets us the authenticated plaintext.\n # If the tag does not match an InvalidTag exception will be raised.\n return decryptor.update(ciphertext) + decryptor.finalize()\n\n\ndef hash(value, salt):\n if salt == \"\":\n raise Exception(\"You need to specify salt (at least 1 character).\")\n return bcrypt.using(rounds=12, salt=salt.ljust(22, \"x\")).hash(value)\n\n\ndef correctness_hash(*strings, fake=False):\n if not fake:\n return bcrypt.using(rounds=12).hash(''.join(map(str, strings)))\n return bcrypt.using(rounds=12).hash(''.join(map(str, strings)) + \"1\")\n\n\ndef check_correctness_hash(query_result, *keys):\n for item in query_result:\n secret = \"\".join(str(item[key]) for key in keys)\n if not bcrypt.verify(secret, item[\"correctness_hash\"]):\n click.echo(f\"{item} failed correctness hash test!\")\n","sub_path":"client/crypto_utils.py","file_name":"crypto_utils.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"288149996","text":"\"\"\"Module with psapoc views and html classes.\n\"\"\"\n\nfrom django import forms\nfrom django.shortcuts import render, get_object_or_404, get_list_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nfrom django.forms import ModelForm, BaseForm\nfrom django.forms.formsets import formset_factory\nfrom django_tables2 import RequestConfig\nfrom django.db.models import Avg, Max, Min, Count, Sum\nimport django_tables2 as tables\nimport datetime\nfrom django.contrib.auth.decorators import login_required\nfrom psapoc.tables import *\nfrom psapoc.forms import *\nfrom psapoc.models import *\nfrom django.contrib import messages\n\n\n@login_required\ndef projectlist(request):\n\t\"\"\"View to generate a project list table and render to html with template.\"\"\"\n\tq = Project.objects.all()\n\ttable = ProjectTable(q)\n\ttable.exclude = 'alternative_billto','alternative_payer','quote_currency','dream_package','project_manager'\n\ttable.sequence = 'id','view','manage','name','customer','delivery_region','quoted_amount','_revenue','_margin'\n\t\n\t# Enable pagination on table\n\tRequestConfig(request, paginate={\"per_page\": 25}).configure(table)\n\treturn render(request, 'psapoc/projectlist.html', {'table': table})\n\ndef home(request):\n\t\"\"\"View that redirects to the /psapoc/ root.\"\"\"\n\treturn HttpResponseRedirect('/psapoc/') \n\n@login_required\ndef projectfinancials(request, projectid):\n\t\"\"\"View that collect and renders project details with billable items and effort costing with template.\n\t\n\tinput : primary key for project (projectid)\n\toutput : template-rendered with 4 tables summarising project info\n\t\t- financial breakdown of baseline, revenue, cost, and resulting margin\n\t\t- list of fixed price items on the project, with aggregated revenue target\n\t\t- list of time-bade price items on the project, with aggregate revenue targets\n\t\t- all tasks associated to the project, with total time and cost booked on it\t\n\t\n\t\"\"\"\n\tproject = Project.objects.get(pk=projectid)\n\tprojectlist = Project.objects.filter(pk=projectid)\n \n\tfixbillselection = FixBillItem.objects.filter(project=projectid)\n\ttimedbillselection = TimedBillItem.objects.filter(project=projectid)\n\ttaskselection = Task.objects.filter(project=projectid)\n\n\t# convert to renderable tables\n\tfixtable1 = FixBillItemTable(fixbillselection, prefix='1-')\n\ttimedtable2 = TimedBillItemWithTimeTable(timedbillselection, prefix='2-')\n\ttasktable3 = TaskTable(taskselection,prefix='3-')\n\tprojectresultstable = ProjectResultsTable(projectlist,prefix='4-')\n\n\t# set database columns NOT to display\n\tfixtable1.exclude = 'project','completion','planned_date','completed_date','amount','currency','billed_date'\n\ttimedtable2.exclude = 'project','completion','planned_date','completed_date','billed_date'\n\ttasktable3.exclude = 'project','completion','planned_start_date','planned_end_date'\n\t\n\t# set the order of columns to display (db columns and calculated fields from table class)\n\tfixtable1.sequence = 'id','product','_revenue','invoice_text','billed'\n\ttimedtable2.sequence = 'id','product','_revenue','invoice_text','billed','task','_hours','_cost','billing_rate','_hourly_billing_rate','revised_hours'\n\ttasktable3.sequence = 'id','name','resource','_hours','_cost'\n\n\t# Set pagination on each table\n\tRequestConfig(request, paginate={\"per_page\": 
10}).configure(fixtable1)\n\tRequestConfig(request, paginate={\"per_page\": 10}).configure(timedtable2)\n\tRequestConfig(request, paginate={\"per_page\": 10}).configure(tasktable3)\n#\tRequestConfig(request).configure(projecttable)\n\n\t# render with the template and pass tables with contents\n\treturn render(request, 'psapoc/projectfinancials.html', {'project':project,'table1': fixtable1,'table2':timedtable2,'table3':tasktable3,'projecttable':projectresultstable})\n\n\n\t# Timesheet form for current week, or week having given date\n@login_required\t\t\t\t\t\ndef TimeSheet(request, resourceid=0, daterange=None):\n\t\"\"\"View to generate timesheet to collect timesheet form input.\n\t\n\tUses currently logged in user for task selection\n\tPass last date for timeinput, defaults to today\n\t\"\"\"\n\t# Initialise parameters\n\t# resource ID of specified or currently logged user\n\tif resourceid == 0:\n\t\tresourceid=request.user.id\n\ttry:\n\t\tSelectedResource = Resource.objects.get(id=resourceid)\n\texcept:\n\t\t# error: specified resource does not exist. redirect to homepage with error message\n\t\treturn HttpResponseRedirect('/psapoc/')\n\t\n\t# if this is submitted timesheet, need to process the form data\n\tif request.method == 'POST':\n\t\tprint('got a POST')\n\t\tfor key, value in request.POST.items():\n\t\t\tprint('looping through post items')\n\t\t\tif ('task' in key) and ('day' in key) and (float(value) != 0): # processing task_$_day_$ field names only, skipping zeros\n\t\t\t\tprint('found a non zero value with task and day in the field label that needs saving')\n\t\t\t\tparsedkey = key.split('_')\n\t\t\t\tnewentry = TimeEntry()\n\t\t\t\tnewentry.task = Task.objects.get(id=parsedkey[1])\n\t\t\t\tnewentry.date = datetime.datetime.strptime(parsedkey[3], '%Y-%m-%d').date()\n\t\t\t\tnewentry.hours = float(value)\n\t\t\t\tnewentry.resource = Resource.objects.get(id=resourceid)\n\t\t\t\tnewentry.project = Task.objects.get(id=parsedkey[1]).project\n\t\t\t\tnewentry.save() # commit new record to DB\n\t\t\t\tmessages.success(request, 'Timesheet data for ' + key + ' added.')\n\t\treturn HttpResponseRedirect(request.path)\n\t# if this is not submission, then create a blank timesheet form for all tasks assigned to the resource\n\telse:\n\t\tformset = [] # dict where each row in the form gets separate item\n\t\t\n\t\theader = TaskTimeSheetForm(daterange)\n\t\theader.createcolumnheaders()# generates row with column headers\n\t\tpreviousm = header.previousmonday() # date passed in the template for a toggle to previous week\n\t\tnextm = header.nextmonday() # date passed in the template for a toggle to next week\n\t\tformset.append(header) # first row in the set\n\t\t\n\t\tfor atask in Task.objects.filter(resource=resourceid, completion__lt=100): # generate new row with fields for each task\n\t\t\ttaskline = TaskTimeSheetForm(daterange)\n\t\t\ttaskline.createtaskrow(atask)\n\t\t\tformset.append(taskline)\n\t\t\n\t\tsummary = TaskTimeSheetForm(daterange)\n\t\tsummary.createsummaryrow(resourceid)\n\t\tformset.append(summary)\n\t\t\n\t\treturn render(request, 'psapoc/timesheet.html',{'formset':formset,'resource':SelectedResource,'previous':previousm.strftime('%Y-%m-%d'),'next':nextm.strftime('%Y-%m-%d')})\n\t\n# unused view, attempt at creqting a model-based form\n@login_required\ndef dosomething(request, project_id):\n\t\"\"\"View to create a model-based form\n\tobsolete; not used in this project\n\t\n\tprocess form input, and if absent, generate new blank form from Project db model\"\"\"\n\t# if 
this is a POST request we need to process the form data\n\tif request.method == 'POST':\n\t\t# create a form instance and populate it with data from the request:\n\t\tform = ProjectForm(request.POST)\n\t\t# check whether it's valid:\n\t\tif form.is_valid():\n\t\t\tform.save()\n\n\t# if a GET (or any other method) we'll create a blank form\n\telse:\n\t\tform = ProjectForm()\n\treturn render(request, 'psapoc/dosomething.html',{'form':form})\n","sub_path":"Proof_of_concept/psapoc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"137167742","text":"import base64\nfrom Crypto.Cipher import AES\n\nKEY = '1234567890123456'\n\n\n# AES加密算法的作用是替代原先的DES加密算法,该加密算法是对称加密算法,即加解密双方持有的KEY是相同的。\n# 密钥长度的支持为128、192、256(分别对应16字节、24字节、32字节),分组长度128位(16字节)。\n# 密钥可随机生成,这里作为演示的KEY为16字节,但是实际生产中,最好使用32字节的KEY。\n\nclass AESCipher(object):\n '''\n AES加密算法是密码学中的高级加密标准,又称Rijndael加密法。是一种区块加密技术,已然成为对称密钥加密中最流行的算法之一。\n\n 该示例采用 AES-ECB 模式(其余还有CBC、CFB、OFB三种加密模式),PKCS7 补码方法,进行加解密。\n '''\n\n def __init__(self, key):\n self.bs = 16 # 分组长度固定为16字节,若为其他数值,则加解密必然失败。\n self.key = key\n\n def aes_encrypt(self, raw):\n '''\n 加密算法\n\n :param raw:\n :return:\n '''\n raw = self._pad(raw) # 先进行补位,使加密数据的长度为16字节的整数倍。\n iv = b''\n cipher = AES.new(self.key, AES.MODE_ECB, iv)\n return base64.b64encode(iv + cipher.encrypt(raw))\n\n def aes_decrypt(self, enc):\n '''\n 解密算法\n\n :param enc:\n :return:\n '''\n enc = base64.b64decode(enc)\n iv = b''\n cipher = AES.new(self.key, AES.MODE_ECB, iv)\n return self._unpad(cipher.decrypt(enc)) # 去掉补位,还原数据\n\n def _pad(self, s, charset='ascii'):\n '''\n 补位算法\n\n :param s:\n :param charset:\n :return:\n '''\n if isinstance(s, str):\n return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)\n if isinstance(s, (bytes, bytearray)):\n return s + bytes((self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs), encoding=charset)\n\n def _unpad(self, s):\n '''\n 去补位算法\n\n :param s:\n :return:\n '''\n return s[:-ord(s[len(s) - 1:])]\n\n\nif __name__ == \"__main__\":\n aes = AESCipher(KEY)\n\n data = '阿萨德' * 200\n # data = b'123456789' * 200\n en_str = aes.aes_encrypt(data) # 进行加密\n de_str = aes.aes_decrypt(en_str) # 进行解密\n\n if isinstance(data, str):\n print(de_str.decode('utf-8') == data)\n if isinstance(data, (bytes, bytearray)):\n print(de_str == data)\n","sub_path":"new_p6_network_security/AES加密与解密.py","file_name":"AES加密与解密.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"95332639","text":"import os, sys\nsys.path.append(\"..\")\nsys.path.append('../DSB2017')\nfrom DSB2017.main import inference\n\nfrom app.base import blueprint\nfrom app.base.forms import AnonymousForm\nfrom flask import flash, render_template, redirect, request, url_for, current_app\nfrom werkzeug.utils import secure_filename\n\n\n# default page\n@blueprint.route('/')\ndef route_default():\n return redirect(url_for('base_blueprint.home'))\n\n\n@blueprint.route('/home')\ndef home():\n return render_template('homepage/home.html', title='Home')\n\n\n@blueprint.route('/about')\ndef about():\n return render_template('homepage/about.html', title='About')\n\n\n@blueprint.route('/contact')\ndef contact():\n return render_template('homepage/contact.html', title='Contact')\n\n\n@blueprint.route('/upload', methods=['GET', 'POST'])\ndef upload():\n form = AnonymousForm()\n\n if form.cancel.data:\n return redirect(request.url)\n\n if form.submit.data and form.validate_on_submit():\n raw_file = form.raw_file.data\n mhd_file = form.mhd_file.data\n\n if raw_file and mhd_file:\n raw_file_name = secure_filename(raw_file.filename)\n mhd_file_name = secure_filename(mhd_file.filename)\n\n raw_path = os.path.join(current_app.config['UPLOAD_FOLDER'], raw_file_name)\n mhd_path = os.path.join(current_app.config['UPLOAD_FOLDER'], mhd_file_name)\n\n raw_file.save(raw_path)\n mhd_file.save(mhd_path)\n\n import time\n # time.sleep(5)\n\n inference(mhd_path)\n\n return redirect(url_for('base_blueprint.result', raw_file_name=raw_file_name, mhd_file_name=mhd_file_name))\n\n return render_template('homepage/upload.html', title=\"Upload\", form=form)\n\n\n@blueprint.route('/result//', methods=['GET', 'POST'])\ndef result(raw_file_name, mhd_file_name):\n raw = url_for('static', filename=f'uploaded_ct_scan/{raw_file_name}')\n mhd = url_for('static', filename=f'uploaded_ct_scan/{mhd_file_name}')\n\n return render_template('homepage/result.html', title=\"Upload\", ct_scan_files=[raw, mhd])\n","sub_path":"lung/app/base/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"83702830","text":"import logging\nlogger = logging.getLogger(__name__)\n\n\nclass ProcessBot:\n\n # def __init__(self, content):\n @property\n def __init__(self):\n\n __version__ = '0.1.0'\n __name__ = 'ProcessBot'\n\n logger.info(\" {0}: {1} \".format(__name__, __version__))\n\n # self.content = content\n\n @property\n def processSearch(self, content):\n for result in content:\n print(result)\n","sub_path":"kollekt/process/ProcessBot.py","file_name":"ProcessBot.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"629846456","text":"#!/usr/bin/env python\nimport sys\nimport time\n\nfrom colorama import Fore\nfrom tf.transformations import euler_from_quaternion\nimport rospy\nimport tf2_ros\nimport geometry_msgs\nfrom geometry_msgs.msg import Twist\nimport math\nimport tf_conversions\nfrom geometry_msgs.msg import PoseStamped\nfrom gazebo_msgs.msg import ModelStates\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nimport cv2\nimport numpy as np\nfrom sensor_msgs.msg import LaserScan\nimport sys\n\n\nclass Player:\n\n def __init__(self):\n\n rospy.init_node('driver', anonymous=False)\n name = rospy.get_name().strip('/')\n rospy.sleep(0.2) # make sure the rospy time works\n\n self.pub = rospy.Publisher(name + '/cmd_vel', Twist, queue_size=10)\n rospy.Subscriber(name + \"/camera/rgb/image_raw\", Image, self.ImageCallback)\n rospy.Subscriber(name + \"/scan\", LaserScan, self.lidar_callback)\n\n\n #-------------------------------------------------------------------\n #---------------------Variable Initialization-----------------------\n #-------------------------------------------------------------------\n\n # make sure that the code only works it there is image\n self.exist_image = 0\n\n # connectivity between pixels\n self.connectivity = 4\n\n self.my_team = None\n self.index_color = {'blue': 0, 'green': 1, 'red': 2}\n\n self.blue_limits = {'B': {'max': 255, 'min': 100}, 'G': {'max': 50, 'min': 0}, 'R': {'max': 50, 'min': 0}}\n self.red_limits = {'B': {'max': 50, 'min': 0}, 'G': {'max': 50, 'min': 0}, 'R': {'max': 255, 'min': 100}}\n self.green_limits = {'B': {'max': 50, 'min': 0}, 'G': {'max': 255, 'min': 100}, 'R': {'max': 50, 'min': 0}}\n\n self.size_bool = False\n self.list_centroid=[]\n self.state='wait'\n self.substate=None\n self.rate = rospy.Rate(10) # 10hz\n\n # -------------------------------------------------------------------\n # ---------------------Read Parameters-------------------------------\n # -------------------------------------------------------------------\n\n names_red = rospy.get_param('/red_players')\n names_green = rospy.get_param('/green_players')\n names_blue = rospy.get_param('/blue_players')\n\n\n\n\n\n\n if name in names_red:\n self.my_team = 'red'\n self.hunter_team = 'blue'\n self.prey_team = 'green'\n self.my_team_players = names_red\n self.prey_team_players = names_green\n self.hunter_team_players = names_blue\n elif name in names_green:\n self.my_team = 'green'\n self.hunter_team = 'red'\n self.prey_team = 'blue'\n self.my_team_players = names_green\n self.prey_team_players = names_blue\n self.hunter_team_players = names_red\n elif name in names_blue:\n self.my_team = 'blue'\n self.hunter_team = 'green'\n self.prey_team = 'red'\n self.my_team_players = names_blue\n self.prey_team_players = names_red\n self.hunter_team_players = names_green\n else:\n rospy.logfatal('Something is wrong \\n You have been suspended from your own team')\n exit(0)\n\n print('My name is ' + name + ' I am team ' + self.my_team + Fore.GREEN + ' I am hunting ' + Fore.RESET + str(\n self.prey_team_players)\n + Fore.RED + ' and fleeing from ' + Fore.RESET + str(self.hunter_team_players))\n\n\n\n # -------------------------------------------------------------------\n # ---------------------Callbacks Functions---------------------------\n # -------------------------------------------------------------------\n\n\n\n rospy.Timer(rospy.Duration(0.1), self.print_state, oneshot=False)\n\n\n\n def ImageCallback(self,message):\n\n bridge = CvBridge()\n self.cv_image = 
bridge.imgmsg_to_cv2(message, desired_encoding='bgr8')\n\n self.blue_mask = cv2.inRange(self.cv_image, (self.blue_limits['B']['min'], self.blue_limits['G']['min'], self.blue_limits['R']['min']),\n (self.blue_limits['B']['max'], self.blue_limits['G']['max'], self.blue_limits['R']['max']))\n\n self.red_mask = cv2.inRange(self.cv_image, (self.red_limits['B']['min'], self.red_limits['G']['min'], self.red_limits['R']['min']),\n (self.red_limits['B']['max'], self.red_limits['G']['max'], self.red_limits['R']['max']))\n\n self.green_mask = cv2.inRange(self.cv_image, (self.green_limits['B']['min'], self.green_limits['G']['min'], self.green_limits['R']['min']),\n (self.green_limits['B']['max'], self.green_limits['G']['max'], self.green_limits['R']['max']))\n\n self.exist_image=1\n\n\n def lidar_callback(self,message):\n\n angle = message.angle_min\n\n #print(message.ranges[0])\n\n if message.ranges[0]<1.3:\n self.substate=\"escape_wall\"\n else:\n self.substate=None\n\n def get_centroid(self):\n\n if self.exist_image:\n\n if self.size_bool == False:\n self.height = self.cv_image.shape[0]\n self.width = self.cv_image.shape[1]\n self.size_bool = True\n\n\n mask_list = [self.blue_mask,self.green_mask,self.red_mask]\n\n for_index=0\n list_biggest_target=[]\n self.list_centroid=[]\n\n\n for mask in mask_list:\n\n output = cv2.connectedComponentsWithStats(mask, self.connectivity, cv2.CV_32S)\n num_labels = output[0] # integer with the number of object in the image\n labels = output[1] # in labels we have an image, and each element has a value equivalent to its label\n stats = output[2] # in stats we have all data for each object\n centroids = output[3] # in centroids we have all centroids coordinates for each object\n\n # finding the object with bigger area\n anyone = True\n maximum_area = 0\n object_index = 1\n # if num_labels == 1 means that there is no object, so we cannot paint!\n if num_labels == 1:\n anyone = False\n for i in range(1, num_labels):\n\n object_area = stats[i, cv2.CC_STAT_AREA]\n\n if object_area > maximum_area:\n maximum_area = object_area\n object_index = i\n\n # if maximum_area <500 the object is too small, so its possible that it is not the phone but noise instead\n if maximum_area < 20:\n anyone = False\n # extracting biggest object from segmentation limits\n biggest_target = (labels == object_index)\n biggest_target = biggest_target.astype(np.uint8) * 255\n list_biggest_target.append(biggest_target)\n\n if anyone:\n centroid_coord = centroids[object_index, :].astype(np.uint)\n centroid_coord = tuple(centroid_coord)\n self.list_centroid.append(centroid_coord)\n else:\n self.list_centroid.append(None)\n\n for_index+=1\n\n\n #Algoritmo de decisao\n\n self.DecisionMaking()\n\n\n def DecisionMaking(self):\n\n\n\n if self.list_centroid[self.index_color[self.hunter_team]]!=None and self.list_centroid[self.index_color[self.prey_team]]==None:\n # Caso eu veja um atacante meu e nao veja presas, FUGIR\n self.state='flee'\n elif self.list_centroid[self.index_color[self.hunter_team]]!=None and self.list_centroid[self.index_color[self.prey_team]]!=None:\n #Se eu vir os 2, tenho de tomar uma decisao dependendo da distancia entre eles\n\n\n\n distance_prey_hunter=abs(float(self.list_centroid[self.index_color[self.hunter_team]][0])-float(self.list_centroid[self.index_color[self.prey_team]][0]))\n #print(distance_prey_hunter)\n if distance_prey_hunter > self.width/3:\n self.state='atack'\n else:\n self.state='flee'\n elif self.list_centroid[self.index_color[self.hunter_team]]==None and 
self.list_centroid[self.index_color[self.prey_team]]!=None:\n self.state = 'atack'\n elif self.list_centroid[self.index_color[self.hunter_team]]==None and self.list_centroid[self.index_color[self.prey_team]]==None:\n self.state = 'wait'\n\n self.take_action()\n\n\n def take_action(self):\n\n twist = Twist()\n\n if self.state=='atack':\n\n if self.list_centroid[self.index_color[self.prey_team]]!=None:\n\n horizontal_distance=self.find_direction(self.list_centroid[self.index_color[self.prey_team]])\n #print(horizontal_distance)\n\n twist.linear.x = 1.0\n twist.angular.z = horizontal_distance/500\n\n elif self.state=='flee':\n\n horizontal_distance = self.find_direction(self.list_centroid[self.index_color[self.hunter_team]])\n #print(horizontal_distance)\n\n if horizontal_distance>0:\n signal = -1\n else:\n signal = 1\n\n twist = Twist()\n\n twist.linear.x = 1.5\n twist.angular.z = signal*1.5\n\n\n\n else:\n\n if self.substate==\"escape_wall\":\n #fugir da parede\n twist.linear.x = 0.2\n twist.angular.z = 2\n\n else:\n twist.linear.x = 0.6\n twist.angular.z = 0.3\n\n self.pub.publish(twist)\n\n\n def find_direction(self,centroid_coord):\n\n #if > 0 turn left\n #if < 0 turn right\n return (self.width/2) - centroid_coord[0]\n\n def print_state(self,event):\n\n if self.state == 'flee':\n print(Fore.RED + self.state + ' from ' + self.hunter_team + Fore.RESET)\n elif self.state == 'atack':\n print(Fore.GREEN + self.state + ' and kill ' + self.prey_team + Fore.RESET)\n else:\n if self.substate==None:\n print(Fore.BLUE + self.state + ' for my next prey ' + Fore.RESET)\n else:\n print(Fore.BLUE +'Too close of a wall, better turn around' + Fore.RESET)\n\n\n\n\n\ndef main():\n\n player = Player()\n\n\n while not rospy.is_shutdown():\n\n player.get_centroid()\n\n player.rate.sleep()\n\n\n\n # ---------------\n # program's end\n # ---------------\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main()\n","sub_path":"p_g5_core/src/driver_camera_lidar.py","file_name":"driver_camera_lidar.py","file_ext":"py","file_size_in_byte":10670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"143761664","text":"from django.contrib.auth.decorators import login_required\nfrom django.urls import path\nfrom .views import contact, ContactCreate, ContactUpdate, ContactDelete, ContactList, ContactDetail\n\napp_name='contact'\nurlpatterns = [\n\tpath('', contact, name = 'contact'),\n\n\tpath('messages/', login_required(ContactList.as_view()), name = 'contact-messages'),\n\tpath('messages//', login_required(ContactDetail.as_view()), name = 'contact-update'),\n\tpath('messages/add/', login_required(ContactCreate.as_view()), name = 'contact-create'),\n\tpath('messages//update/', login_required(ContactUpdate.as_view()), name = 'contact-update'),\n\tpath('messages//delete/', login_required(ContactDelete.as_view()), name = 'contact-delete'),\n]\n","sub_path":"contact/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"398841044","text":"import json\nfrom enum import Enum\n\nimport marshmallow\nfrom marshmallow import fields as mf\n\nfrom typing import Any, Dict, Final, List, NoReturn, Union\n\n\nActorId = int\n\nVERSION = (0, 0, 2)\nPORT_BROADCAST = 54300\nPORT_DATA = 54301\nINVENTORY_SIZE: int = 20\nSERIALIZATION_TYPE_FIELD = \"_type\"\nUNASSIGNED_ACTOR_ID: Final[ActorId] = -1\n\n\nclass Hand(Enum):\n LEFT = 0\n RIGHT = 1\n\n def other(self):\n if self == Hand.LEFT:\n return Hand.RIGHT\n else:\n return Hand.LEFT\n\n\nclass DamageVariant(Enum):\n HIT = \"hit\"\n CHOP = \"chop\"\n SMASH = \"smash\"\n ATTACK = \"attack\"\n\n\nclass UpdateVariant(Enum):\n \"\"\"Generic enum describing type of item stack update.\"\"\"\n\n SWAP = 0\n MERGE = 1\n\n\nclass Attachement(Enum):\n LEFT_ITEM = \"left_item\"\n RIGHT_ITEM = \"right_item\"\n\n\nclass Stats:\n class Schema(marshmallow.Schema):\n hunger = mf.Float()\n max_hunger = mf.Float()\n\n @marshmallow.post_load\n def make(self, data, **kwargs):\n return Stats(**data)\n\n def __init__(self, hunger: float, max_hunger: float) -> None:\n self.hunger = hunger\n self.max_hunger = max_hunger\n\n\nclass Debugable:\n DEBUG_FIELDS: List[str] = list()\n\n def __str__(self) -> str:\n fields = {field: str(getattr(self, field)) for field in self.DEBUG_FIELDS}\n return f\"{type(self).__name__}{fields}\"\n\n\nclass Serializable:\n SERIALIZATION_NAME: str = \"___\"\n\n class Schema(marshmallow.Schema):\n pass\n\n def to_dict(self) -> Dict[str, Any]:\n result = self.Schema().dump(self)\n result[SERIALIZATION_TYPE_FIELD] = self.SERIALIZATION_NAME\n return result\n\n def to_json(self) -> str:\n return json.dumps(self.to_dict())\n\n\ndef assert_exhaustive(x: NoReturn) -> NoReturn:\n assert False, \"Unhandled type: {}\".format(type(x).__name__)\n","sub_path":"python/edgin_around_api/defs.py","file_name":"defs.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"138486123","text":"#!/usr/bin/env python\n#***************************************************\n# * Title: UUV Simulator\n# * Author: The UUV Simulator Authors\n# * Date: 2016\n# * Availability: https://uuvsimulator.github.io/\n#***************************************************\n\nimport rospy\nimport numpy as np\nfrom control_interfaces import DPPIDControllerBase\n\n\nclass ROV_PIDController(DPPIDControllerBase):\n \"\"\"PID controller for the dynamic positioning of ROVs.\"\"\"\n\n _LABEL = 'PID'\n def __init__(self):\n self._tau = np.zeros(6)\n DPPIDControllerBase.__init__(self, False)\n self._is_init = True\n\n def update_controller(self):\n if not self._is_init:\n return False\n # Update PID control action\n self._tau = self.update_pid()\n self.publish_control_wrench(self._tau)\n return True\n\nif __name__ == '__main__':\n print('Starting PID')\n rospy.init_node('rov_pid_controller')\n\n try:\n node = ROV_PIDController()\n rospy.spin()\n except rospy.ROSInterruptException:\n print('caught exception')\n print('exiting')\n","sub_path":"control/control/trajectory_control/scripts/rov_pid_controller.py","file_name":"rov_pid_controller.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"571197014","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy.signal\nfrom scipy.fftpack import fft, fftshift\n\ndef main():\n create_sma_plots()\n create_window_comparison_plots()\n\ndef create_sma_plots():\n # Equations from https://tttapa.github.io/Pages/Mathematics/Systems-and-Control-Theory/Digital-filters/Simple%20Moving%20Average/Simple-Moving-Average.html#:~:text=The%20cutoff%20frequency%20is%20defined,)%20%E2%89%88%20%E2%88%92%203.01%20d%20B%20.\n\n # Function for calculating the cut-off frequency of a moving average filter\n def get_sma_cutoff(N, **kwargs):\n func = lambda w: np.sin(N*w/2) - N/np.sqrt(2) * np.sin(w/2) # |H(e^jω)| = √2/2\n deriv = lambda w: np.cos(N*w/2) * N/2 - N/np.sqrt(2) * np.cos(w/2) / 2 # dfunc/dx\n omega_0 = np.pi/N # Starting condition: halfway the first period of sin(Nω/2)\n return scipy.optimize.newton(func, omega_0, deriv, **kwargs)\n\n N = 10 # Window size (number of samples)\n fs_Hz = 1000 # Sampling frequency\n f = np.linspace(0, fs_Hz/2, 1000)\n w = 2*np.pi*(f/fs_Hz)\n with np.errstate(divide='ignore', invalid='ignore'):\n freq_response = (1/(N**2))*((np.sin(w*N/2)**2)/(np.sin(w/2)**2))\n\n freq_response_dB = 10*np.log10(freq_response)\n\n # SMA coefficients\n b = np.ones(N)\n a = np.array([N] + [0]*(N-1))\n w, h = scipy.signal.freqz(b, a, worN=4096)\n f = (w*fs_Hz)/(2*np.pi) # Convert from rad/sample to Hz\n freq_response_dB = 20*np.log10(abs(h))\n phase_response = np.angle(h)*(180/np.pi)\n\n w_c = get_sma_cutoff(N)\n f_c_Hz = w_c * fs_Hz / (2 * np.pi)\n # print(f'f_c_Hz={f_c_Hz}')\n\n fig, axes = plt.subplots(1, 1, figsize=(10, 7), squeeze=False)\n ax = axes[0][0]\n ax.plot(f, freq_response_dB, label='Frequency response')\n ax.axvline(fs_Hz/N, color='C1', linestyle='--', label='Window frequency')\n ax.axvline(f_c_Hz, color='C2', linestyle='--', label='Cutoff frequency')\n ax.set_xlabel('Frequency (Hz)')\n ax.set_ylabel('Magnitude (dB)')\n ax.set_ylim(-50, 10)\n ax.set_title('Magnitude Response Of SMA')\n ax.legend()\n plt.tight_layout()\n plt.savefig('frequency-response-of-sma-magnitude.png')\n\n fig, axes = plt.subplots(1, 1, figsize=(10, 7), squeeze=False)\n ax = axes[0][0]\n ax.plot(f, phase_response, label='Phase response')\n ax.axvline(fs_Hz/N, color='C1', linestyle='--', label='Window frequency')\n ax.axvline(f_c_Hz, color='C2', linestyle='--', label='Cutoff frequency')\n ax.set_xlabel('Frequency (Hz)')\n ax.set_ylabel('Phase (°)')\n ax.set_ylim(-180, 90)\n ax.set_yticks([-180, -135, -90, -45, 0, 45, 90])\n ax.set_title('Phase Response Of SMA')\n ax.legend()\n plt.tight_layout()\n plt.savefig('frequency-response-of-sma-phase.png')\n\ndef create_window_comparison_plots():\n N = 51\n\n window_data = [\n {\n 'name': 'Boxcar',\n 'values': scipy.signal.boxcar(N),\n },\n {\n 'name': 'Exponential',\n 'values': scipy.signal.windows.exponential(N, tau=3.0),\n },\n {\n 'name': 'Gaussian',\n 'values': scipy.signal.windows.gaussian(N, std=7),\n },\n {\n 'name': 'Blackman',\n 'values': scipy.signal.blackman(N),\n },\n ]\n\n def plot_window(window, ax, label):\n ax.plot(window, label=label)\n\n def plot_freq_response(window, ax, label):\n with np.errstate(divide='ignore', invalid='ignore'):\n # fft() does not center the DC component, need to use fftshift() later\n # to do that\n # 2048 significantly larger than window size, so 0 padding will occur\n A = fft(window, 2048) / (len(window)/2.0)\n freq = np.linspace(-0.5, 0.5, len(A)) # This is normalized frequency (w.r.t sampling frequency)\n response = 
20 * np.log10(np.abs(fftshift(A / abs(A).max())))\n ax.plot(freq, response, label=label)\n\n fig, axes = plt.subplots(1, 1, figsize=(10, 7), squeeze=False)\n ax = axes[0][0]\n for window in window_data:\n plot_window(window['values'], ax, window['name'])\n ax.set_title(\"Popular window shapes, N=51\")\n ax.set_xlabel('Sample')\n ax.set_ylabel('Weight')\n ax.legend()\n plt.tight_layout()\n plt.savefig('window-comparison-shapes.png')\n\n fig, axes = plt.subplots(1, 1, figsize=(10, 7), squeeze=False)\n ax = axes[0][0]\n for window in window_data:\n plot_freq_response(window['values'], ax, window['name'])\n\n ax.axis([0, 0.5, -120, 0])\n ax.set_title(\"Frequency response of popular windows, N=51\")\n ax.set_ylabel(\"Normalized magnitude [dB]\")\n ax.set_xlabel(\"Normalized frequency [cycles per sample]\")\n ax.legend()\n plt.tight_layout()\n plt.savefig('window-comparison-frequency-response.png')\n\nif __name__ == '__main__':\n main()","sub_path":"content/programming/signal-processing/digital-filters/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"31597131","text":"\"\"\"SNGAN ResNet for conditional generation of ImageNet\"\"\"\n\n# no ACGAN, 1\n# NoLabelConcatInG, 1\n# DECAY, 1\n# N_CRITIC = 5\n# biases=True\n\nimport os\nimport sys\n\nsys.path.append(os.getcwd())\n\nimport numpy as np\nimport tensorflow as tf\n\nimport time\nimport functools\nimport locale\n\nimport common.misc\nimport common.data.cifar10\nimport common.inception.inception_score\n\nimport common as lib\nimport common.ops.linear\nimport common.ops.conv2d\nimport common.ops.embedding\nimport common.ops.normalization\nimport common.plot\n\nfrom common.data import ILSVRC2012\n\n# Download CIFAR-10 (Python version) at\n# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the extracted files here!\nDATA_DIR = '/media/newhd/data/ILSVRC2012/train/'\nif len(DATA_DIR) == 0:\n raise Exception('Please specify path to data directory in gan_cifar.py!')\n\nBATCH_SIZE = 32 # Critic batch size\nGEN_BS_MULTIPLE = 2 # Generator batch size, as a multiple of BATCH_SIZE\nITERS = 450000 # How many iterations to train for\nDIM_G = 128 # Generator dimensionality\nDIM_D = 128 # Critic dimensionality\nNORMALIZATION_G = True # Use batchnorm in generator?\nNORMALIZATION_D = False # Use batchnorm (or layernorm) in critic?\nOUTPUT_DIM = 49152 # Number of pixels in CIFAR10 (128*128*3)\nLR = 0.0002 # 2e-4 # Initial learning rate\nDECAY = True # Whether to decay LR over learning\nN_CRITIC = 5 # 5 # Critic steps per generator steps\nINCEPTION_FREQUENCY = 1000 # How frequently to calculate Inception score\n\nCONDITIONAL = True # Whether to train a conditional or unconditional model\nACGAN = False # If CONDITIONAL, whether to use ACGAN or \"vanilla\" conditioning\nACGAN_SCALE = 1. # How to scale the critic's ACGAN loss relative to WGAN loss\nACGAN_SCALE_G = 0.1 # How to scale generator's ACGAN loss relative to WGAN loss\n\n# SPECTRAL_NORM_UPDATE_OPS = \"spectral_norm_update_ops\"\nWORD2VEC_FILE = None\nVOCAB_SIZE = 1000\nEMBEDDING_DIM = 300 # 620\nCHECKPOINT_DIR = os.path.join(DATA_DIR, 'checkpoint')\nLOSS_TYPE = 'HINGE' # 'Goodfellow', 'HINGE', 'WGAN', 'WGAN-GP'\nSOFT_PLUS = False\nRESTORE = False\n\nif CONDITIONAL and (not ACGAN) and (not NORMALIZATION_D):\n print(\"WARNING! Conditional model without normalization in D might be effectively unconditional!\")\n\nN_GPUS = 1\nif N_GPUS not in [1, 2]:\n raise Exception('Only 1 or 2 GPUs supported!')\nDEVICES = ['/gpu:{}'.format(i) for i in range(N_GPUS)]\nif len(DEVICES) == 1: # Hack because the code assumes 2 GPUs\n DEVICES = [DEVICES[0], DEVICES[0]]\n\nlib.print_model_settings(locals().copy())\n\n\ndef nonlinearity(x, activation_fn='relu', leakiness=0.2):\n if activation_fn == 'relu':\n return tf.nn.relu(x)\n if activation_fn == 'lrelu':\n assert 0 < leakiness <= 1, \"leakiness must be <= 1\"\n return tf.maximum(x, leakiness * x)\n\n\ndef Normalize(name, inputs, labels=None):\n \"\"\"This is messy, but basically it chooses between batchnorm, layernorm,\n their conditional variants, or nothing, depending on the value of `name` and\n the global hyperparam flags.\"\"\"\n\n with tf.variable_scope(name):\n if not CONDITIONAL:\n labels = None\n if CONDITIONAL and ACGAN and ('D.' in name):\n labels = None\n\n if ('D.' in name) and NORMALIZATION_D:\n return lib.ops.normalization.layer_norm(name, [1, 2, 3], inputs)\n elif ('G.' 
in name) and NORMALIZATION_G:\n if labels is not None:\n # inputs_ = tf.transpose(inputs, [0, 3, 1, 2], name='NHWC_to_NCHW')\n outputs = lib.ops.normalization.cond_batchnorm(name, [0, 1, 2], inputs, labels=labels, n_labels=1000)\n # return tf.transpose(outputs, [0, 2, 3, 1], name='NCHW_to_NHWC')\n return outputs\n else:\n # inputs_ = tf.transpose(inputs, [0, 3, 1, 2], name='NHWC_to_NCHW')\n outputs = lib.ops.normalization.batch_norm(inputs, fused=True)\n # return tf.transpose(outputs, [0, 2, 3, 1], name='NCHW_to_NHWC')\n return outputs\n else:\n return inputs\n\n\ndef ConvMeanPool(inputs, output_dim, filter_size=3, stride=1, name=None,\n spectral_normed=False, update_collection=None, inputs_norm=False,\n he_init=True, biases=True):\n output = lib.ops.conv2d.Conv2D(inputs, inputs.shape.as_list()[-1], output_dim, filter_size, stride, name,\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=he_init, biases=biases)\n # output = tf.nn.avg_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')\n output = tf.add_n(\n [output[:, ::2, ::2, :], output[:, 1::2, ::2, :], output[:, ::2, 1::2, :], output[:, 1::2, 1::2, :]]) / 4.\n return output\n\n\ndef MeanPoolConv(inputs, output_dim, filter_size=3, stride=1, name=None,\n spectral_normed=False, update_collection=None, inputs_norm=False,\n he_init=True, biases=True):\n output = inputs\n output = tf.add_n(\n [output[:, ::2, ::2, :], output[:, 1::2, ::2, :], output[:, ::2, 1::2, :], output[:, 1::2, 1::2, :]]) / 4.\n # output = tf.nn.avg_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')\n output = lib.ops.conv2d.Conv2D(output, output.shape.as_list()[-1], output_dim, filter_size, stride, name,\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=he_init, biases=biases)\n\n return output\n\n\ndef UpsampleConv(inputs, output_dim, filter_size=3, stride=1, name=None,\n spectral_normed=False, update_collection=None, inputs_norm=False,\n he_init=True, biases=True):\n output = inputs\n output = tf.concat([output, output, output, output], axis=3)\n output = tf.depth_to_space(output, 2)\n # w, h = inputs.shape.as_list()[1], inputs.shape.as_list()[2]\n # output = tf.image.resize_images(inputs, [w * 2, h * 2])\n output = lib.ops.conv2d.Conv2D(output, output.shape.as_list()[-1], output_dim, filter_size, stride, name,\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=he_init, biases=biases)\n\n return output\n\n\ndef ResidualBlock(inputs, input_dim, output_dim, filter_size, name,\n spectral_normed=False, update_collection=None, inputs_norm=False,\n resample=None, labels=None, biases=True):\n \"\"\"resample: None, 'down', or 'up'.\n \"\"\"\n if resample == 'down':\n conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)\n conv_2 = functools.partial(ConvMeanPool, output_dim=output_dim)\n conv_shortcut = ConvMeanPool\n elif resample == 'up':\n conv_1 = functools.partial(UpsampleConv, output_dim=output_dim)\n conv_shortcut = UpsampleConv\n conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)\n elif resample is None:\n conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim)\n conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)\n conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)\n else:\n raise Exception('invalid resample value')\n\n if output_dim == input_dim and resample is None:\n 
shortcut = inputs # Identity skip-connection\n else:\n shortcut = conv_shortcut(inputs=inputs, output_dim=output_dim, filter_size=1, name=name + '.Shortcut',\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=False, biases=biases)\n\n output = inputs\n output = Normalize(name + '.N1', output, labels=labels)\n output = nonlinearity(output)\n # if resample == 'up':\n # output = nonlinearity(output)\n # else:\n # output = lrelu(output, leakiness=0.2)\n\n output = conv_1(inputs=output, filter_size=filter_size, name=name + '.Conv1',\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=True, biases=biases)\n\n output = Normalize(name + '.N2', output, labels=labels)\n output = nonlinearity(output)\n # if resample == 'up':\n # output = nonlinearity(output)\n # else:\n # output = lrelu(output, leakiness=0.2)\n\n output = conv_2(inputs=output, filter_size=filter_size, name=name + '.Conv2',\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=True, biases=biases)\n\n return shortcut + output\n\n\ndef OptimizedResBlockDisc1(inputs,\n spectral_normed=False, update_collection=None, inputs_norm=False,\n biases=True):\n conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=3, output_dim=DIM_D // 2)\n conv_2 = functools.partial(ConvMeanPool, output_dim=DIM_D // 2)\n conv_shortcut = MeanPoolConv\n shortcut = conv_shortcut(inputs=inputs, output_dim=DIM_D // 2, filter_size=1, name='D.Block.1.Shortcut',\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=False, biases=biases)\n\n output = inputs\n output = conv_1(inputs=output, filter_size=3, name='D.Block.1.Conv1',\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=True, biases=biases)\n output = nonlinearity(output)\n # output = lrelu(output, leakiness=0.2)\n output = conv_2(inputs=output, filter_size=3, name='D.Block.1.Conv2',\n spectral_normed=spectral_normed,\n update_collection=update_collection,\n he_init=True, biases=biases)\n return shortcut + output\n\n\ndef Generator(n_samples_, labels, noise=None, reuse=False):\n with tf.variable_scope(\"Generator\", reuse=reuse):\n if noise is None:\n noise = tf.random_normal([n_samples_, 128])\n\n output = lib.ops.linear.Linear(noise, 128, 4 * 4 * DIM_G * 8, 'G.Input')\n output = tf.reshape(output, [-1, 4, 4, DIM_G * 8])\n # 1024\n output = ResidualBlock(output, DIM_G * 8, DIM_G * 8, 3, 'G.Block.1', resample='up', labels=labels, biases=True)\n print('G.1: {}'.format(output.shape.as_list()))\n # 512\n output = ResidualBlock(output, DIM_G * 8, DIM_G * 4, 3, 'G.Block.2', resample='up', labels=labels, biases=True)\n print('G.2: {}'.format(output.shape.as_list()))\n # 256\n output = ResidualBlock(output, DIM_G * 4, DIM_G * 2, 3, 'G.Block.3', resample='up', labels=labels, biases=True)\n print('G.3: {}'.format(output.shape.as_list()))\n # 128\n output = ResidualBlock(output, DIM_G * 2, DIM_G, 3, 'G.Block.4', resample='up', labels=labels, biases=True)\n print('G.4: {}'.format(output.shape.as_list()))\n # 64\n output = ResidualBlock(output, DIM_G, DIM_G // 2, 3, 'G.Block.5', resample='up', labels=labels, biases=True)\n print('G.5: {}'.format(output.shape.as_list()))\n output = Normalize('G.OutputNorm', output, labels)\n output = nonlinearity(output)\n\n output = lib.ops.conv2d.Conv2D(output, DIM_G // 2, 3, 3, 1, 'G.Output', he_init=False)\n output = tf.tanh(output)\n print('G.output.shape: {}'.format(output.shape.as_list()))\n return tf.reshape(output, [-1, 
OUTPUT_DIM])\n # return tf.reshape(tf.transpose(output, [0, 3, 1, 2], name='NHWC_to_NCHW'), [-1, OUTPUT_DIM])\n\n\ndef Discriminator(inputs, labels, update_collection=None, reuse=False):\n with tf.variable_scope(\"Discriminator\", reuse=reuse):\n output = tf.reshape(inputs, [-1, 128, 128, 3])\n # output = tf.transpose(output, [0, 2, 3, 1], name='NCHW_to_NHWC')\n output = OptimizedResBlockDisc1(output,\n spectral_normed=True,\n update_collection=update_collection,\n biases=True)\n\n # output = ResidualBlock(output, 3, DIM_D // 2, 3, 'Discriminator.1',\n # spectral_normed=True,\n # update_collection=update_collection,\n # resample='down', labels=labels, biases=True)\n output = ResidualBlock(output, DIM_D // 2, DIM_D, 3, 'D.Block.2',\n spectral_normed=True,\n update_collection=update_collection,\n resample='down', labels=labels, biases=True)\n\n output = ResidualBlock(output, DIM_D, DIM_D * 2, 3, 'D.Block.3',\n spectral_normed=True,\n update_collection=update_collection,\n resample='down', labels=labels, biases=True)\n\n # embedding labels, and concatenate to 'output'.\n # (N, EMBEDDING_DIM)\n embedding_y = lib.ops.embedding.embed_y(labels, VOCAB_SIZE, EMBEDDING_DIM, word2vec_file=WORD2VEC_FILE)\n embedding_y = lib.ops.linear.Linear(embedding_y, EMBEDDING_DIM, DIM_D, 'D.Embedding_y',\n spectral_normed=True,\n update_collection=update_collection,\n biases=True) # (N, DIM_D)\n\n embedding_y = tf.expand_dims(tf.expand_dims(embedding_y, axis=1), axis=1)\n embedding_y = tf.tile(embedding_y, multiples=[1, output.shape.as_list()[1], output.shape.as_list()[2], 1])\n output = tf.concat(values=[output, embedding_y], axis=3)\n\n output = ResidualBlock(output, DIM_D * 3, DIM_D * 4, 3, 'D.Block.4',\n spectral_normed=True,\n update_collection=update_collection,\n resample='down', labels=labels, biases=True)\n output = ResidualBlock(output, DIM_D * 4, DIM_D * 8, 3, 'D.Block.5',\n spectral_normed=True,\n update_collection=update_collection,\n resample='down', labels=labels, biases=True)\n output = ResidualBlock(output, DIM_D * 8, DIM_D * 8, 3, 'D.Block.6',\n spectral_normed=True,\n update_collection=update_collection,\n resample=None, labels=labels, biases=True)\n output = nonlinearity(output)\n # output = lrelu(output, leakiness=0.2)\n output = tf.reduce_mean(output, axis=[1, 2])\n output_wgan = lib.ops.linear.Linear(output, DIM_D * 8, 1, 'D.Output',\n spectral_normed=True,\n update_collection=update_collection)\n output_wgan = tf.reshape(output_wgan, [-1])\n if CONDITIONAL and ACGAN:\n output_acgan = lib.ops.linear.Linear(output, DIM_D, 10, 'D.ACGANOutput',\n spectral_normed=True,\n update_collection=update_collection,\n biases=True)\n return output_wgan, output_acgan\n else:\n return output_wgan, None\n\n\n# with tf.Graph().as_default() as g:\nconfig = tf.ConfigProto(allow_soft_placement=True)\nconfig.gpu_options.allow_growth = True\nwith tf.Session(config=config) as session:\n _iteration = tf.placeholder(tf.int32, shape=None)\n all_real_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM])\n all_real_labels = tf.placeholder(tf.int32, shape=[BATCH_SIZE])\n\n labels_splits = tf.split(all_real_labels, len(DEVICES), axis=0)\n\n fake_data_splits = []\n for i, device in enumerate(DEVICES):\n with tf.device(device):\n if i > 0:\n fake_data_splits.append(Generator(int(BATCH_SIZE / len(DEVICES)), labels_splits[i], reuse=True))\n else:\n fake_data_splits.append(Generator(int(BATCH_SIZE / len(DEVICES)), labels_splits[i]))\n\n all_real_data = tf.reshape(2 * ((tf.cast(all_real_data_int, tf.float32) / 
256.) - .5), [BATCH_SIZE, OUTPUT_DIM])\n all_real_data += tf.random_uniform(shape=[BATCH_SIZE, OUTPUT_DIM], minval=0., maxval=1. / 128) # dequantize\n all_real_data_splits = tf.split(all_real_data, len(DEVICES), axis=0)\n\n DEVICES_A = DEVICES[int(len(DEVICES) / 2):]\n # DEVICES_B = DEVICES[:int(len(DEVICES) / 2)]\n\n disc_costs = []\n disc_acgan_costs = []\n disc_acgan_accs = []\n disc_acgan_fake_accs = []\n for i, device in enumerate(DEVICES_A):\n with tf.device(device):\n real_and_fake_data = tf.concat(values=[\n all_real_data_splits[i],\n all_real_data_splits[len(DEVICES_A) + i],\n fake_data_splits[i],\n fake_data_splits[len(DEVICES_A) + i]\n ], axis=0)\n real_and_fake_labels = tf.concat(values=[\n labels_splits[i],\n labels_splits[len(DEVICES_A) + i],\n labels_splits[i],\n labels_splits[len(DEVICES_A) + i]\n ], axis=0)\n disc_all, disc_all_acgan = Discriminator(real_and_fake_data, real_and_fake_labels, update_collection=None)\n disc_real = disc_all[:int(BATCH_SIZE / len(DEVICES_A))]\n disc_fake = disc_all[int(BATCH_SIZE / len(DEVICES_A)):]\n if LOSS_TYPE == 'Goodfellow':\n if SOFT_PLUS:\n disc_real_l = -tf.reduce_mean(tf.nn.softplus(tf.log(tf.nn.sigmoid(disc_real))))\n disc_fake_l = -tf.reduce_mean(tf.nn.softplus(tf.log(1 - tf.nn.sigmoid(disc_fake))))\n else:\n disc_real_l = -tf.reduce_mean(tf.log(tf.nn.sigmoid(disc_real)))\n disc_fake_l = -tf.reduce_mean(tf.log(1 - tf.nn.sigmoid(disc_fake)))\n disc_costs.append(disc_real_l + disc_fake_l)\n elif LOSS_TYPE == 'HINGE':\n if SOFT_PLUS:\n disc_real_l = tf.reduce_mean(tf.nn.softplus(-tf.minimum(0., -1 + disc_real)))\n disc_fake_l = tf.reduce_mean(tf.nn.softplus(-tf.minimum(0., -1 - disc_fake)))\n else:\n # disc_real_l = -tf.reduce_mean(tf.minimum(0., -1 + disc_real))\n # disc_fake_l = -tf.reduce_mean(tf.minimum(0., -1 - disc_fake))\n disc_real_l = tf.reduce_mean(tf.nn.relu(1. - disc_real))\n disc_fake_l = tf.reduce_mean(tf.nn.relu(1. 
+ disc_fake))\n disc_costs.append(disc_real_l + disc_fake_l)\n elif LOSS_TYPE == 'WGAN':\n if SOFT_PLUS:\n disc_costs.append(\n tf.reduce_mean(tf.nn.softplus(disc_fake)) + tf.reduce_mean(tf.nn.softplus(-disc_real)))\n else:\n disc_costs.append(tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real))\n\n if CONDITIONAL and ACGAN:\n disc_acgan_costs.append(tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=disc_all_acgan[:int(BATCH_SIZE / len(DEVICES_A))],\n labels=real_and_fake_labels[:int(BATCH_SIZE / len(DEVICES_A))])\n ))\n disc_acgan_accs.append(tf.reduce_mean(\n tf.cast(\n tf.equal(\n tf.to_int32(tf.argmax(disc_all_acgan[:int(BATCH_SIZE / len(DEVICES_A))], axis=1)),\n real_and_fake_labels[:int(BATCH_SIZE / len(DEVICES_A))]\n ),\n tf.float32\n )\n ))\n disc_acgan_fake_accs.append(tf.reduce_mean(\n tf.cast(\n tf.equal(\n tf.to_int32(tf.argmax(disc_all_acgan[int(BATCH_SIZE / len(DEVICES_A)):], axis=1)),\n real_and_fake_labels[int(BATCH_SIZE / len(DEVICES_A)):]\n ),\n tf.float32\n )\n ))\n\n # gradient_penalty, not included\n # if LOSS_TYPE == 'WGAN-GP'\n # for i, device in enumerate(DEVICES_B):\n # with tf.device(device):\n # real_data = tf.concat([all_real_data_splits[i], all_real_data_splits[len(DEVICES_A) + i]], axis=0)\n # fake_data = tf.concat([fake_data_splits[i], fake_data_splits[len(DEVICES_A) + i]], axis=0)\n # labels = tf.concat([\n # labels_splits[i],\n # labels_splits[len(DEVICES_A) + i],\n # ], axis=0)\n # alpha = tf.random_uniform(\n # shape=[int(BATCH_SIZE / len(DEVICES_A)), 1],\n # minval=0.,\n # maxval=1.\n # )\n # differences = fake_data - real_data\n # interpolates = real_data + (alpha * differences)\n # gradients = tf.gradients(Discriminator(interpolates, labels)[0], [interpolates])[0]\n # slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))\n # gradient_penalty = 10 * tf.reduce_mean((slopes - 1.) ** 2)\n # disc_costs.append(gradient_penalty)\n\n disc_wgan = tf.add_n(disc_costs) / len(DEVICES_A)\n # tf.summary.scalar('D_wgan_cost', disc_wgan)\n if CONDITIONAL and ACGAN:\n disc_acgan = tf.add_n(disc_acgan_costs) / len(DEVICES_A)\n disc_acgan_acc = tf.add_n(disc_acgan_accs) / len(DEVICES_A)\n disc_acgan_fake_acc = tf.add_n(disc_acgan_fake_accs) / len(DEVICES_A)\n disc_cost = disc_wgan + (ACGAN_SCALE * disc_acgan)\n\n tf.summary.scalar('D_acgan_cost', disc_acgan)\n tf.summary.scalar('D_acgan_accuracy', disc_acgan_acc)\n tf.summary.scalar('D_acgan_fake_accuracy', disc_acgan_fake_acc)\n tf.summary.scalar('D_cost', disc_cost)\n else:\n disc_acgan = tf.constant(0.)\n disc_acgan_acc = tf.constant(0.)\n disc_acgan_fake_acc = tf.constant(0.)\n disc_cost = disc_wgan\n\n if DECAY:\n decay = tf.where(\n tf.less(_iteration, 400000),\n 1.0, tf.maximum(0., 1. 
- (tf.cast(_iteration, tf.float32) / 450000)))\n else:\n decay = 1.\n tf.summary.scalar('lr', LR * decay)\n\n gen_costs = []\n gen_acgan_costs = []\n for device in DEVICES:\n with tf.device(device):\n n_samples = GEN_BS_MULTIPLE * int(BATCH_SIZE / len(DEVICES))\n fake_labels = tf.cast(tf.random_uniform([n_samples]) * 1000, tf.int32)\n if CONDITIONAL and ACGAN:\n disc_fake, disc_fake_acgan = Discriminator(Generator(n_samples, fake_labels, reuse=True),\n fake_labels,\n update_collection=\"NO_OPS\",\n reuse=True)\n gen_costs.append(-tf.reduce_mean(tf.nn.softplus(disc_fake)))\n # gen_costs.append(-tf.reduce_mean(disc_fake))\n gen_acgan_costs.append(tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(logits=disc_fake_acgan, labels=fake_labels)\n ))\n else:\n disc_fake, _ = Discriminator(Generator(n_samples, fake_labels, reuse=True),\n fake_labels,\n update_collection=\"NO_OPS\",\n reuse=True)\n if LOSS_TYPE == 'Goodfellow':\n if SOFT_PLUS:\n gen_costs.append(tf.reduce_mean(tf.nn.softplus(-tf.log(tf.nn.sigmoid(disc_fake)))))\n else:\n gen_costs.append(-tf.reduce_mean(tf.log(tf.nn.sigmoid(disc_fake))))\n elif LOSS_TYPE == 'HINGE':\n if SOFT_PLUS:\n gen_costs.append(tf.reduce_mean(tf.nn.softplus(-disc_fake)))\n else:\n gen_costs.append(-tf.reduce_mean(disc_fake))\n elif LOSS_TYPE == 'WGAN':\n if SOFT_PLUS:\n gen_costs.append(tf.reduce_mean(tf.nn.softplus(-disc_fake)))\n else:\n gen_costs.append(-tf.reduce_mean(disc_fake))\n gen_cost = (tf.add_n(gen_costs) / len(DEVICES))\n # tf.summary.scalar('G_wgan_cost', gen_cost)\n if CONDITIONAL and ACGAN:\n gen_cost += (ACGAN_SCALE_G * (tf.add_n(gen_acgan_costs) / len(DEVICES)))\n tf.summary.scalar('G_acgan_costs', tf.add_n(gen_acgan_costs) / len(DEVICES))\n tf.summary.scalar('G_cost', gen_cost)\n\n # gen_params = lib.params_with_name('Generator')\n # disc_params = lib.params_with_name('D.')\n gen_params = [var for var in tf.trainable_variables() if 'Generator' in var.name]\n print('\\ngen_params:')\n for var in gen_params:\n print(var.name)\n\n disc_params = [var for var in tf.trainable_variables() if 'Discriminator' in var.name]\n print('\\ndisc_params:')\n for var in disc_params:\n print(var.name)\n\n print('\\ntrainable_variables.name:')\n for var in tf.trainable_variables():\n print(var.name)\n\n gen_opt = tf.train.AdamOptimizer(learning_rate=LR * decay, beta1=0., beta2=0.9)\n disc_opt = tf.train.AdamOptimizer(learning_rate=LR * decay, beta1=0., beta2=0.9)\n gen_gv = gen_opt.compute_gradients(gen_cost, var_list=gen_params)\n disc_gv = disc_opt.compute_gradients(disc_cost, var_list=disc_params)\n gen_train_op = gen_opt.apply_gradients(gen_gv)\n disc_train_op = disc_opt.apply_gradients(disc_gv)\n\n # Function for generating samples\n frame_i = [0]\n fixed_noise = tf.constant(np.random.normal(size=(25, 128)).astype('float32'))\n # tiger shark(3), electric locomotive(547), mountain bike(671), submarine(833)\n # gray whale(147), Welsh springer spaniel(218), Persian cat(283), tiger(292),\n # chiffonier(493), fire truck(555), mosque(668), palace(698),\n # schooner(780), daisy(985), sandbar(977), pizza(963)\n sample_labels = np.array([3, 547, 671, 833, 147, 218, 283, 292, 493, 555, 668, 698, 780, 985, 977, 963],\n dtype='int32')\n # sample_labels = np.repeat(sample_labels, 25)\n fixed_labels = tf.constant(sample_labels)\n samples_prob = tf.multinomial(tf.log([[0.6] * 16]), 1)\n category = tf.cast(samples_prob[0][0], tf.int32)\n samples_label = fixed_labels[category]\n samples_label = tf.expand_dims(samples_label, axis=0)\n samples_label = 
tf.tile(samples_label, [25])\n fixed_noise_samples = Generator(25, samples_label, noise=fixed_noise, reuse=True)\n\n\n def generate_image(frame):\n samples = session.run(fixed_noise_samples)\n samples_label_ = session.run(fixed_labels[category])\n samples = ((samples + 1.) * (255. / 2)).astype('int32')\n # samples = np.split(samples, 16, 0)\n # for sample in samples:\n samples = np.reshape(samples, (25, 128, 128, 3))\n common.misc.save_images(samples, 'samples_{}_{}.png'.format(frame, samples_label_))\n\n\n # Function for calculating inception score\n fake_labels_100 = tf.cast(tf.random_uniform([100]) * 1000, tf.int32)\n samples_100 = Generator(100, fake_labels_100, reuse=True)\n\n\n def get_inception_score(n):\n all_samples = []\n for i in range(int(n / 100)):\n all_samples.append(session.run(samples_100))\n all_samples = np.concatenate(all_samples, axis=0)\n all_samples = ((all_samples + 1.) * (255.99 / 2)).astype('int32')\n all_samples = all_samples.reshape((-1, 128, 128, 3))\n return common.inception.inception_score.get_inception_score(list(all_samples))\n\n\n # Function for reading data\n # train_gen, dev_gen = lib.cifar10.load(BATCH_SIZE, DATA_DIR)\n #\n #\n # def inf_train_gen():\n # while True:\n # for images_, labels_ in train_gen():\n # yield images_, labels_\n #\n #\n # gen = inf_train_gen()\n\n for name, grads_and_vars in [('G', gen_gv), ('D', disc_gv)]:\n print(\"{} Params:\".format(name))\n total_param_count = 0\n for g, v in grads_and_vars:\n shape = v.get_shape()\n shape_str = \",\".join([str(x) for x in v.get_shape()])\n\n param_count = 1\n for dim in shape:\n param_count *= int(dim)\n total_param_count += param_count\n\n if g is None:\n print(\"\\t{} ({}) [no grad!]\".format(v.name, shape_str))\n else:\n print(\"\\t{} ({})\".format(v.name, shape_str))\n print(\"Total param count: {}\".format(locale.format(\"%d\", total_param_count, grouping=True)))\n\n summaries_op = tf.summary.merge_all()\n saver = tf.train.Saver(max_to_keep=5)\n summary_writer = tf.summary.FileWriter(CHECKPOINT_DIR, graph=session.graph)\n session.run(tf.global_variables_initializer())\n\n if RESTORE:\n ckpt = tf.train.latest_checkpoint(CHECKPOINT_DIR)\n if ckpt:\n print('restore model from: {}...'.format(ckpt))\n saver.restore(session, ckpt)\n\n filenames, labels = ILSVRC2012.get_filenames_labels(DATA_DIR)\n data_, labels_ = ILSVRC2012.input_fn(filenames, labels, BATCH_SIZE, 21)\n for iteration in range(ITERS):\n start_time = time.time()\n\n if 0 < iteration:\n _ = session.run([gen_train_op], feed_dict={_iteration: iteration})\n\n for i in range(N_CRITIC):\n # _data, _labels = next(gen)\n # data_, labels_ = ILSVRC2012.input_fn(filenames, labels, BATCH_SIZE, 21)\n _data, _labels = session.run([data_, labels_])\n\n # print('image_resized.shape: {}'.format(_data.shape)) # (N, 128, 128, 3)\n # _data = np.transpose(_data, axes=[0, 3, 1, 2]) # 'NHWC_to_NCHW'\n # print('image_transposed.shape: {}'.format(_data.shape)) # (N, 3, 128, 128)\n _data = np.reshape(_data, [_data.shape[0], -1])\n # print('image_flatten.shape: {}'.format(_data.shape)) # (N, 3*128*128)\n # print('_labels.shape: {}'.format(_labels.shape)) # (N,)\n # print('_data: {}'.format(_data))\n # print('_labels: {}'.format(_labels))\n\n if CONDITIONAL and ACGAN:\n _disc_cost, _disc_wgan, _gen_cost, _disc_acgan, _disc_acgan_acc, \\\n _disc_acgan_fake_acc, _, summaries = session.run(\n [disc_cost, disc_wgan, gen_cost, disc_acgan, disc_acgan_acc,\n disc_acgan_fake_acc, disc_train_op, summaries_op],\n feed_dict={all_real_data_int: _data,\n 
all_real_labels: _labels,\n _iteration: iteration})\n else:\n _disc_cost, _disc_wgan, _gen_cost, _, summaries = session.run(\n [disc_cost, disc_wgan, gen_cost, disc_train_op, summaries_op],\n feed_dict={all_real_data_int: _data,\n all_real_labels: _labels,\n _iteration: iteration})\n\n summary_writer.add_summary(summaries, global_step=iteration)\n\n # lib.plot.plot('cost', _disc_cost)\n lib.plot.plot('d_cost', _disc_wgan)\n lib.plot.plot('g_cost', _gen_cost)\n if CONDITIONAL and ACGAN:\n lib.plot.plot('disc_wgan', _disc_wgan)\n lib.plot.plot('acgan', _disc_acgan)\n lib.plot.plot('acc_real', _disc_acgan_acc)\n lib.plot.plot('acc_fake', _disc_acgan_fake_acc)\n # lib.plot.plot('time', time.time() - start_time)\n\n if iteration % INCEPTION_FREQUENCY == INCEPTION_FREQUENCY - 1:\n inception_score = get_inception_score(50000)\n lib.plot.plot('inception_50k', inception_score[0])\n lib.plot.plot('inception_50k_std', inception_score[1])\n\n # Calculate dev loss and generate samples every 100 iters\n if iteration % 100 == 99:\n # dev_disc_costs = []\n # for images, _labels in dev_gen():\n # _dev_disc_cost = session.run([disc_cost],\n # feed_dict={all_real_data_int: images,\n # all_real_labels: _labels})\n # dev_disc_costs.append(_dev_disc_cost)\n # lib.plot.plot('dev_cost', np.mean(dev_disc_costs))\n\n generate_image(iteration)\n\n if (iteration < 500) or (iteration % 1000 == 999):\n lib.plot.flush()\n\n if not os.path.exists(CHECKPOINT_DIR):\n os.mkdir(CHECKPOINT_DIR)\n saver.save(session, os.path.join(CHECKPOINT_DIR, 'model.ckpt'), global_step=iteration)\n\n lib.plot.tick()\n\n summary_writer.flush()\n summary_writer.close()\n","sub_path":"SNGAN/gan_imagNet_resnet.py","file_name":"gan_imagNet_resnet.py","file_ext":"py","file_size_in_byte":33201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
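The SNGAN training script above switches between Goodfellow, hinge, and WGAN objectives via LOSS_TYPE. A minimal NumPy sketch of the hinge variant it implements (relu-based margins for the critic, a plain mean for the generator); the logit arrays are illustrative stand-ins, not values produced by the script.

import numpy as np

def hinge_d_loss(d_real, d_fake):
    # Critic hinge loss: push real logits above +1 and fake logits below -1.
    return np.mean(np.maximum(0.0, 1.0 - d_real)) + np.mean(np.maximum(0.0, 1.0 + d_fake))

def hinge_g_loss(d_fake):
    # Generator side: simply raise the critic's score on generated samples.
    return -np.mean(d_fake)

d_real = np.array([1.5, 0.2, -0.3])  # hypothetical critic outputs on real images
d_fake = np.array([-1.2, 0.4, 0.9])  # hypothetical critic outputs on fakes
print(hinge_d_loss(d_real, d_fake), hinge_g_loss(d_fake))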
+{"seq_id":"601545219","text":"import tensorflow as tf\nfrom larq import utils\nimport numpy as np\n\ntry:\n from tensorflow.keras.metrics import Metric\nexcept: # TensorFlow 1.13 doesn't export this as a public API\n from tensorflow.python.keras.metrics import Metric\n\n\nclass FlipRatio(Metric):\n \"\"\"Computes the mean ration of changed values in a given tensor.\n\n !!! example\n ```python\n m = metrics.FlipRatio(values_shape=(2,))\n m.update_state((1, 1)) # result: 0\n m.update_state((2, 2)) # result: 1\n m.update_state((1, 2)) # result: 0.75\n print('Final result: ', m.result().numpy()) # Final result: 0.75\n ```\n\n # Arguments\n values_shape: Shape of the tensor for which to track changes.\n values_dtype: Data type of the tensor for which to track changes.\n name: Name of the metric.\n dtype: Data type of the moving mean.\n \"\"\"\n\n def __init__(\n self, values_shape=(), values_dtype=\"int8\", name=\"flip_ratio\", dtype=None\n ):\n super().__init__(name=name, dtype=dtype)\n self.values_dtype = tf.as_dtype(values_dtype)\n self.values_shape = tf.TensorShape(values_shape).as_list()\n with tf.init_scope():\n self._previous_values = self.add_weight(\n \"previous_values\",\n shape=values_shape,\n dtype=self.values_dtype,\n initializer=tf.keras.initializers.zeros,\n )\n self.total = self.add_weight(\n \"total\", initializer=tf.keras.initializers.zeros\n )\n self.count = self.add_weight(\n \"count\", initializer=tf.keras.initializers.zeros\n )\n self._size = np.prod(self.values_shape)\n\n def update_state(self, values, sample_weight=None):\n values = tf.cast(values, self.values_dtype)\n changed_values = tf.math.count_nonzero(tf.equal(self._previous_values, values))\n flip_ratio = 1 - (tf.cast(changed_values, self.dtype) / self._size)\n\n update_total_op = self.total.assign_add(flip_ratio * tf.sign(self.count))\n with tf.control_dependencies([update_total_op]):\n update_count_op = self.count.assign_add(1)\n with tf.control_dependencies([update_count_op]):\n return self._previous_values.assign(values)\n\n def result(self):\n return tf.compat.v1.div_no_nan(self.total, self.count - 1)\n\n def reset_states(self):\n tf.keras.backend.batch_set_value(\n [(v, 0) for v in self.variables if v != self._previous_values]\n )\n\n def get_config(self):\n return {\n **super().get_config(),\n \"values_shape\": self.values_shape,\n \"values_dtype\": self.values_dtype.name,\n }\n\n def add_weight(\n self,\n name,\n shape=(),\n aggregation=tf.VariableAggregation.SUM,\n synchronization=tf.VariableSynchronization.ON_READ,\n initializer=None,\n dtype=None,\n ):\n if utils.tf_1_14_or_newer():\n return super().add_weight(\n name=name,\n shape=shape,\n aggregation=aggregation,\n synchronization=synchronization,\n initializer=initializer,\n dtype=dtype,\n )\n else:\n # Call explicitely tf.keras.layers.Layer.add_weight because TF 1.13\n # doesn't support setting a custom dtype\n return tf.keras.layers.Layer.add_weight(\n self,\n name=name,\n shape=shape,\n dtype=self._dtype if dtype is None else dtype,\n trainable=False,\n initializer=initializer,\n collections=[],\n synchronization=synchronization,\n aggregation=aggregation,\n )\n","sub_path":"larq/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"468899756","text":"import math\nimport numpy as np\nimport pylab\nimport matplotlib.pyplot as plt\nx=np.arange(-3,3,0.01)\nfunk = str(input())\nylist = []\nfor h in range(len(x)):\n ylist.append(eval(str(funk.replace(\"x\",str(x[h])))))\nplt.plot(x,ylist)\nplt.axis('equal')\nplt.grid(True)\nplt.title(r'$Your func$')\nplt.show()\n","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"61036064","text":"#coding=utf-8\nimport ConfigParser\nimport os\nimport smtplib\nimport datetime\nimport time\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\nfrom email.utils import parseaddr, formataddr\n\ndef get_config(section, key):\n\tconfig = ConfigParser.ConfigParser()\n\tpath = os.path.split(os.path.realpath(__file__))[0] + '/info.conf'\n\tconfig.read(path)\n\treturn config.get(section, key)\n\nsender = get_config('address', 'sender')\nkey = get_config('address', 'key')\nhost = get_config('address', 'host')\nreceiver = get_config('address', 'receiver') \n\n# 三个参数:第一个为文本内容,第二个 plain 设置文本格式,第三个 utf-8 设置编码\nmessage = MIMEMultipart('related') \nmessage['From'] = Header(\"章先生~\", 'utf-8').encode()#内容中显示的发件人\nmessage['To'] = Header(\"收件人哦~\", 'utf-8').encode()#内容中显示的收件人\nmessage['Subject'] = Header('I Love You~', 'utf-8').encode()#邮件的题目\n\nmsgAlternative = MIMEMultipart('alternative')\nmessage.attach(msgAlternative)\nmail_msg = \"\"\"\ni love you测试...
\n图片演示:
\n
\n\"\"\"\nmsgAlternative.attach(MIMEText(mail_msg, 'html', 'utf-8'))\n\nfilename = get_config('image', 'path') + get_config('image', 'name') + '.jpg'\nfp = open(filename, 'rb')\nimage = MIMEImage(fp.read());\nfp.close()\nimage.add_header('Content-ID', '')\nmessage.attach(image)\n\n\nwhile True:\n\thour = get_config('time', 'hour')\n\tminute = get_config('time', 'minute') \n\tsecond = get_config('time', 'second') \n\tcurrent_time = time.localtime(time.time()) \n\tif ((current_time.tm_hour == int(hour)) and (current_time.tm_min == int(minute)) and (current_time.tm_sec == int(second))):\n\t\ttry:\n\t\t\tsmtpObj = smtplib.SMTP_SSL()#这个点要注意\n\t\t\tsmtpObj.connect(host)\n\t\t\tsmtpObj.login(sender, key) #邮箱登录\n\t\t\tsmtpObj.sendmail(sender, receiver, message.as_string())\n\t\t\tprint (\"邮件发送成功\")\n\t\texcept smtplib.SMTPException as e:\n\t\t\tprint (\"Error: 发送邮件产生错误\")\n\t\t\tprint(e)\n\ttime.sleep(1)\nsmtpObj.close()\n","sub_path":"i_love_you.py","file_name":"i_love_you.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"116728447","text":"from astropy.io import fits\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef mean_fits(files):\n if len(files) == 0:\n return []\n\n datasets = []\n\n for file in files:\n hdulist = fits.open(file)\n datasets.append(hdulist[0].data)\n hdulist.close()\n\n result = np.mean(datasets, axis=0)\n return result\n\ndef run_test(files):\n data = mean_fits(files)\n \n if len(data) > 0:\n print(data[100, 100])\n\n plt.imshow(data.T, cmap=plt.cm.viridis)\n plt.colorbar()\n plt.show()\n\nif __name__ == '__main__':\n # Test Case 1\n print(\"Test Case #1\")\n run_test(['image0.fits', 'image1.fits', 'image2.fits'])\n \n # Test Case 2\n print(\"Test Case #2\")\n run_test(['image0.fits', 'image1.fits', 'image3.fits'])\n \n # Test Case 3\n print(\"Test Case #3\")\n run_test(['image0.fits', 'image1.fits', 'image2.fits', 'image3.fits', 'image4.fits'])\n\n # Additional Test Case 1\n print(\"Additional Test Case #1\")\n run_test([])","sub_path":"week1/1b/4_mean_of_a_set_of_fits_files/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"2233026","text":"# use open cv to show new images from AirSim \r\n\r\nfrom PythonClient import *\r\nimport cv2\r\nimport time\r\nimport sys\r\n\r\nclient = AirSimClient('127.0.0.1')\r\n\r\nhelp = False\r\n\r\nfontFace = cv2.FONT_HERSHEY_SIMPLEX\r\nfontScale = 0.5\r\nthickness = 2\r\ntextSize, baseline = cv2.getTextSize(\"FPS\", fontFace, fontScale, thickness)\r\nprint (textSize)\r\ntextOrg = (10, 10 + textSize[1])\r\nframeCount = 0\r\nstartTime=time.clock()\r\nfps = 0\r\n\r\nwhile True:\r\n # because this method returns std::vector, msgpack decides to encode it as a string unfortunately.\r\n result = client.getImageForCamera(0, AirSimImageType.Depth)\r\n if (result == \"\\0\"):\r\n if (not help):\r\n help = True\r\n print(\"Please press '1' in the AirSim view to enable the Depth camera view\")\r\n else:\r\n rawImage = np.fromstring(result, np.int8)\r\n png = cv2.imdecode(rawImage, cv2.IMREAD_UNCHANGED)\r\n \r\n cv2.putText(png,'FPS ' + str(fps),textOrg, fontFace, fontScale,(255,0,255),thickness)\r\n cv2.imshow(\"Depth\", png)\r\n\r\n frameCount = frameCount + 1\r\n endTime=time.clock()\r\n diff = endTime - startTime\r\n if (diff > 1):\r\n fps = frameCount\r\n frameCount = 0\r\n startTime = endTime\r\n \r\n key = cv2.waitKey(1) & 0xFF;\r\n if (key == 27 or key == ord('q') or key == ord('x')):\r\n break;\r\n","sub_path":"PythonClient/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"239463923","text":"import os\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, jsonify, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom helpers import apology, login_required, lookup, usd\nfrom itertools import chain\n\n# Configure application\napp = Flask(__name__)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n# Ensure responses aren't cached\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n# Custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\n\n@app.route(\"/\")\n@login_required\ndef index():\n\n # arrange the data into group and name it stocks\n stocks = db.execute(\"SELECT Symbol,Name,Shares FROM portfolio WHERE id=:id\",\n id=session[\"user_id\"])\n\n\n # obtaining the cash form the database table name users\n result = db.execute(\"SELECT cash FROM users WHERE id=:id\",\n id=session[\"user_id\"])\n cash = result[0]\n y = cash[\"cash\"]\n cashs= float(y)\n\n grandtotal = cashs\n\n #delete the data from newportfolio.\n delete = db.execute(\"DELETE FROM newportfolio WHERE id=:id\",\n id=session[\"user_id\"])\n\n\n #obtaining the current price of every stock\n for stock in stocks:\n\n symbol = str(stock[\"Symbol\"])\n name = str(stock[\"Name\"])\n shares = int(stock[\"Shares\"])\n quote = lookup(name)\n price = float(quote[\"price\"])\n Total = float(price * shares)\n grandtotal += Total\n share = str(shares)\n\n\n #add all the new products and their price to the newportfolio table\n newportfolio = db.execute(\"INSERT INTO newportfolio(name,symbol,shares,price,total,id)VALUES(:name,:symbol,:shares,:price,:Total,:id)\",\n symbol=symbol,name=name,shares=share,price=price,Total=Total,\n id=session[\"user_id\"])\n\n #select the products from the table newportfolio to display through index.html\n newportfolio2 = db.execute(\"SELECT Name,Symbol,Shares,price,Total FROM newportfolio WHERE id=:id\",\n id=session[\"user_id\"])\n\n\n return render_template(\"index.html\",newportfolio=newportfolio2,total=usd(grandtotal),cash=usd(cashs))\n\n\n\n\n@app.route(\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n #if user reached via post\n if request.method == \"POST\":\n #make variables to facilitate it to me\n stock = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n int_shares = int(shares)\n\n # Ensure stock is submitted\n if not stock:\n return apology(\"must write a name of a stock\",400)\n\n # Ensure number of shares is submittes\n elif not shares :\n return apology(\"must provide how many do you want to purchase\",400)\n\n #Ensure that the number is positive\n elif int_shares < 1 :\n return apology(\"must be a positive number\",400)\n\n\n #select their money from the database\n list = db.execute(\"SELECT cash FROM users WHERE id=:id\",\n id=session[\"user_id\"])\n t = list[0]\n y = 
t['cash']\n r = int(y)\n\n #obtain the price,name, and symbol of the product form lookup function\n dict ={}\n dict = lookup(stock)\n price = dict['price']\n symbol = dict['symbol']\n name = dict['name']\n total = price*int_shares\n\n #obtain the Names from the database and make list of them\n Names = db.execute(\"SELECT Name FROM portfolio WHERE id=:id\",\n id=session[\"user_id\"])\n\n #making a list of Names from the database\n list = []\n for x in Names:\n Name = x[\"Name\"]\n if (stock == Name):\n list.append(Name)\n stocks = list\n\n\n #check that the user has enough money and if the stock is in stocks\n if (total<=r and stock in stocks ):\n\n newshare = db.execute(\"UPDATE portfolio set Shares = Shares+:int_shares , Total=:total+Total , datetime=datetime('now') WHERE Name=:stock AND id=:id \",\n stock=stock,int_shares=int_shares,total=total,id=session[\"user_id\"])\n\n newshare2 =db.execute(\"UPDATE newportfolio set Shares = Shares+:int_shares , Total=:total+Total WHERE Name=:stock AND id=:id\",\n stock=stock,int_shares=int_shares,total=total,id=session[\"user_id\"])\n\n newportfolio3 = db.execute(\"SELECT Name,Symbol,Shares,price,Total FROM newportfolio WHERE id=:id\",\n id=session[\"user_id\"])\n\n #update user cash\n update = db.execute(\"UPDATE users SET cash = :r - :total WHERE id=:id\",\n r=r,total=total,id=session[\"user_id\"])\n\n\n elif (total <= r and stock not in stocks) :\n\n\n #add the stock name and the username and the price to the portfolio database\n portfolio = db.execute(\"INSERT INTO portfolio(symbol,name,shares,price,total,id,Situation,datetime)VALUES(:name,:symbol,:int_shares,:price,:total,:id,'Bought',datetime('now'))\",\n symbol=symbol,name=name,int_shares=int_shares,price=price,total=total,id=session[\"user_id\"])\n\n\n\n #add the stock name and the username and the price to the newportfolio database\n newportfolio = db.execute(\"INSERT INTO newportfolio(symbol,name,shares,price,total,id)VALUES(:name,:symbol,:int_shares,:price,:total,:id)\",\n symbol=symbol,name=name,int_shares=int_shares,price=price,total=total,id=session[\"user_id\"])\n\n #update user cash\n update = db.execute('UPDATE users SET cash = :r - :total WHERE id=:id',\n r = r, total=total,id=session[\"user_id\"])\n\n #select form newportfolio\n newportfolio3 = db.execute(\"SELECT Name,Symbol,Shares,price,Total FROM newportfolio WHERE Name=Name AND id=:id GROUP BY Name,Symbol,Shares,Price,Total\",\n id=session[\"user_id\"])\n if (total <=r ):\n\n\n\n #bring cash from users table\n result = db.execute(\"SELECT cash FROM users WHERE id=:id\",\n id=session[\"user_id\"])\n cash = result[0]\n y = cash[\"cash\"]\n cashs = float(y)\n\n #add all the total form newportfolio table\n total0 = db.execute(\"SELECT SUM(Total) FROM newportfolio WHERE id=:id\",\n id=session[\"user_id\"])\n total2 = total0[0]\n x = total2[\"SUM(Total)\"]\n total3 = float(x)\n grandtotal = total3 + cashs\n\n #insert data into the history table\n Table = db.execute(\" SELECT datetime FROM portfolio WHERE Name=:stock AND id=:id\",\n stock=stock,id=session[\"user_id\"])\n date = Table[0]\n date2= date[\"datetime\"]\n\n\n History = db.execute(\"INSERT INTO History(Name,Price,Shares,Total,id,Situation,datetime)VALUES(:stock,:price,:int_shares,:total,:id,'bought',:date2)\",\n stock=stock,price=price,int_shares=int_shares,total=total,date2=date2,id=session[\"user_id\"])\n\n return render_template(\"index.html\",newportfolio=newportfolio3,cash=usd(cashs),total=usd(grandtotal))\n else:\n return apology(\"not enough money\",403)\n\n\n\n #if 
user via get means without clicking submit\n else:\n return render_template(\"buy.html\")\n\n\n@app.route(\"/check\", methods=[\"GET\"])\ndef check():\n\n username = request.args.get(\"username\")\n\n #getting all the usersname from the database\n users = db.execute(\"SELECT username FROM users WHERE username=:username\",username=username)\n\n\n\n #check if both existed\n if len(username)>0 and not users:\n return jsonify(\"true\")\n elif users and username:\n return jsonify(\"false\")\n\n\n\n\n #return redirect(\"/\")\n\n@app.route(\"/history\")\n@login_required\ndef history():\n\n\n History = db.execute(\"SELECT * FROM History WHERE id=:id\",\n id=session[\"user_id\"])\n return render_template('history.html',History=History)\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n\n \"\"\"Log user in\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = :username\",\n username=request.form.get(\"username\"))\n\n # Ensure username exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid username and/or password\", 403)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\n@app.route(\"/quote\", methods=[\"GET\", \"POST\"])\n#@login_required\ndef quote():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"choose a stock\", 400)\n\n symbol = request.form.get(\"symbol\")\n dict={}\n dict = lookup(symbol)\n if not dict:\n return apology(\"This stock is not available\",400)\n price = dict['price']\n symbol = dict['symbol']\n name = dict['name']\n\n\n\n\n\n\n\n\n return render_template(\"stock.html\",name=name,price=usd(price),symbol=symbol)\n else:\n\n return render_template(\"quote.html\")\n\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n\n \"\"\"Register user\"\"\"\n if request.method == \"POST\":\n\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 400)\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n elif not request.form.get(\"confirmation\"):\n return apology(\"must reconfirm password\", 400)\n elif request.form.get(\"password\") != request.form.get(\"confirmation\"):\n return apology(\"password must be the same \",400)\n\n hash=generate_password_hash(request.form.get(\"password\"))\n\n result = db.execute(\n \"INSERT INTO users (username,hash)VALUES(:username,:hash)\",\n username = request.form.get(\"username\"), hash =hash)\n\n if not result:\n return apology(\"Try another username\",400)\n\n session[\"user_id\"] = result\n\n return redirect(\"/\")\n\n else:\n\n return 
render_template(\"register.html\")\n\n\n@app.route(\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n #if user via post\n if request.method == \"POST\":\n\n #get a stock\n stock = request.form.get(\"stock\")\n\n #check that stock is submitted\n if not stock:\n return apology(\"Must enter a stock name\",403)\n\n\n #bring all the Names from the database\n stocks = db.execute(\"SELECT Name FROM newportfolio WHERE id=:id\",\n id=session[\"user_id\"])\n\n #obtaining the Names without hashing\n list = []\n for x in stocks:\n stock2 = x[\"Name\"]\n list.append(stock2)\n Names = list\n\n #iterate to see if what the user submitted is available\n if stock not in Names:\n return apology(\"not available\",403)\n\n #know how many shares\n shares = request.form.get(\"shares\")\n\n #translate shares into integer\n int_shares = int(shares)\n\n #obtain the price of the stock\n quote = lookup(stock)\n price = float(quote[\"price\"])\n total = float(price*int_shares)\n\n #check shares\n if not shares:\n return apology(\"Must enter shares\",403)\n\n #Check shares is positive\n elif int_shares<1:\n return apology(\"Must be positive\",403)\n\n #select the shares of the stock\n shares = db.execute(\"SELECT Shares FROM portfolio WHERE Name = :stock AND id=:id \",\n stock=stock,id=session[\"user_id\"])\n\n #remove the hash from the shares\n share = shares[0]\n share2 = share[\"Shares\"]\n share3 = int(share2)\n\n\n #ensure that there is enough shares\n if int_shares > share3:\n return apology(\"There is not enough shares\",403)\n\n #update shares and datetime in the portfolio\n updateshare = db.execute(\"UPDATE portfolio set Shares=Shares-:int_shares,Total=Total-:total,datetime=datetime('now') WHERE Name = :stock AND id=:id\",\n stock=stock,int_shares=int_shares,total=total,id=session[\"user_id\"])\n\n #select the updateshare from the portfolio\n updateshare2 = db.execute(\"SELECT Shares FROM portfolio WHERE Name = :stock AND id=:id \",\n stock=stock,id=session[\"user_id\"])\n\n #remove the hash\n updateshare3 = updateshare2[0]\n updateshare4 = updateshare3[\"Shares\"]\n updateshare5 = int(updateshare4)\n\n #update cash\n updatecash = db.execute(\"UPDATE users SET cash=cash+:total WHERE id=:id\",\n total=total,id=session[\"user_id\"])\n\n #see if the updateshare is 0 delete\n if updateshare5 == 0 :\n Delete = db.execute(\"DELETE FROM portfolio WHERE Name=:stock AND id=:id \",\n stock=stock,id=session[\"user_id\"])\n\n #obtain datetime from portfolio\n date = db.execute(\"SELECT datetime FROM portfolio WHERE Name=:stock AND id=:id\",\n stock=stock,id=session[\"user_id\"])\n date2 = date[0]\n date3 = date2[\"datetime\"]\n\n #insert into history table\n History = db.execute(\"INSERT INTO History (Name,Price,Shares,Total,id,Situation,datetime)VALUES(:stock,:price,:int_shares,:total,:id,'sold',:date3)\",\n stock=stock,price=price,int_shares=int_shares,total=total,date3=date3,id=session[\"user_id\"])\n\n History2 = db.execute(\"SELECT * FROM History WHERE id=:id\",\n id=session[\"user_id\"])\n\n return render_template(\"history.html\",History=History2)\n #if user via get\n else:\n return render_template(\"sell.html\")\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n# Listen for errors\nfor code in default_exceptions:\n 
app.errorhandler(code)(errorhandler)\n","sub_path":"environment/finance/.~c9_invoke_9wziLU.py","file_name":".~c9_invoke_9wziLU.py","file_ext":"py","file_size_in_byte":15162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
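The index view in the finance app above re-quotes every holding to price the portfolio. The same valuation step isolated as a pure function, with the quote source stubbed out since the real lookup helper hits an external API:

def portfolio_value(holdings, cash, lookup):
    # holdings: rows with "Name" and "Shares"; lookup(name) -> {"price": float}
    grand_total = float(cash)
    rows = []
    for h in holdings:
        price = float(lookup(h["Name"])["price"])
        total = price * int(h["Shares"])
        rows.append(dict(h, price=price, Total=total))
        grand_total += total
    return rows, grand_total

# Stubbed quote source, for illustration only.
rows, total = portfolio_value([{"Name": "AAPL", "Shares": 3}], cash=100.0,
                              lookup=lambda name: {"price": 10.0})
print(total)  # 130.0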
+{"seq_id":"84257480","text":"from django.shortcuts import render,redirect\nfrom django.template.context_processors import csrf\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\n\nfrom django.template.context_processors import csrf\nfrom system.models import issued,Books,Faculty,Requests,issued,StudentIssued\nfrom datetime import datetime,date\n\nfrom django.core import mail\nfrom django.core.mail import EmailMessage,send_mail\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.files import File\n\n# Create your views here.\n\ndef BaseLayout(request):\n return render(request,'administrator/base.html')\n\n@login_required(login_url='/login/')\ndef AllFaculty(request):\n all_faculty=Faculty.objects.all()\n req_count = Requests.objects.all()\n print(req_count)\n t = datetime(date.today().year, date.today().month, date.today().day, 0, 0)\n mydate=t.strftime('%Y-%m-%d')\n allissued = issued.objects.filter(return_date__lt=mydate)\n context={\n 'faculties':all_faculty,\n 'req_count':req_count,\n 'overdue':allissued\n }\n return render(request,'administrator/Member.html',context=context)\n\n@login_required(login_url='/login/')\ndef AllBooks(request):\n req_count = Requests.objects.all()\n all_books= Books.objects.all()\n #print(all_books)\n t = datetime(date.today().year, date.today().month, date.today().day, 0, 0)\n mydate=t.strftime('%Y-%m-%d')\n allissued = issued.objects.filter(return_date__lt=mydate)\n context={\n 'books':all_books,\n 'req_count':req_count,\n 'overdue':allissued\n }\n return render(request,'administrator/books.html',context=context)\n #return render_to_response('administrator/books.html', context=context)\n\n\n@login_required(login_url='/login/')\ndef Add(request):\n if request.method == \"POST\":\n id = request.POST.get('id','')\n name= request.POST.get('name','')\n email= request.POST.get('email','')\n phone_no = request.POST.get('phone_no','')\n if request.POST.get('ea','') == \"Add\":\n print(phone_no)\n password= User.objects.make_random_password(length=8) #Generate password randomly\n print(password) #Print Randomly generated password \n a=User.objects.create_user(id,email,password)\n a.save()\n faculty= Faculty(id=id,name=name,email=email,phone_no=phone_no,password=password,date_joined=datetime.now())\n faculty.save()\n # Receiver email\n to=email\n # body and subject of mail \n body=\"Hey %s ! 
\\n \\n Password for your Ce-Department Library account is %s \\n\"%(name,password)\n #Composing email and sending mail\n email=EmailMessage('CE-Department',body,to=[to])\n email.send()\n else:\n faculty = Faculty.objects.get(id=id)\n faculty.name = name\n faculty.email = email\n faculty.phone_no = phone_no\n faculty.save()\n all_faculty=Faculty.objects.all()\n return redirect('/administrator/Faculties/')\n\n@login_required(login_url='/login/')\ndef AddBook(request):\n if request.method == \"POST\":\n last_book = Books.objects.last()\n #print(last_book.id)\n last_id = last_book.id.split('-')\n #print(last_id)\n id = int(last_id[1]) + 1\n Title = request.POST.get('Title','')\n id= \"CE-\" + str(id)\n Publisher= request.POST.get('Publisher','')\n Author= request.POST.get('Author','')\n a=Books(sr_no=int(last_book.sr_no)+1,id=id,title=Title,publisher=Publisher,author=Author,available=True)\n a.save()\n return redirect('/administrator/Books/')\n\n\n\n@login_required(login_url='/login/')\ndef Editdata(request):\n if request.method == \"GET\":\n #print(request.GET.get('id'))\n fac_data = Faculty.objects.get(id=request.GET.get('id'))\n #print(fac_data.id)\n data={\n 'id':fac_data.id,\n 'name':fac_data.name,\n 'email':fac_data.email,\n 'phone_no':fac_data.phone_no,\n }\n return JsonResponse(data)\n\n@login_required(login_url='/login/')\ndef DeleteFac(request):\n if request.method == \"GET\":\n fac_data = Faculty.objects.get(id=request.GET.get('id'))\n fac_data.delete()\n data={\n 'cond':True\n }\n return JsonResponse(data)\n\n@login_required(login_url='/login/') \ndef InputCSV(request):\n if request.method == \"POST\":\n if(request.POST.get('type') == \"fac\"):\n csv_file = request.FILES[\"csv_file\"]\n file_data = csv_file.read().decode(\"utf-8\")\t\n lines = file_data.split(\"\\n\")\n for line in lines:\t\t\t\n fields = line.split(',')\n print(fields)\n faculty= Faculty(id=fields[0],name=fields[1],email=fields[2],phone_no=fields[3],password=fields[4],date_joined=datetime.now())\n faculty.save()\n print(line)\n return redirect('/administrator/Faculties/')\n elif(request.POST.get('type') == \"Book\"):\n return redirect('/administrator/Faculties/')\n else:\n return redirect('/administrator/Faculties/')\n else:\n return render(request,'administrator/upload.html')\n\n\"\"\"def UploadDatabase(request):\n if request.method == \"POST\":\n csv_file = request.FILES[\"csv_file\"]\n file_data = csv_file.read().decode(\"utf-8\")\t\n lines = file_data.split(\"\\n\")\n for line in lines:\t\t\t\t\t\t\n\t\t\tfields = line.split(\",\")\n faculty= Faculty(id=fields[0],name=fields[1],email=fields[2],phone_no=fields[3],password=fields[4],date_joined=datetime.now())\n faculty.save()\n return HttpResponse(\"saved\")\"\"\"\n\n@login_required(login_url='/login/')\ndef ChangebookStatus(request):\n if request.method == \"GET\":\n book = Books.objects.get(id=request.GET.get('bookid'))\n if request.GET.get('cond') == 'add':\n book.available = True\n book.save()\n else:\n book.available = False\n book.save()\n return JsonResponse({\"successful\":True})\n#faculty\n@login_required(login_url='/login/')\ndef BookRequests(request):\n due = 6\n with open('./system/Due.txt','r') as f:\n f.readline()\n f.readline()\n due = int(f.readline())\n print(due)\n\n if request.method == \"POST\":\n req = Requests.objects.get(id = request.POST.get('req_id'))\n if (req.date.month+due) >= 12:\n new_month = (req.date.month+due) % 12\n new_year=req.date.year+1\n if new_month==0:\n new_month = 12\n new_year = req.date.year\n else:\n new_month = 
(req.date.month+due)\n new_year = req.date.year\n new_date=date(new_year,new_month,req.date.day)\n issue=issued(book_id=req.book_id,faculty_id=req.faculty_id,issue_date=date.today(),return_date=new_date)\n req.delete()\n issue.save()\n same_book = Requests.objects.filter(book_id=req.book_id)\n for b in same_book:\n b.delete()\n return redirect('/administrator/BookRequest/')\n else:\n req_count = Requests.objects.all()\n t = datetime(date.today().year, date.today().month, date.today().day, 0, 0)\n mydate=t.strftime('%Y-%m-%d')\n allissued = issued.objects.filter(return_date__lt=mydate)\n context={\n 'all_requests':Requests.objects.all(),\n 'req_count':req_count,\n 'overdue':allissued\n }\n return render(request,'administrator/request.html',context=context)\n\n@login_required(login_url='/login/')\ndef BookIssued(request):\n if request.method == \"GET\":\n req_count = Requests.objects.all()\n allissued = issued.objects.all().order_by('return_date')\n t = datetime(date.today().year, date.today().month, date.today().day, 0, 0)\n mydate=t.strftime('%Y-%m-%d')\n #allissued = issued.objects.all()\n context={\n 'allissued':allissued,\n 'req_count':req_count,\n 'overdue':issued.objects.filter(return_date__lt=mydate)\n }\n return render(request,'administrator/issued.html',context=context)\n if request.method == \"POST\":\n if request.POST.get('status') == \"return\":\n myissue = issued.objects.get(id=request.POST.get('issue_id'))\n myissue.delete()\n return redirect('/administrator/BookIssued/')\n if request.POST.get('status') == \"renew\":\n myissue = issued.objects.get(id=request.POST.get('issue_id'))\n if (myissue.return_date.month+6) >= 12:\n new_month = (myissue.return_date.month+6) % 12\n new_year=myissue.return_date.year+1\n if new_month==0:\n new_month = 12\n new_year = myissue.return_date.year\n else:\n new_month = (myissue.return_date.month+6)\n new_year = myissue.return_date.year\n new_date=date(new_year,new_month,myissue.return_date.day)\n myissue.return_date = new_date\n myissue.save()\n return redirect('/administrator/BookIssued/')\n\n@login_required(login_url='/login/')\ndef Notify(request):\n t = datetime(date.today().year, date.today().month, date.today().day, 0, 0)\n mydate=t.strftime('%m/%d/%Y')\n subject = 'DeadLine Of Book'\n context={\n 'end' : mydate,\n }\n html_message = render_to_string('administrator/mail.html', context)\n plain_message = strip_tags(html_message)\n from_email = 'From '\n to = 'gdthumar.code@gmail.com'\n mail.send_mail(subject, plain_message, from_email, [to], html_message=html_message)\n return HttpResponse('Sent')\n\n@login_required(login_url='/login/')\ndef OverDue(request):\n if request.method == \"GET\":\n req_count = Requests.objects.all()\n t = datetime(date.today().year, date.today().month, date.today().day, 0, 0)\n mydate=t.strftime('%Y-%m-%d')\n allissued = issued.objects.filter(return_date__lt=mydate)\n context={\n 'allissued':allissued,\n 'req_count':req_count,\n 'today':date.today()\n }\n return render(request,'administrator/overdue.html',context=context)\n\n@login_required(login_url='/login/')\ndef Send_Notification(request):\n t = datetime(date.today().year, date.today().month, date.today().day, 0, 0)\n mydate=t.strftime('%Y-%m-%d')\n print(mydate)\n allissued = issued.objects.filter(return_date__lt=mydate)\n from_email = 'From '\n subject = 'DeadLine Of Book'\n to=[]\n for issue in allissued:\n context={\n 'end' : issue.return_date,\n }\n to.append(issue.faculty_id.email)\n html_message = render_to_string('administrator/mail.html', context)\n 
plain_message = strip_tags(html_message)\n to = issue.faculty_id.email\n mail.send_mail(subject, plain_message, from_email, [to], html_message=html_message)\n return JsonResponse({\"successful\":mydate})\n\n@login_required(login_url='/login/')\ndef ChangeSetting(request):\n if request.method == \"GET\":\n f=open(\"./system/Due.txt\", \"r\")\n templines=f.readlines()\n lines = []\n for line in templines:\n lines.append(line)\n print(lines)\n studentduedate=lines[0]\n studentduecharge=lines[1]\n facultyduedate=lines[2]\n facultyduecharge=lines[3]\n #t = date(date.today().year, date.today().month, date.today().day)\n #print(t)\n data={\n 'studentduedate':studentduedate,\n 'studentduecharge':studentduecharge,\n 'facultyduedate':facultyduedate,\n 'facultyduecharge':facultyduecharge,\n }\n return JsonResponse(data)\n else:\n studentduedate=request.POST.get('studentduedate')\n studentduecharge=request.POST.get('studentduecharge')\n facultyduedate=request.POST.get('facultyduedate')\n facultyduecharge=request.POST.get('facultyduecharge')\n # 1-> student date 2->charge 3->faculty date 4->charge\n print(studentduedate,studentduecharge)\n print(facultyduedate,facultyduecharge)\n with open('./system/Due.txt','w+') as f:\n f.write(studentduedate+'\\n'+ studentduecharge+'\\n'+ facultyduedate+'\\n'+ facultyduecharge+ '\\n')\n\n return redirect('/administrator/Books/')\n\ndef AboutUs(request):\n return render(request,'administrator/aboutus.html')\n\n\n \n\n\n\n\n","sub_path":"library/system/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
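The manual month arithmetic in BookRequests and BookIssued above rolls the month over by hand and raises ValueError whenever the shifted day does not exist (for example Aug 31 plus 6 months). A sketch using dateutil.relativedelta, which clamps to the last valid day instead; due_months would come from the same Due.txt setting.

from datetime import date
from dateutil.relativedelta import relativedelta

def return_date(issue_date, due_months):
    # relativedelta clamps the day to the end of the target month.
    return issue_date + relativedelta(months=due_months)

print(return_date(date(2019, 8, 31), 6))  # 2020-02-29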
+{"seq_id":"2086946","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nfrom collections import namedtuple\n\nfrom echomesh.base import Args\nfrom echomesh.base import CommandFile\nfrom echomesh.base import GetPrefix\nfrom echomesh.base import Leafs\nfrom echomesh.base import Merge\nfrom echomesh.base import Yaml\n\n_ARGUMENT_ERROR = \"\"\"\nERROR: Didn't understand arguments to echomesh: \"%s\".\n\nechomesh needs to be called with arguments looking like \"name=value\".\n\nExamples:\n echomesh\n echomesh debug=true\n echomesh audio.input.enable=false light.enable=false\n\"\"\"\n\n_ASSIGNMENT_ERROR = \"\"\"\nERROR: couldn't assign a variable from: \"%s\".\n\nVariable assignments look like \"name=value\" and you can have more than one\nper line.\n\nExamples:\n debug=true\n audio.input.enable=false light.enable=false\n\"\"\"\n\nFileConfig = namedtuple('FileConfig', 'file base edits changes')\n\nclass MergeConfig(object):\n def __init__(self, args):\n self.args = args\n self.read()\n\n def read(self):\n self._read_file_configs()\n self.arg_config = self._assignment_to_config(self.args, _ARGUMENT_ERROR)\n return self.recalculate()\n\n def recalculate(self):\n self.config = None\n self.changed = {}\n for _, configs in self.file_configs:\n self.config = Merge.merge(self.config, *configs)\n self.changed = Merge.merge(self.changed, *configs[2:])\n\n arg = copy.deepcopy(self.arg_config)\n clean_arg = Merge.difference_strict(arg, self.changed)\n self.config = Merge.merge_for_config(self.config, clean_arg)\n\n return self.config\n\n def has_changes(self):\n return any(configs[2] for (_, configs) in self.file_configs)\n\n def get_changes(self):\n return [(f, c[2]) for (f, c) in self.file_configs if c[2]]\n\n def assign(self, args, index=2): # default is 'master'\n configs = self.file_configs[index][1]\n\n while len(configs) < 3:\n configs.append({})\n assignments = self._assignment_to_config(args, _ASSIGNMENT_ERROR)\n configs[2] = Merge.merge(configs[2], assignments)\n self.recalculate()\n return assignments\n\n def save(self):\n saved_files = []\n for f, configs in self.file_configs:\n if len(configs) > 2 and configs[2]:\n saved_files.append(f)\n configs[1] = Merge.merge(*configs[1:])\n while len(configs) > 2:\n configs.pop()\n with open(f, 'r') as fo:\n data = fo.read().split(Yaml.SEPARATOR)[0]\n\n with open(f, 'wb') as fw:\n fw.write(data)\n fw.write(Yaml.SEPARATOR)\n fw.write(Yaml.encode_one(configs[1]))\n\n self.arg_config = Merge.difference_strict(self.arg_config, self.changed)\n self.recalculate()\n return saved_files\n\n def assignments(self, index=2):\n assigned = self.file_configs[index][1]\n return (len(assigned) > 2 and Leafs.leafs(assigned[2])) or {}\n\n def _read_file_configs(self):\n self.file_configs = []\n base_config = None\n\n for f in reversed(CommandFile.expand('config.yml')):\n configs = Yaml.read(f, 'config')\n for c in configs:\n if base_config:\n base_config = Merge.merge_for_config(base_config, c)\n else:\n base_config = copy.deepcopy(c)\n while len(configs) < 3:\n configs.append({})\n self.file_configs.append([f, configs])\n\n def _assignment_to_config(self, args, error):\n args = ' '.join(args)\n config = {}\n base_config = self.file_configs[0][1][0]\n assert isinstance(base_config, dict)\n try:\n split_args = Args.split(args)\n except Exception as e:\n e.arg = '%s %s' % (error, args)\n raise\n\n for addr, value in split_args:\n try:\n GetPrefix.set_assignment(addr, value, base_config, config,\n 
unmapped_names=Merge.CONFIG_EXCEPTIONS)\n except GetPrefix.PrefixException:\n raise Exception('Can\\'t understand configuration address \"%s\"' % addr)\n except Exception:\n raise Exception('Can\\'t understand configuration value \"%s\" in %s=%s' %\n (value, addr, value))\n return config\n\n","sub_path":"code/python/echomesh/base/MergeConfig.py","file_name":"MergeConfig.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
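Merge.merge itself is not part of this record, but the call sites above imply that later configs override earlier ones key-by-key, recursing into nested sections. A minimal sketch of those assumed semantics, not echomesh's actual implementation:

def deep_merge(*dicts):
    result = {}
    for d in dicts:
        if d is None:  # merge() above is sometimes called with None configs
            continue
        for key, value in d.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                result[key] = deep_merge(result[key], value)  # recurse into nested sections
            else:
                result[key] = value
    return result

print(deep_merge({'audio': {'input': True, 'rate': 44100}},
                 {'audio': {'input': False}}))
# {'audio': {'input': False, 'rate': 44100}}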
+{"seq_id":"400530774","text":"import sys, os\r\nDAFAPP_DIR = os.environ.get(\"DAFAPPSERVER_ROOTDIR\")\r\nsys.path.append(DAFAPP_DIR + 'ibank/accounting/script_modules')\r\nimport accountingapi\r\n\r\ndef CreateJournal(config, cnnumber, cndate, branch_code, user_id_commit):\r\n journal = config.CreatePObject('Journal')\r\n datevalue = accountingapi.GetActiveAccDay(config).DateValue\r\n datevalue = config.ModDateTime.EncodeDate(datevalue[0],datevalue[1],datevalue[2])\r\n journal.journal_date = datevalue\r\n journal.description = 'Jurnal CN %s' % (cnnumber)\r\n journal.branch_code = branch_code\r\n journalapi = accountingapi.Journal(journal)\r\n journal_type = 'GL' # Jurnal umum\r\n journalapi.SetNewInstance(journal_type)\r\n\r\n journal.is_posted = 'F'\r\n journal.is_partlychecked = 'T'\r\n journal.userid_create = user_id_commit\r\n \r\n return journal\r\n\r\ndef CreateJournalItem(config, strDescription, journal, oAccountInstance, debit, credit, user_id_commit):\r\n item = config.CreatePObject('JournalItem')\r\n item.description = strDescription\r\n itemapi = accountingapi.JournalItem(item)\r\n itemapi.SetNewInstance(journal, oAccountInstance, debit, credit)\r\n item.JournalItemStatus = 'C'\r\n item.userid_create = user_id_commit\r\n item.userid_check = user_id_commit\r\n\r\ndef DAFScriptMain(config, parameter, returnpacket):\r\n # config: ISysConfig object\r\n # parameter: TPClassUIDataPacket\r\n # returnpacket: TPClassUIDataPacket (undefined structure)\r\n\r\n user_id_commit = parameter.FirstRecord.user_id_commit\r\n cnvalue = parameter.FirstRecord.cnvalue\r\n cash = parameter.FirstRecord.cash\r\n todeposit = parameter.FirstRecord.todeposit\r\n cnnumber = parameter.FirstRecord.cnnumber\r\n cndate = parameter.FirstRecord.cndate\r\n branch_code = parameter.FirstRecord.branch_code\r\n currency_code = parameter.FirstRecord.currency_code\r\n\r\n oAccInstCNSales = accountingapi.GetAccModuleIntfInstance(branch_code, currency_code, 'cnsales', config)\r\n oAccInstCNCash = accountingapi.GetAccModuleIntfInstance(branch_code, currency_code, 'cncash', config)\r\n #oAccInstCNCustDeposit = accountingapi.GetAccModuleIntfInstance(branch_code, currency_code, 'cncustdeposit', config)\r\n oAccInstCNPybDeposit = accountingapi.GetAccModuleIntfInstance(branch_code, currency_code, 'cnpybdeposit', config)\r\n #oAccInstCNRcvSales = accountingapi.GetAccModuleIntfInstance(branch_code, currency_code, 'cnrcvsales', config)\r\n\r\n config.BeginTransaction()\r\n try:\r\n journal = CreateJournal(config, cnnumber, cndate, branch_code, user_id_commit)\r\n\r\n # journal item cn sales\r\n strDescription = 'Pengurangan nilai penjualan akibat CN '+ cnnumber\r\n CreateJournalItem(config, strDescription, journal, oAccInstCNSales, cnvalue, 0.0, user_id_commit)\r\n\r\n # sementara tidak ada cn ke cash, semua cn didepositkan\r\n # journal item cash\r\n #strDescription = 'Pembayaran tunai untuk CN '+ cnnumber\r\n #CreateJournalItem(config, strDescription, journal, oAccInstCNCash, 0.0, cash, user_id_commit)\r\n\r\n # tidak dianggap mengurangi piutang karena tidak terikat dengan invoice tertentu\r\n # journal item piutang\r\n #strDescription = 'Pengurangan piutang usaha akibat CN '+ cnnumber\r\n #CreateJournalItem(config, strDescription, journal, oAccInstCNRcvSales, 0.0, todeposit, user_id_commit)\r\n\r\n # journal item hutang\r\n strDescription = 'Penambahan hutang usaha akibat CN '+ cnnumber\r\n CreateJournalItem(config, strDescription, journal, oAccInstCNPybDeposit, 0.0, cnvalue, user_id_commit)\r\n\r\n #CheckDebitCreditBal(config, 
journal.journal_no)\r\n\r\n config.Commit()\r\n isSucceed = 1\r\n except:\r\n config.Rollback()\r\n isSucceed = 0\r\n raise\r\n \r\n returnpacket.CreateValues(['isSucceed',isSucceed])\r\n\r\n return 1\r\n\r\n","sub_path":"scripts/sales/cn journal.py","file_name":"cn journal.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
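The commented-out CheckDebitCreditBal call above suggests the journal should be verified as balanced before config.Commit(). A generic sketch of that invariant, with plain (debit, credit) tuples standing in for the JournalItem persistent objects; the rounding tolerance is an assumption.

def assert_balanced(items, tolerance=0.005):
    # items: iterable of (debit, credit) amounts belonging to one journal.
    total_debit = sum(d for d, _ in items)
    total_credit = sum(c for _, c in items)
    if abs(total_debit - total_credit) > tolerance:
        raise ValueError('unbalanced journal: debit %.2f vs credit %.2f'
                         % (total_debit, total_credit))

assert_balanced([(100.0, 0.0), (0.0, 100.0)])  # balanced: passes silently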
+{"seq_id":"230024583","text":"# -*- mode:python;coding:utf-8 -*-\nimport pygst\npygst.require('0.10')\nimport gst\nimport gobject, sys\n\n\nclass GPlayer:\n def __init__(self):\n self.player = gst.Pipeline(\"esono\")\n\n self.source = gst.element_factory_make('gnomevfssrc', 'file-source')\n self.source = gst.element_factory_make(\"audiotestsrc\", \"audio\")\n \n self.volume = gst.element_factory_make('volume','volume')\n self.volume.set_property('volume', 0.1)\n self.sink = gst.element_factory_make(\"jackaudiosink\", \"sink\")\n self.player.add(self.source, self.volume, self.sink )\n gst.element_link_many(self.source, self.volume, self.sink )\n\n def play(self, path):\n # self.source.set_property('location', 'file://' + path )\n self.player.set_state(gst.STATE_PLAYING)\n \n\n\n'''\ndef play_uri(uri):\n \" play an uri like file:///home/foo/bar.mp3 \"\n\n mainloop = gobject.MainLoop()\n player = gst.element_factory_make(\"playbin\", \"player\")\n \n player.set_property('uri', uri)\n player.set_state(gst.STATE_PLAYING)\n\n mainloop.run()\n\nplay_uri(\"file:///data/audio/sc/marco_bernabe/terje_paulsen__the_abundant_emptiness_between_cold_and_heat.flac\")\n'''\nmainloop = gobject.MainLoop()\np = GPlayer()\np.play(\"/data/audio/sc/marco_bernabe/terje_paulsen__the_abundant_emptiness_between_cold_and_heat.flac\")\np.play(\"/usr/lib/pd/doc/sound/voice2.wav\")\nmainloop.run()\n","sub_path":"esonoclaste.app/Contents/Resources/esono/tests/zz/gstreamer.py","file_name":"gstreamer.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}