\")\n\n aff4_blobs = aff4.FACTORY.Open(blob_path, token=self.token)\n offset = 0\n write_path = \"%d\" % time.time()\n for i, blob in enumerate(aff4_blobs):\n self.CallClient(\n \"UpdateAgent\", executable=blob, more_data=i < aff4_blobs.chunks-1,\n offset=offset, write_path=write_path, next_state=\"Interrogate\")\n\n offset += len(blob.data)\n\n @flow.StateHandler(next_state=\"Done\")\n def Interrogate(self, responses):\n if not responses.success:\n self.Log(\"Installer reported an error: %s\" % responses.status)\n else:\n self.Log(\"Installer completed.\")\n self.CallFlow(\"Interrogate\", next_state=\"Done\")\n\n @flow.StateHandler()\n def Done(self):\n client = aff4.FACTORY.Open(self.client_id, token=self.token)\n info = client.Get(client.Schema.CLIENT_INFO)\n self.Log(\"Client update completed, new version: %s\" %\n info.client_version)\n\n\nclass NannyMessageHandler(ClientCrashEventListener):\n \"\"\"A listener for nanny messages.\"\"\"\n EVENTS = [\"NannyMessage\"]\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:NannyMessage\")\n\n mail_template = \"\"\"\nGRR nanny message received.
\n\nThe nanny for client %(client_id)s (%(hostname)s) just sent a message:\n\n%(message)s\n\nClick here to access this machine.\n\n%(signature)s
\n\n\"\"\"\n\n subject = \"GRR nanny message received from %s.\"\n\n logline = \"Nanny for client %s sent: %s\"\n\n @flow.EventHandler(allow_client_access=True)\n def ProcessMessage(self, message=None, event=None):\n \"\"\"Processes this event.\"\"\"\n _ = event\n\n client_id = message.source\n\n message = rdfvalue.DataBlob(message.args).string\n\n logging.info(self.logline, client_id, message)\n\n # Write crash data to AFF4.\n client = aff4.FACTORY.Open(client_id, token=self.token)\n client_info = client.Get(client.Schema.CLIENT_INFO)\n\n crash_details = rdfvalue.ClientCrash(\n client_id=client_id, client_info=client_info,\n crash_message=message, timestamp=long(time.time() * 1e6),\n crash_type=self.well_known_session_id)\n\n self.WriteAllCrashDetails(client_id, crash_details)\n\n # Also send email.\n if config_lib.CONFIG[\"Monitoring.alert_email\"]:\n client = aff4.FACTORY.Open(client_id, token=self.token)\n hostname = client.Get(client.Schema.HOSTNAME)\n url = urllib.urlencode(((\"c\", client_id),\n (\"main\", \"HostInformation\")))\n\n email_alerts.SendEmail(\n config_lib.CONFIG[\"Monitoring.alert_email\"],\n \"GRR server\",\n self.subject % client_id,\n self.mail_template % dict(\n client_id=client_id,\n admin_ui=config_lib.CONFIG[\"AdminUI.url\"],\n hostname=hostname,\n signature=config_lib.CONFIG[\"Email.signature\"],\n urn=url,\n message=message),\n is_html=True)\n\n\nclass ClientAlertHandler(NannyMessageHandler):\n \"\"\"A listener for client messages.\"\"\"\n EVENTS = [\"ClientAlert\"]\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:ClientAlert\")\n\n mail_template = \"\"\"\nGRR client message received.
\n\nThe client %(client_id)s (%(hostname)s) just sent a message:\n\n%(message)s\n\nClick here to access this machine.\n\n%(signature)s
\n\n\"\"\"\n\n subject = \"GRR client message received from %s.\"\n\n logline = \"Client message from %s: %s\"\n\n\nclass ClientCrashHandler(ClientCrashEventListener):\n \"\"\"A listener for client crashes.\"\"\"\n EVENTS = [\"ClientCrash\"]\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:CrashHandler\")\n\n mail_template = \"\"\"\nGRR client crash report.
\n\nClient %(client_id)s (%(hostname)s) just crashed while executing an action.\nClick here to access this machine.\n\nThanks,\n%(signature)s
\n\nP.S. The state of the failing flow was:\n%(state)s\n\n%(nanny_msg)s\n\n\"\"\"\n\n @flow.EventHandler(allow_client_access=True)\n def ProcessMessage(self, message=None, event=None):\n \"\"\"Processes this event.\"\"\"\n _ = event\n client_id = message.source\n nanny_msg = \"\"\n\n flow_obj = aff4.FACTORY.Open(message.session_id, token=self.token)\n\n # Log.\n logging.info(\"Client crash reported, client %s.\", client_id)\n\n # Export.\n stats.STATS.IncrementCounter(\"grr_client_crashes\")\n\n # Write crash data to AFF4.\n client = aff4.FACTORY.Open(client_id, token=self.token)\n client_info = client.Get(client.Schema.CLIENT_INFO)\n\n status = rdfvalue.GrrStatus(message.args)\n crash_details = rdfvalue.ClientCrash(\n client_id=client_id, session_id=message.session_id,\n client_info=client_info, crash_message=status.error_message,\n timestamp=rdfvalue.RDFDatetime().Now(),\n crash_type=self.well_known_session_id)\n\n self.WriteAllCrashDetails(client_id, crash_details,\n flow_session_id=message.session_id)\n\n # Also send email.\n if config_lib.CONFIG[\"Monitoring.alert_email\"]:\n if status.nanny_status:\n nanny_msg = \"Nanny status: %s\" % status.nanny_status\n\n client = aff4.FACTORY.Open(client_id, token=self.token)\n hostname = client.Get(client.Schema.HOSTNAME)\n url = urllib.urlencode(((\"c\", client_id),\n (\"main\", \"HostInformation\")))\n\n renderer = rendering.FindRendererForObject(flow_obj.state)\n\n email_alerts.SendEmail(\n config_lib.CONFIG[\"Monitoring.alert_email\"],\n \"GRR server\",\n \"Client %s reported a crash.\" % client_id,\n self.mail_template % dict(\n client_id=client_id,\n admin_ui=config_lib.CONFIG[\"AdminUI.url\"],\n hostname=hostname,\n state=renderer.RawHTML(),\n urn=url,\n nanny_msg=nanny_msg,\n signature=config_lib.CONFIG[\"Email.signature\"]\n ),\n is_html=True)\n\n if nanny_msg:\n msg = \"Client crashed, \" + nanny_msg\n else:\n msg = \"Client crashed.\"\n\n # Now terminate the flow.\n flow.GRRFlow.TerminateFlow(message.session_id, reason=msg,\n token=self.token, force=True)\n\n\nclass ClientStartupHandler(flow.EventListener):\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:Startup\")\n\n @flow.EventHandler(allow_client_access=True, auth_required=False)\n def ProcessMessage(self, message=None, event=None):\n \"\"\"Handle a startup event.\"\"\"\n _ = event\n # We accept unauthenticated messages so there are no errors but we don't\n # store the results.\n if (message.auth_state !=\n rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED):\n return\n\n client_id = message.source\n\n client = aff4.FACTORY.Create(client_id, \"VFSGRRClient\", mode=\"rw\",\n token=self.token)\n old_info = client.Get(client.Schema.CLIENT_INFO)\n old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0)\n startup_info = rdfvalue.StartupInfo(message.args)\n info = startup_info.client_info\n\n # Only write to the datastore if we have new information.\n new_data = (info.client_name, info.client_version, info.revision,\n info.build_time, info.client_description)\n old_data = (old_info.client_name, old_info.client_version,\n old_info.revision, old_info.build_time,\n old_info.client_description)\n\n if new_data != old_data:\n client.Set(client.Schema.CLIENT_INFO(info))\n\n client.AddLabels(*info.labels, owner=\"GRR\")\n\n # Allow for some drift in the boot times (5 minutes).\n if abs(int(old_boot) - int(startup_info.boot_time)) > 300 * 1e6:\n client.Set(client.Schema.LAST_BOOT_TIME(startup_info.boot_time))\n\n client.Close()\n\n 
flow.Events.PublishEventInline(\"ClientStartup\", message, token=self.token)\n\n\nclass IgnoreResponses(flow.WellKnownFlow):\n \"\"\"This flow exists so other well known flows can delegate their responses.\"\"\"\n\n category = None\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:DevNull\")\n\n def ProcessMessage(self, message):\n pass\n\n\nclass KeepAliveArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.KeepAliveArgs\n\n\nclass KeepAlive(flow.GRRFlow):\n \"\"\"Requests that the clients stays alive for a period of time.\"\"\"\n\n # We already want to run this flow while waiting for a client approval.\n # Note that this can potentially be abused to launch a DDOS attack against\n # the frontend server(s) by putting all clients into fastpoll mode. The load\n # of idle polling messages is not that high though and this can only be done\n # by users that have a GRR account already so the risk is acceptable.\n ACL_ENFORCED = False\n\n category = \"/Administrative/\"\n behaviours = flow.GRRFlow.behaviours + \"BASIC\"\n\n sleep_time = 60\n args_type = KeepAliveArgs\n\n @flow.StateHandler(next_state=\"SendMessage\")\n def Start(self):\n self.state.Register(\"end_time\", self.args.duration.Expiry())\n self.CallState(next_state=\"SendMessage\")\n\n @flow.StateHandler(next_state=\"Sleep\")\n def SendMessage(self, responses):\n if not responses.success:\n self.Log(responses.status.error_message)\n raise flow.FlowError(responses.status.error_message)\n\n self.CallClient(\"Echo\", data=\"Wake up!\", next_state=\"Sleep\")\n\n @flow.StateHandler(next_state=\"SendMessage\")\n def Sleep(self, responses):\n if not responses.success:\n self.Log(responses.status.error_message)\n raise flow.FlowError(responses.status.error_message)\n\n if rdfvalue.RDFDatetime().Now() < self.state.end_time - self.sleep_time:\n start_time = rdfvalue.RDFDatetime().Now() + self.sleep_time\n self.CallState(next_state=\"SendMessage\", start_time=start_time)\n\n\nclass TerminateFlowArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.TerminateFlowArgs\n\n\nclass TerminateFlow(flow.GRRFlow):\n \"\"\"Terminate a flow with a given URN.\"\"\"\n # This flow can run on any client without ACL enforcement (an SUID flow).\n ACL_ENFORCED = False\n args_type = TerminateFlowArgs\n\n @flow.StateHandler()\n def Start(self):\n \"\"\"Terminate a flow. 
User has to have access to the flow.\"\"\"\n # We have to create special token here, because within the flow\n # token has supervisor access.\n check_token = access_control.ACLToken(username=self.token.username,\n reason=self.token.reason)\n # If we can read the flow, we're allowed to terminate it.\n data_store.DB.security_manager.CheckDataStoreAccess(\n check_token, [self.args.flow_urn], \"r\")\n\n flow.GRRFlow.TerminateFlow(self.args.flow_urn,\n reason=self.args.reason,\n token=self.token, force=True)\n\n\nclass LaunchBinaryArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.LaunchBinaryArgs\n\n\nclass LaunchBinary(flow.GRRFlow):\n \"\"\"Launch a signed binary on a client.\"\"\"\n\n category = \"/Administrative/\"\n\n AUTHORIZED_LABELS = [\"admin\"]\n args_type = LaunchBinaryArgs\n\n @flow.StateHandler(next_state=[\"End\"])\n def Start(self):\n fd = aff4.FACTORY.Open(self.args.binary, token=self.token)\n if not isinstance(fd, collections.GRRSignedBlob):\n raise RuntimeError(\"Executable binary %s not found.\" % self.args.binary)\n\n offset = 0\n write_path = \"%d\" % time.time()\n for i, blob in enumerate(fd):\n self.CallClient(\n \"ExecuteBinaryCommand\", executable=blob, more_data=i < fd.chunks-1,\n args=shlex.split(self.args.command_line), offset=offset,\n write_path=write_path, next_state=\"End\")\n\n offset += len(blob.data)\n\n def _TruncateResult(self, data):\n if len(data) > 2000:\n result = data[:2000] + \"... [truncated]\"\n else:\n result = data\n\n return result\n\n @flow.StateHandler()\n def End(self, responses):\n if not responses.success:\n raise IOError(responses.status)\n\n response = responses.First()\n if response:\n self.Log(\"Stdout: %s\" % self._TruncateResult(response.stdout))\n self.Log(\"Stderr: %s\" % self._TruncateResult(response.stderr))\n\n self.SendReply(response)\n\n\nclass RunReportFlowArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.RunReportFlowArgs\n\n\nclass RunReport(flow.GRRGlobalFlow):\n \"\"\"Run a report and send the result via email.\"\"\"\n\n category = \"/Reporting/\"\n\n args_type = RunReportFlowArgs\n behaviours = flow.GRRGlobalFlow.behaviours + \"BASIC\"\n\n ACL_ENFORCED = False\n\n # Only admins are allows to run reports.\n AUTHORIZED_LABELS = [\"admin\"]\n\n @flow.StateHandler(next_state=\"RunReport\")\n def Start(self):\n if self.state.args.report_name not in reports.Report.classes:\n raise flow.FlowError(\"No such report %s\" % self.state.args.report_name)\n else:\n self.CallState(next_state=\"RunReport\")\n\n @flow.StateHandler(next_state=\"EmailReport\")\n def RunReport(self):\n \"\"\"Run the report.\"\"\"\n report_cls = reports.Report.GetPlugin(self.state.args.report_name)\n report_obj = report_cls(token=self.token)\n report_obj.Run()\n report_obj.MailReport(self.state.args.email)\n\n\nclass ApplyLabelsToClientsFlowArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.ApplyLabelsToClientsFlowArgs\n\n\nclass ApplyLabelsToClientsFlow(flow.GRRGlobalFlow):\n\n args_type = ApplyLabelsToClientsFlowArgs\n\n ACL_ENFORCED = False\n\n @flow.StateHandler()\n def Start(self):\n audit_description = \",\".join([self.token.username + \".\" + name\n for name in self.args.labels])\n audit_events = []\n try:\n client_objs = aff4.FACTORY.MultiOpen(\n self.args.clients, aff4_type=\"VFSGRRClient\", mode=\"rw\",\n token=self.token)\n for client_obj in client_objs:\n client_obj.AddLabels(*self.args.labels)\n client_obj.Close()\n\n audit_events.append(\n rdfvalue.AuditEvent(user=self.token.username,\n action=\"CLIENT_ADD_LABEL\",\n 
flow_name=\"ApplyLabelsToClientsFlow\",\n client=client_obj.urn,\n description=audit_description))\n finally:\n flow.Events.PublishMultipleEvents({\"Audit\": audit_events},\n token=self.token)\n","repo_name":"ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert","sub_path":"lib/flows/general/administrative.py","file_name":"administrative.py","file_ext":"py","file_size_in_byte":29876,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"}
+{"seq_id":"41425979479","text":"\nimport socket\n\nmyFirstSocket = socket.socket()\nmyFirstSocket.bind((\"\", 50000))\nmyFirstSocket.listen(1)\n\nwhile True:\n clientSocket, clientAdres = myFirstSocket.accept()\n messageSend = input(\"Введите сообщение: \")\n if messageSend == 'by by':\n break\n clientSocket.sendall(messageSend.encode(encoding=\"utf-8\"))\n\n while True:\n data = clientSocket.recv(1024)\n receivedMsg = data.decode(encoding=\"utf-8\")\n print(receivedMsg)\n\n\nclientSocket.close() \n\n\n","repo_name":"Allex413/homeNetwork1","sub_path":"HomeWork_Network1/homeServer.py","file_name":"homeServer.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"24173814964","text":"import logging\n\nimport argparse as argp\nimport functools\nfrom glob import glob\nimport gzip\nimport imgworker\nfrom multiprocessing import Pool\nimport numpy as np\nimport os\nimport sys\nimport tarfile\nfrom time import time\nimport yaml\n\nfrom neon.util.compat import range, StringIO\nfrom neon.util.param import opt_param\nfrom neon.util.persist import serialize\n\nTARGET_SIZE = None\nSQUARE_CROP = True\n\nlogger = logging.getLogger(__name__)\n\n\n# NOTE: We have to leave this helper function out of the class and use the\n# global variable hack so that we can use multiprocess pool.map\ndef proc_img(imgfile, is_string=False):\n from PIL import Image\n if is_string:\n imgfile = StringIO(imgfile)\n im = Image.open(imgfile)\n\n # This part does the processing\n scale_factor = TARGET_SIZE / np.float32(min(im.size))\n (wnew, hnew) = map(lambda x: int(round(scale_factor * x)), im.size)\n if scale_factor != 1:\n filt = Image.BICUBIC if scale_factor > 1 else Image.ANTIALIAS\n im = im.resize((wnew, hnew), filt)\n\n if SQUARE_CROP is True:\n (cx, cy) = map(lambda x: (x - TARGET_SIZE) / 2, (wnew, hnew))\n im = im.crop((cx, cy, cx+TARGET_SIZE, cy+TARGET_SIZE))\n\n buf = StringIO()\n im.save(buf, format='JPEG')\n return buf.getvalue()\n\n\nclass BatchWriter(object):\n\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n self.out_dir = os.path.expanduser(self.save_dir)\n self.in_dir = os.path.expanduser(self.image_dir)\n self.batch_size = self.macro_size\n global TARGET_SIZE, SQUARE_CROP\n TARGET_SIZE = self.output_image_size\n SQUARE_CROP = self.square_crop\n opt_param(self, ['file_pattern'], '*.jpg')\n opt_param(self, ['validation_pct'], 0.2)\n opt_param(self, ['num_workers'], 5)\n opt_param(self, ['class_samples_max'])\n self.train_file = os.path.join(self.out_dir, 'train_file.csv.gz')\n self.val_file = os.path.join(self.out_dir, 'val_file.csv.gz')\n self.stats = os.path.join(self.out_dir, 'dataset_cache.pkl')\n self.val_mean = np.zeros((self.output_image_size,\n self.output_image_size,\n self.num_channels), dtype=np.uint8)\n self.train_mean = np.zeros((self.output_image_size,\n self.output_image_size,\n self.num_channels), dtype=np.uint8)\n\n def __str__(self):\n pairs = map(lambda a: a[0] + ': ' + a[1],\n zip(self.__dict__.keys(),\n map(str, self.__dict__.values())))\n return \"\\n\".join(pairs)\n\n def write_csv_files(self):\n # Get the labels as the subdirs\n subdirs = glob(os.path.join(self.in_dir, '*'))\n labels = sorted(map(lambda x: os.path.basename(x), subdirs))\n indexes = range(len(labels))\n self.labels_dict = {k: v for k, v in zip(labels, indexes)}\n\n tlines = []\n vlines = []\n for subdir in subdirs:\n subdir_label = self.labels_dict[os.path.basename(subdir)]\n files = glob(os.path.join(subdir, self.file_pattern))\n np.random.shuffle(files)\n if self.class_samples_max is not None:\n files = files[:self.class_samples_max]\n lines = [(filename, subdir_label) for filename in files]\n v_idx = int(self.validation_pct * len(lines))\n tlines += lines[v_idx:]\n vlines += lines[:v_idx]\n\n np.random.shuffle(tlines)\n np.random.shuffle(vlines)\n\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n\n for ff, ll in zip([self.train_file, self.val_file], [tlines, vlines]):\n with gzip.open(ff, 'wb') as f:\n f.write('filename,l_id\\n')\n for tup in ll:\n f.write('{},{}\\n'.format(*tup))\n f.close()\n\n # Write out cached stats for this data\n self.ntrain = (len(tlines) + self.batch_size - 1) / self.batch_size\n self.train_nrec = 
len(tlines)\n self.nval = (len(vlines) + self.batch_size - 1) / self.batch_size\n self.val_nrec = len(vlines)\n self.train_start = 0\n self.val_start = 10 ** int(np.log10(self.ntrain * 10))\n\n def parse_file_list(self, infile):\n import pandas as pd\n compression = 'gzip' if infile.endswith('.gz') else None\n df = pd.read_csv(infile, compression=compression)\n\n lk = filter(lambda x: x.startswith('l'), df.keys())\n tk = filter(lambda x: x.startswith('t'), df.keys())\n\n labels = {ll: np.array(df[ll].values, np.int32) for ll in lk}\n targets = np.array(df[tk].values, np.float32) if len(tk) > 0 else None\n imfiles = df['filename'].values\n\n self.nclass = {ll: (max(df[ll].values) + 1) for ll in lk}\n return imfiles, labels, targets\n\n def write_batches(self, name, start, labels, imfiles, targets=None,\n is_tar=False):\n pool = Pool(processes=self.num_workers)\n psz = self.batch_size\n osz = self.output_image_size\n npts = (len(imfiles) + psz - 1) / psz\n\n imfiles = [imfiles[i*psz: (i+1)*psz] for i in range(npts)]\n\n if targets is not None:\n targets = [targets[i*psz: (i+1)*psz].T.copy() for i in range(npts)]\n\n labels = [{k: v[i*psz: (i+1)*psz] for k, v in labels.iteritems()}\n for i in range(npts)]\n\n accum_buf = np.zeros((osz, osz, self.num_channels), dtype=np.int32)\n batch_mean = np.zeros(accum_buf.shape, dtype=np.uint8)\n logger.info(\"Writing %s batches...\", name)\n for i, jpeg_file_batch in enumerate(imfiles):\n t = time()\n if is_tar:\n jpeg_file_batch = [j.read() for j in jpeg_file_batch]\n jpeg_strings = pool.map(\n functools.partial(proc_img, is_string=is_tar), jpeg_file_batch)\n targets_batch = None if targets is None else targets[i]\n labels_batch = labels[i]\n bfile = os.path.join(self.out_dir, 'data_batch_%d' % (start + i))\n serialize({'data': jpeg_strings,\n 'labels': labels_batch,\n 'targets': targets_batch},\n bfile)\n logger.info(\"Wrote to %s (%s batch %d of %d) (%.2f sec)\",\n self.out_dir, name, i + 1, len(imfiles), time() - t)\n\n # get the means and accumulate\n imgworker.calc_batch_mean(jpglist=jpeg_strings, tgt=batch_mean,\n orig_size=osz, rgb=self.rgb,\n nthreads=self.num_workers)\n\n # scale for the case where we have an undersized batch\n if len(jpeg_strings) < self.batch_size:\n batch_mean *= len(jpeg_strings) / self.batch_size\n accum_buf += batch_mean\n pool.close()\n mean_buf = self.train_mean if name == 'train' else self.val_mean\n mean_buf[:] = accum_buf / len(imfiles)\n\n def save_meta(self):\n serialize({'ntrain': self.ntrain,\n 'nval': self.nval,\n 'train_start': self.train_start,\n 'val_start': self.val_start,\n 'macro_size': self.batch_size,\n 'train_mean': self.train_mean,\n 'val_mean': self.val_mean,\n 'labels_dict': self.labels_dict,\n 'val_nrec': self.val_nrec,\n 'train_nrec': self.train_nrec,\n 'nclass': self.nclass}, self.stats)\n\n def run(self):\n self.write_csv_files()\n namelist = ['train', 'validation']\n filelist = [self.train_file, self.val_file]\n startlist = [self.train_start, self.val_start]\n for sname, fname, start in zip(namelist, filelist, startlist):\n logger.info(\"%s %s %s\", sname, fname, start)\n if fname is not None and os.path.exists(fname):\n imgs, labels, targets = self.parse_file_list(fname)\n self.write_batches(sname, start, labels, imgs, targets)\n else:\n logger.info('Skipping %s, file missing', sname)\n self.save_meta()\n\n\nclass BatchWriterImagenet(BatchWriter):\n\n # code below adapted from Alex Krizhevsky's cuda-convnet2 library,\n # make-data.py\n # Copyright 2014 Google Inc. 
All rights reserved.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n # You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n ##########################################################################\n\n def run(self):\n bsz = self.batch_size\n\n load_dir = self.in_dir\n # load_dir = os.path.join(os.path.expandvars(\n # os.path.expanduser(self.in_dir)), 'I1K')\n train_tar = os.path.join(load_dir, 'ILSVRC2012_img_train.tar')\n validation_tar = os.path.join(load_dir, 'ILSVRC2012_img_val.tar')\n devkit_tar = os.path.join(load_dir, 'ILSVRC2012_devkit_t12.tar.gz')\n self.url = \"http://www.image-net.org/download-imageurls\"\n for infile in (train_tar, validation_tar, devkit_tar):\n if not os.path.exists(infile):\n raise IOError(infile + \" not found. Please ensure you have\"\n \"ImageNet downloaded. More info here: \" +\n self.url)\n labels_dict, label_names, val_labels = self.parse_dev_meta(devkit_tar)\n self.labels_dict = labels_dict\n np.random.seed(0)\n with self.open_tar(train_tar, 'training tar') as tf:\n s_sets = tf.getmembers()\n s_tars = [tarfile.open(fileobj=tf.extractfile(s)) for s in s_sets]\n\n logger.info(\"Loaded synset tars.\")\n logger.info('Building trainset list ( can take a while)...')\n\n t_jpegfiles = []\n for i, st in enumerate(s_tars):\n if i % 100 == 0:\n pct_done = int(round((100.0 * i) / len(s_tars)))\n logger.info(\"%d%% ...\", pct_done)\n t_jpegfiles += [st.extractfile(m) for m in st.getmembers()]\n st.close()\n\n np.random.shuffle(t_jpegfiles)\n train_labels = [[labels_dict[j.name[:9]]] for j in t_jpegfiles]\n num_train_files = len(t_jpegfiles)\n logger.info(\"created list of jpg files\")\n logger.info(\"Number of training files = %d\", num_train_files)\n\n self.ntrain = (num_train_files + bsz - 1) / bsz\n self.train_nrec = num_train_files\n self.nclass = {'l_id': 1000}\n self.train_start = 0\n train_labels = {'l_id': np.array(train_labels, dtype=np.int32)}\n self.write_batches('train', self.train_start, train_labels,\n t_jpegfiles, targets=None, is_tar=True)\n\n with self.open_tar(validation_tar, 'validation tar') as tf:\n v_jpegfiles = sorted([tf.extractfile(m) for m in tf.getmembers()],\n key=lambda x: x.name)\n num_val_files = len(v_jpegfiles)\n\n self.nval = (num_val_files + bsz - 1) / bsz\n self.val_nrec = num_val_files\n self.val_start = 10 ** int(np.log10(self.ntrain) + 1)\n val_labels = {'l_id': np.array(val_labels, dtype=np.int32)}\n self.write_batches('validation', self.val_start, val_labels,\n v_jpegfiles, targets=None, is_tar=True)\n self.save_meta()\n\n def open_tar(self, path, name):\n if not os.path.exists(path):\n logger.error(\"ILSVRC 2012 %s not found at %s.\",\n \"Make sure to set ILSVRC_SRC_DIR correctly at the\",\n \"top of this file (%s).\" % (name, path, sys.argv[0]))\n sys.exit(1)\n return tarfile.open(path)\n\n def parse_dev_meta(self, ilsvrc_devkit_tar):\n tf = self.open_tar(ilsvrc_devkit_tar, 'devkit tar')\n fmeta = tf.extractfile(\n tf.getmember('ILSVRC2012_devkit_t12/data/meta.mat'))\n import scipy.io\n meta_mat = scipy.io.loadmat(StringIO(fmeta.read()))\n labels_dic = dict(\n 
(m[0][1][0], m[0][0][0][0] - 1) for m in meta_mat['synsets']\n if m[0][0][0][0] >= 1 and m[0][0][0][0] <= 1000)\n label_names_dic = dict(\n (m[0][1][0], m[0][2][0]) for m in meta_mat['synsets']\n if (m[0][0][0][0] >= 1 and m[0][0][0][0] <= 1000))\n label_names = [tup[1] for tup in sorted(\n [(v, label_names_dic[k]) for k, v in labels_dic.items()],\n key=lambda x:x[0])]\n\n fvgtruth = tf.extractfile(tf.getmember(\n 'ILSVRC2012_devkit_t12/data/' +\n 'ILSVRC2012_validation_ground_truth.txt'))\n vgtruth = [[int(line.strip()) - 1] for line in fvgtruth.readlines()]\n tf.close()\n return labels_dic, label_names, vgtruth\n\n\nif __name__ == \"__main__\":\n parser = argp.ArgumentParser()\n parser.add_argument('--config', help='Configuration File', required=True)\n parser.add_argument('--dataset', help='Dataset name', required=True)\n\n args = parser.parse_args()\n with open(args.config) as f:\n ycfg = yaml.load(f)[args.dataset]\n bw = BatchWriterImagenet(**ycfg)\n bw.run()\n","repo_name":"ominux/neon","sub_path":"neon/util/batch_writer.py","file_name":"batch_writer.py","file_ext":"py","file_size_in_byte":13785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"}
+{"seq_id":"13818068233","text":"import psycopg2\n\n# # Settings for Alpha\n# HOST = \"tklid-monit0001.vm.mos.cloud.sbrf.ru\"\n# PORT = \"5433\"\n# DB_NAME = \"aiadviserdb\"\n# USER_1 = \"aiadviser_admin\"\n# PASSWORD_1 = \"aiadviser0TEST$Admin123\"\n# USER_2 = \"aiadviser\"\n# PASSWORD_2 = \"aiadviser0TEST$User123\"\n# USER = USER_1\n# PASSWORD = PASSWORD_1\n\n# Settings for test\nHOST = \"127.0.0.1\"\nPORT = \"5433\"\nDB_NAME = \"translation_memory_large\"\nUSER = \"postgres\"\nPASSWORD = \"100542\"\n\n\nconn = psycopg2.connect(\n user=USER,\n password=PASSWORD,\n host=HOST,\n port=PORT,\n database=DB_NAME\n)\n\ncursor = conn.cursor()\ntry:\n query_1 = \"\"\"\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 'public'\n ORDER BY table_name;\n \"\"\"\n cursor.execute(query_1)\n print('Done')\n records = cursor.fetchall()\n print(records)\n\n query_2 = \"\"\"\n SELECT *\n FROM cat_tmgroup\n \"\"\"\n cursor.execute(query_2)\n print('Done')\n records = cursor.fetchall()\n print(records)\n\n query_3 = \"\"\"\n SELECT \n table_name, \n column_name, \n data_type \n FROM \n information_schema.columns\n WHERE \n table_name = 'cat_tmgroup';\n \"\"\"\n cursor.execute(query_3)\n print('Done')\n records = cursor.fetchall()\n print(records)\n\n last_update = f\"to_timestamp('16-05-2011 15:36:38', 'dd-mm-yyyy hh24:mi:ss')\"\n query_4 = f\"\"\"\n SELECT cat_tmgroup.id, cat_tmgroup.updated_at, cat_sourceunit.text AS source, cat_translationunit.text AS translation \n FROM cat_tmunit \n INNER JOIN cat_translationunit \n ON cat_translationunit.id=cat_tmunit.translation_unit_id \n INNER JOIN cat_sourceunit \n ON cat_sourceunit.id=cat_tmunit.source_unit_id \n INNER JOIN cat_tmgroup \n ON cat_tmgroup.id=cat_tmunit.tm_group_id \n WHERE cat_tmunit.language_id IN (20, 72) AND cat_tmgroup.updated_at > {last_update};\n \"\"\"\n # query_4 = f\"\"\"\n # SELECT cat_tmunit.tm_group_id , cat_sourceunit.text AS source, cat_translationunit.text AS translation\n # FROM cat_tmunit\n # INNER JOIN cat_translationunit\n # ON cat_translationunit.id=cat_tmunit.translation_unit_id\n # INNER JOIN cat_sourceunit\n # ON cat_sourceunit.id=cat_tmunit.source_unit_id\n # \"\"\"\n cursor.execute(query_4)\n print('Done main')\n records = cursor.fetchall()\n print(records[:3])\n\n # query_5 = f\"\"\"\n # SELECT *\n # FROM cat_tmgroup\n # \"\"\"\n # cursor.execute(query_5)\n # print('Done 5')\n # records = cursor.fetchall()\n # print(records)\nexcept:\n print('Some problems')\nfinally:\n cursor.close()\n conn.close()\n\n\n","repo_name":"dmi3eva/katyas_zone","sub_path":"db/check_psycorpg.py","file_name":"check_psycorpg.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"22983083779","text":"# 7 time() 함수 써서 지금 몇시몇분인지 구하기\n\nimport time # time 모듈 불러와서 time() 쓸 수 있게 하기\n# time()은 1970 01 01 이후 흘러온 전체 초를 반환함\nfseconds = time.time() # 이 값은 float이다\n\n# 현재 시각은 24시, 60분을 넘겨 표시할 수 없음\nnowhrs = int((fseconds//(60*60))%24)\nif nowhrs > 12: # 12시간 표기 형식\n nowhrs -= 12\n nowhrs = \"오후 \" + str(nowhrs)\nelif nowhrs == 12:\n nowhrs = \"오후 \" + str(nowhrs)\nelif nowhrs < 12:\n nowhrs = \"오전 \" + str(nowhrs)\n \nnowmnts = int((fseconds//60)%60)\n\nprint(\"현재 시각(GMT):\", nowhrs+\"시\", str(nowmnts)+\"분\")\n","repo_name":"cuberisu/Practice","sub_path":"practice_3-7.py","file_name":"practice_3-7.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"75249326515","text":"\"\"\"The PoolSense integration.\"\"\"\nfrom datetime import timedelta\nimport logging\n\nimport async_timeout\nfrom poolsense import PoolSense\nfrom poolsense.exceptions import PoolSenseError\n\nfrom openpeerpower.config_entries import ConfigEntry\nfrom openpeerpower.const import CONF_EMAIL, CONF_PASSWORD\nfrom openpeerpower.core import OpenPeerPower\nfrom openpeerpower.helpers import aiohttp_client\nfrom openpeerpower.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n UpdateFailed,\n)\n\nfrom .const import DOMAIN\n\nPLATFORMS = [\"sensor\", \"binary_sensor\"]\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):\n \"\"\"Set up PoolSense from a config entry.\"\"\"\n\n poolsense = PoolSense(\n aiohttp_client.async_get_clientsession(opp),\n entry.data[CONF_EMAIL],\n entry.data[CONF_PASSWORD],\n )\n auth_valid = await poolsense.test_poolsense_credentials()\n\n if not auth_valid:\n _LOGGER.error(\"Invalid authentication\")\n return False\n\n coordinator = PoolSenseDataUpdateCoordinator(opp, entry)\n\n await coordinator.async_config_entry_first_refresh()\n\n opp.data.setdefault(DOMAIN, {})\n opp.data[DOMAIN][entry.entry_id] = coordinator\n\n opp.config_entries.async_setup_platforms(entry, PLATFORMS)\n\n return True\n\n\nasync def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry):\n \"\"\"Unload a config entry.\"\"\"\n unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n opp.data[DOMAIN].pop(entry.entry_id)\n return unload_ok\n\n\nclass PoolSenseEntity(CoordinatorEntity):\n \"\"\"Implements a common class elements representing the PoolSense component.\"\"\"\n\n def __init__(self, coordinator, email, info_type):\n \"\"\"Initialize poolsense sensor.\"\"\"\n super().__init__(coordinator)\n self._unique_id = f\"{email}-{info_type}\"\n self.info_type = info_type\n\n @property\n def unique_id(self):\n \"\"\"Return a unique id.\"\"\"\n return self._unique_id\n\n\nclass PoolSenseDataUpdateCoordinator(DataUpdateCoordinator):\n \"\"\"Define an object to hold PoolSense data.\"\"\"\n\n def __init__(self, opp, entry):\n \"\"\"Initialize.\"\"\"\n self.poolsense = PoolSense(\n aiohttp_client.async_get_clientsession(opp),\n entry.data[CONF_EMAIL],\n entry.data[CONF_PASSWORD],\n )\n self.opp = opp\n self.entry = entry\n\n super().__init__(opp, _LOGGER, name=DOMAIN, update_interval=timedelta(hours=1))\n\n async def _async_update_data(self):\n \"\"\"Update data via library.\"\"\"\n data = {}\n with async_timeout.timeout(10):\n try:\n data = await self.poolsense.get_poolsense_data()\n except (PoolSenseError) as error:\n _LOGGER.error(\"PoolSense query did not complete\")\n raise UpdateFailed(error) from error\n\n return data\n","repo_name":"OpenPeerPower/core","sub_path":"openpeerpower/components/poolsense/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"34835702666","text":"# config.py\nfrom yacs.config import CfgNode as CN\n\n_C = CN()\n\n_C.WORK = CN()\n_C.WORK.PATH = \"\"\n\n_C.DATA = CN()\n_C.DATA.PATH = \"\"\n_C.DATA.PATH_TEST = \"\"\n_C.DATA.PATH_TRAIN = \"\"\n_C.DATA.FNAME_LABELS = \"\"\n\n_C.PRETRAINED = CN()\n_C.PRETRAINED.PATH = \"\"\n_C.PRETRAINED.FNAME_PREMODEL = \"\"\n\n_C.PROCESSED = CN()\n_C.PROCESSED.PATH = \"\"\n\n_C.OUTPUT = CN()\n_C.OUTPUT.PATH = \"\"\n\n_C.TRAIN = CN()\n_C.TRAIN.FRAC_FOR_TRAIN = 0.8\n_C.TRAIN.NUM_CLASSES = 0\n_C.TRAIN.NUM_EPOCHS = 3\n_C.TRAIN.BATCH_SIZE = 100\n_C.TRAIN.LEARNING_RATE = 0.001\n_C.TRAIN.MOMENTUM = 0.9\n_C.TRAIN.STEP_SIZE = 7\n_C.TRAIN.GAMMA = 0.1\n\n_C.PREDICT = CN()\n_C.PREDICT.BATCH_SIZE = 100\n_C.PREDICT.MODEL_PATH = \"D:\\\\GitWork\\\\dog_breed\\\\pretrained\\\\\"\n_C.PREDICT.MODEL_FILE = 'resnet50_20200926-2053_t9175_v9339.pth'\n\n\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for the project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()","repo_name":"morpheus9631/dog_breed","sub_path":"configs/config_train_v3.py","file_name":"config_train_v3.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"31509786749","text":"import time\nimport threading\nimport win32api\nimport win32con\n\nmonke = int(input('input length of autoclicker: '))\ndef autoclick():\n \n \n delay = 0.02\n\n \n while not stop_flag:\n # Use the win32api and win32con modules to perform a left mouse click\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n \n time.sleep(delay)\n\n\nstop_flag = False\n\n\nthreading.Thread(target=autoclick).start()\n\n\ntime.sleep(monke)\n\n\nstop_flag = not stop_flag\n","repo_name":"cybershinig4mi/AutoClickerOrganic","sub_path":"clicker.py","file_name":"clicker.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"17698460573","text":"#!/usr/bin/env python3\n\nimport glob\nimport os\nimport re\n\nfrom pyrocko.guts import Object, String, Dict, List\n\n\nclass TestResult(Object):\n package = String.T()\n branch = String.T(optional=True)\n box = String.T()\n py_version = String.T(optional=True)\n prerequisite_versions = Dict.T(\n String.T(), String.T(), optional=True, default={})\n log = String.T(optional=True, yamlstyle='|')\n result = String.T(optional=True)\n errors = List.T(String.T(), optional=True, default=[], yamlstyle='block')\n fails = List.T(String.T(), optional=True, default=[], yamlstyle='block')\n skips = List.T(String.T(), optional=True, default=[], yamlstyle='block')\n\n\ndef parse_result(res, package, box, fn):\n\n with open(fn, 'r') as f:\n txt = f.read()\n\n lines = txt.splitlines()\n for line in lines[:7]:\n pack, vers = line.split(': ')\n res.prerequisite_versions[pack] = vers\n\n txt = '\\n'.join(\n line for line in lines if not re.match(r'^Q\\w+::', line))\n txt = re.sub(r' +\\n', '\\n', txt)\n\n res.log = txt.strip()\n\n m = re.search(r'^=+ (.*) =+$', lines[-1], re.M)\n if m:\n if m.group(1).find('failed') != -1:\n res.result = 'FAILED (%s)' % m.group(1)\n else:\n res.result = 'OK (%s)' % m.group(1)\n\n count = {}\n for x in re.findall(r'^(.*) SKIPPED', txt, re.M):\n if x not in count:\n count[x] = 1\n else:\n count[x] += 1\n\n for x in sorted(count.keys()):\n res.skips.append('%s (%ix)' % (x, count[x]))\n\n for x in re.findall(r'^(.*) FAILED', txt, re.M):\n res.fails.append(x)\n\n\ndef iter_results():\n package = 'pyrocko'\n if os.path.exists('vagrant'):\n boxes = os.listdir('vagrant')\n\n else:\n boxes = [os.path.basename(os.path.abspath('.'))]\n os.chdir('../..')\n\n for box in sorted(boxes):\n\n fns = glob.glob(os.path.join('vagrant', box, 'test-*.py[23].out'))\n if fns:\n for fn in fns:\n m = re.search(r'/test-(.*)\\.py([23])\\.out$', fn)\n res = TestResult(package=package, branch=m.group(1), box=box)\n res.py_version = m.group(2)\n parse_result(res, package, box, fn)\n yield res\n\n else:\n res = TestResult(\n package=package, box=box,\n result='ERROR (running the tests failed)')\n\n yield res\n\n\nif __name__ == '__main__':\n for r in iter_results():\n print(r)\n","repo_name":"pyrocko/pyrocko","sub_path":"maintenance/vagrant_tests_collect.py","file_name":"vagrant_tests_collect.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"4"}
+{"seq_id":"35022507025","text":"class Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n ans = []\n numLen = len(nums)\n def perm(n,cur):\n if len(cur) == numLen:\n if len(set(cur)) == numLen:\n ans.append(cur[:])\n return\n for i in range(numLen):\n # if nums[i] != nums[n]:\n cur.append(nums[i])\n perm(i, cur)\n cur.pop()\n perm(1,[])\n return ans","repo_name":"abelops/Competitive-Programming","sub_path":"0046-permutations/0046-permutations.py","file_name":"0046-permutations.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"6951700748","text":"import torch\nimport torch.nn as nn\n\nclass Model(nn.Module):\n def __init__(self,in_channel=3,out_channel=1):\n super(Model,self).__init__()\n self.pooling=nn.MaxPool2d((2,2))\n self.upsample=nn.Upsample(scale_factor=2,mode='bicubic')\n self.layer1=nn.Sequential(\n nn.Conv2d(in_channel,64,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,1,1),\n nn.ReLU(inplace=True),\n )\n self.layer2=nn.Sequential(\n nn.Conv2d(64,128,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128,128,3,1,1),\n nn.ReLU(inplace=True),\n )\n self.layer3=nn.Sequential(\n nn.Conv2d(128,256,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,256,3,1,1),\n nn.ReLU(inplace=True),\n )\n self.layer4=nn.Sequential(\n nn.Conv2d(256,256,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,256,3,1,1),\n nn.ReLU(inplace=True),\n )\n #concat[layer4_out,layer3_out]\n self.layer5=nn.Sequential(\n nn.Conv2d(512,128,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128,128,3,1,1),\n nn.ReLU(inplace=True),\n )\n #concat[layer5_out,layer3_out]\n self.layer6=nn.Sequential(\n nn.Conv2d(256,64,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,1,1),\n nn.ReLU(inplace=True),\n )\n #concat[layer5_out,layer3_out]\n self.layer7=nn.Sequential(\n nn.Conv2d(128,64,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,1,3,1,1),\n nn.Sigmoid()\n )\n\n def forward(self,x):\n x1=self.layer1(x)\n x1_pool=self.pooling(x1)\n\n x2=self.layer2(x1_pool)\n x2_pool=self.pooling(x2)\n\n x3=self.layer3(x2_pool)\n x3_pool=self.pooling(x3)\n\n x4=self.layer4(x3_pool)\n x4_pool=self.upsample(x4)\n\n x5=self.layer5(torch.cat([x4_pool,x3],dim=1))\n x5_pool=self.upsample(x5)\n\n x6=self.layer6(torch.cat([x5_pool,x2],dim=1))\n x6_pool=self.upsample(x6)\n\n x7=self.layer7(torch.cat([x6_pool,x1],dim=1))\n\n return x7\n\n\ndef main():\n model=Model().cuda()\n a=torch.randn((8,3,256,256)).cuda()\n y=model(a)\n print(a.shape,y.shape)\n \n\nif __name__ == '__main__':\n main()\n \n\n \n\n","repo_name":"helloful/ImageSeg","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"17090181634","text":"import os\nimport sys\nimport urllib.request\nfrom tqdm import tqdm\nfrom itertools import chain\nimport zipfile\nimport json\n\ndef downloadurltofile(url,filename):\n if not os.path.exists(filename):\n print(f'--> Downloading {filename} <--'.center(80, '#'))\n with open(filename, 'wb') as file:\n with urllib.request.urlopen(url) as resp:\n length = int(resp.getheader('content-length'))\n blocksize = max(4096, length // 100)\n with tqdm(total=length, file=sys.stdout) as pbar:\n while True:\n buff = resp.read(blocksize)\n if not buff:\n break\n file.write(buff)\n pbar.update(len(buff))\n print(' Download complete '.center(80,'#'))\n else:\n print(f'-->> {filename} file already exists locally <<--'.center(80, '#'))\n print()\n\ndef download(url,targetfolder,targetfile):\n path = os.getcwd()\n data_dir = os.path.abspath(os.path.join(path, targetfolder))\n\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n \n targetfile = os.path.join(data_dir, targetfile)\n downloadurltofile(url,targetfile)\n return data_dir,targetfile\n\ndef unzippedfile(folder,file):\n with zipfile.ZipFile(file, 'r') as zip_ref:\n zip_ref.extractall(folder)\n\ndef upload_data(workspace, datastore, src_dir, tgt_path):\n datastore.upload(\n src_dir=src_dir,\n target_path=tgt_path,\n show_progress=True)\n print(' Upload complete '.center(80,'#'))\n\ndef get_config(configfile):\n jsonfile = open(configfile)\n configdata = json.load(jsonfile)\n return configdata\n","repo_name":"manuelreyesgomez/ClaraCovidTranferLearningExample","sub_path":"ngccontent.py","file_name":"ngccontent.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"16960795323","text":"import torch\nfrom abc import ABC, abstractmethod\nfrom typing import (\n Tuple,\n Union,\n Dict,\n List,\n)\nfrom shell_data.utils.utils import knn_dist\nimport logging\n\n\nclass Buffer(ABC):\n @abstractmethod\n def add_data(self, data):\n pass\n\n @abstractmethod\n def get_data(self, batch_size):\n pass\n\n @abstractmethod\n def __len__(self):\n pass\n\n def is_empty(self):\n return len(self) == 0\n\n def update_tasks(self, task_idx):\n pass\n\n\nclass SupervisedLearningBuffer(Buffer):\n \"\"\"\n Infinite-size buffer for supervised learning tasks.\n Consisting of a tensor of features and a tensor of labels.\n \"\"\"\n\n def __init__(self, dim, task):\n super().__init__()\n self.dim = dim\n self.X = torch.empty(0, *dim)\n label_type = torch.long if task == 'classification' else torch.float\n self.y = torch.empty(0, dtype=label_type)\n\n def add_data(self, data, dedup=True):\n if dedup and len(self) > 0:\n data = self.dedup(data)\n x, y = data\n self.X = torch.cat((self.X, x))\n self.y = torch.cat((self.y, y))\n\n def dedup(self, data, ret_mask=False) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Process data and remove the data that is already in the buffer.\n\n Return a mask: true if the data is NOT in the buffer. False otherwise.\n \"\"\"\n if len(self) == 0:\n if ret_mask:\n return torch.ones(len(data[0]), dtype=torch.bool)\n return data\n\n x, y = data\n distances = knn_dist(x, self.X, k=1)\n # if distances = 0, then the data is already in the buffer and should be removed\n eps = 0.01 # HACK: because of floating point error\n mask = distances > eps\n logging.debug(f\"No. of duplicates: {len(mask) - mask.sum()}\")\n if ret_mask:\n return mask\n return x[mask], y[mask]\n\n def get_data(self, batch_size):\n \"\"\"\n Sample (without replacement) a batch of data from the buffer.\n \"\"\"\n batch_size = min(batch_size, len(self))\n idx = torch.randperm(len(self.X))[:batch_size]\n return self.X[idx], self.y[idx]\n\n def __len__(self):\n return len(self.X)\n\n def save_buffer(self, path_name):\n torch.save(self.X, f\"{path_name}_X.pt\")\n torch.save(self.y, f\"{path_name}_y.pt\")\n\n def load(self, path_name):\n self.X = torch.load(f\"{path_name}_X.pt\")\n self.y = torch.load(f\"{path_name}_y.pt\")\n\n\nclass ClassifcationBuffer(SupervisedLearningBuffer):\n def __init__(self, dim, num_classes):\n super().__init__(dim, 'classification')\n self.num_classes = num_classes\n\n def get_cls_counts(self):\n # HACK: assume that the num_cls = 10\n return {f\"cls_{i}\": (self.y == i).sum().item() for i in range(self.num_classes)}\n\n\nclass ReservoirSamplingClassificationBuffer(ClassifcationBuffer):\n def __init__(self, dim, buffer_size, num_classes):\n super().__init__(dim, num_classes)\n self.buffer_size = buffer_size\n self._buffer_weights = torch.zeros(0)\n\n # https://avalanche-api.continualai.org/en/v0.1.0/_modules/avalanche/training/storage_policy.html#ReservoirSamplingBuffer\n def add_data(self, data, dedup=True):\n if len(data[0]) == 0:\n return\n if dedup and len(self) > 0:\n data = self.dedup(data)\n x, y = data\n # self.X = torch.cat((self.X, x))\n # self.y = torch.cat((self.y, y))\n new_weights = torch.rand(len(x))\n cat_weights = torch.cat([new_weights, self._buffer_weights])\n cat_x = torch.cat([x, self.X])\n cat_y = torch.cat([y, self.y])\n sorted_weights, sorted_idxs = cat_weights.sort(descending=True)\n\n buffer_idxs = sorted_idxs[:self.buffer_size]\n self.X = cat_x[buffer_idxs]\n self.y = cat_y[buffer_idxs]\n self._buffer_weights = 
sorted_weights[:self.buffer_size]\n\n\nclass RegressionBuffer(SupervisedLearningBuffer):\n def __init__(self, dim):\n super().__init__(dim, 'regression')\n\n\nclass BalancedClassificationBuffer(Buffer):\n def __init__(self, dim, num_classes):\n super().__init__()\n self.dim = dim\n self.num_classes = num_classes\n self.buffers = [ClassifcationBuffer(dim) for _ in range(num_classes)]\n self.past_tasks = []\n\n def save_buffer(self, path):\n for buffer_id, buffer in enumerate(self.buffers):\n buffer.save_buffer(path_name=f'{path}_buffer_{buffer_id}')\n\n def load(self, path):\n for buffer_id, buffer in enumerate(self.buffers):\n buffer.load(path_name=f'{path}_buffer_{buffer_id}')\n\n def update_tasks(self, task_idx: List[int]):\n self.past_tasks += task_idx\n\n def get_cls_counts(self) -> dict:\n return {f\"cls_{i}\": len(b) for i, b in enumerate(self.buffers)}\n\n def add_data(self, data):\n x, y = data\n for i in range(self.num_classes):\n idx = y == i\n self.buffers[i].add_data((x[idx], y[idx]))\n\n def get_data(self, batch_size: Union[int, Dict[int, int]]) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Sample (without replacement) a batch of data from the buffer,\n making sure that each class is represented equally.\n\n If batch_size is an integer, then the batch size is the same for\n all non-empty classes. If batch_size is a dictionary, then the\n batch size for each class is specified by the dictionary.\n \"\"\"\n assert isinstance(batch_size, (int, dict))\n if isinstance(batch_size, int):\n nonzero_num_classes = sum([len(b) > 0 for b in self.buffers])\n min_num_samples = min([len(b) for b_id, b in enumerate(\n self.buffers) if len(b) > 0 and b_id in self.past_tasks])\n X = torch.empty(0, *self.dim)\n y = torch.empty(0, dtype=torch.long)\n for b_id, b in enumerate(self.buffers):\n if isinstance(batch_size, int):\n cls_batch_size = min(\n batch_size // nonzero_num_classes, min_num_samples)\n elif isinstance(batch_size, dict) and b_id in batch_size:\n cls_batch_size = batch_size[b_id]\n else:\n continue\n if len(b) < cls_batch_size:\n continue\n cls_data = b.get_data(cls_batch_size)\n X = torch.cat((X, cls_data[0]))\n y = torch.cat((y, cls_data[1]))\n return X, y\n\n def __len__(self):\n return sum([len(b) for b in self.buffers])\n\n\ndef get_dataset_from_buffer(buffer: Buffer, data_size: int):\n buf_x, buf_y = buffer.get_data(\n batch_size=data_size\n )\n return torch.utils.data.TensorDataset(buf_x, buf_y)\n","repo_name":"vlongle/shell-refactor-data","sub_path":"shell-data/shell_data/dataset/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"14828394577","text":"#!/usr/bin/env python3 \n# -*- coding: utf-8 -*- \n\"\"\" \nCreated by Lanrete on 2018/6/15\n\"\"\"\n\nimport gc\nimport pandas as pd\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom config import FIT_PARAMS\nfrom utility import timer\nfrom bureau_data_prepare import agg_bureau\nfrom pre_application_data_prepare import agg_pre_application\nfrom pipeline import fit_pipeline\n\nDATA_PATH = '../data'\n\n\ndef main(fit_params):\n data_path = '../data'\n train_base = pd.read_csv(f'{data_path}/application_train.csv')\n test_base = pd.read_csv(f'{data_path}/application_test.csv')\n\n train_base.set_index(keys='SK_ID_CURR', drop=True, inplace=True)\n test_base.set_index(keys='SK_ID_CURR', drop=True, inplace=True)\n\n with timer('Creating variables in base set'):\n for df in [train_base, test_base]:\n df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']\n df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']\n df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']\n\n with timer('Aggregating bureau.csv'):\n bureau_df = agg_bureau()\n train_base = train_base.join(bureau_df, how='left')\n test_base = test_base.join(bureau_df, how='left')\n del bureau_df\n gc.collect()\n\n with timer('Aggregating previous_application.csv'):\n previous_application_df = agg_pre_application()\n train_base = train_base.join(previous_application_df, how='left')\n test_base = test_base.join(previous_application_df, how='left')\n del previous_application_df\n gc.collect()\n\n y = train_base['TARGET']\n del train_base['TARGET']\n y = LabelEncoder().fit_transform(y)\n\n header = 'Grid Searching Pipeline with parameter grids' if fit_params else 'Fitting and predicting'\n\n with timer(header):\n fit_pipeline(\n train_base, y,\n predict=True, x_score=test_base, fit_params=fit_params\n )\n\n\nif __name__ == '__main__':\n main(fit_params=FIT_PARAMS)\n","repo_name":"lanrete/HomeCreditDefaultRisk","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"3797939286","text":"from collections import defaultdict\n\ndef solution(fees, records):\n \n basic_t, base_price, unit_t, unit_price = fees\n \n def price(log, fees): # 2\n if len(log) % 2:\n log.append(23*60+59)\n t = sum(log[i+1] - log[i] for i in range(0, len(log), 2))\n return unit_price * -(-max(0, t-basic_t) // unit_t) + base_price\n \n log = defaultdict(list)\n \n for record in records: # 1\n t, n, _ = record.split()\n h, m = map(int, t.split(':'))\n log[n].append(60*h+ m)\n \n return [price(log[n], fees) for n in sorted(log)] # 3\n# import math\n\n# def solution(fees, records):\n# parking = {}\n# check = {}\n# answer = {}\n\n# for i in records:\n# time, number, order = i.split()\n# now = list(map(int,time.split(':')))\n# now_m = now[0] * 60 + now[1]\n# if order == \"IN\":\n# parking[number] = now_m\n# check[number] = True\n# if number not in answer:\n# answer[number] = 0\n# else:\n# check[number] = False\n# answer[number] += now_m - parking[number]\n\n# for i, flag in check.items():\n# if flag:\n# answer[i] += (23*60+59) - parking[i]\n# check[i] = True\n\n# for num, v in answer.items():\n# if v > fees[0]: # 요금 계산\n# answer[num] = fees[1] + (math.ceil((v - fees[0])/fees[2]))*fees[3]\n# else:\n# answer[num] = fees[1]\n\n# return list(dict(sorted(answer.items())).values())\n\n\n\nfees = [180, 5000, 10, 600]\nrecords = [\"05:34 5961 IN\", \"06:00 0000 IN\", \"06:34 0000 OUT\", \"07:59 5961 OUT\", \"07:59 0148 IN\", \"18:59 0000 IN\", \"19:09 0148 OUT\", \"22:59 5961 IN\", \"23:00 5961 OUT\"]\n# fees = [120, 0, 60, 591]\n# records = [\"16:00 3961 IN\",\"16:00 0202 IN\",\"18:00 3961 OUT\",\"18:00 0202 OUT\",\"23:58 3961 IN\"]\n# fees = [1, 461, 1, 10]\n# records = [\"00:00 1234 IN\"]\n\nprint(solution(fees, records))","repo_name":"BreathIN423/CodingTest","sub_path":"PGM/Python/lv2/주차 요금 계산.py","file_name":"주차 요금 계산.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"72035588597","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom databases.students import students\nfrom databases import ACOLE1, ACOLE2\nfrom methods import opt\nfrom base import default_axis_config\nfrom colors import color1, color2\nfrom Fig27_m1_tests import boxplot_blocks_pairs\n\ntop_labels = ['Leitura', 'Ditado por composição', 'Ditado manuscrito']\ninner_labels = ['Palavras regulares CV', 'Palavras com\\ndificuldades ortográficas']\n\n# def boxplot_blocks(ax, blocks, label):\n# bar_positions = np.arange(len(blocks))\n\n# data = [[p for p in block.data['deltas'] if p is not None] for block in blocks]\n# boxprops = dict(linewidth=1, color='black')\n# medianprops = dict(linewidth=2, color='orange')\n\n# bp1 = ax.boxplot(data[::2], positions=bar_positions[::2], widths=0.6, sym='o', boxprops=boxprops, medianprops=medianprops)\n# bp2 = ax.boxplot(data[1::2], positions=bar_positions[1::2], widths=0.6, sym='o', boxprops=boxprops, medianprops=medianprops)\n\n# ax.set_title(label)\n# default_axis_config(ax)\n\n# ax.set_xticks(bar_positions)\n# ax.set_xticklabels([block.legend for block in blocks], rotation=45, ha='right')\n\n# labels = [block.legend for block in blocks]\n# return bp1, bp2, labels\n\ndef plot_blocks(ax, blocks, label, y_padding=1.0):\n bar_width = 0.4\n bar_positions = np.arange(len(blocks))\n\n data = [[p for p in block.data['deltas'] if p is not None] for block in blocks]\n\n # calculate mean\n means = [np.mean(d) for d in data]\n\n # ax.set_ylim(0, 100)\n ax.set_title(label, y=y_padding)\n default_axis_config(ax, False)\n\n legends = [block.legend for block in blocks][0::2]\n ax.bar(bar_positions[::2], means[::2], width=bar_width, color=color1, label=inner_labels[0])\n ax.bar(bar_positions[1::2]-bar_width, means[1::2], width=bar_width, color=color2, label=inner_labels[1])\n\n ax.set_xticks(np.array(bar_positions[1::2]) - bar_width - bar_width / 2)\n ax.set_xticklabels(legends, ha='center')\n\nfilters = {\n 'Fig30_m1_completo': lambda students : [student for student in students if student.has_two_acoles() and student.has_m1],\n 'Fig30_m1_completo_acoles_completas': lambda students : [student for student in students if student.has_two_complete_acoles() and student.has_m1],\n 'Fig30_m1_completo_primeira_acole_incompleta': lambda students : [student for student in students if student.has_two_acoles_first_incomplete() and student.has_m1],\n\n 'Fig31_m2_completo': lambda students : [student for student in students if student.has_two_acoles() and student.has_m2],\n 'Fig31_m2_completo_acoles_completas': lambda students : [student for student in students if student.has_two_complete_acoles() and student.has_m2],\n 'Fig31_m2_completo_primeira_acole_incompleta': lambda students : [student for student in students if student.has_two_acoles_first_incomplete() and student.has_m2],\n\n 'Fig32_m3_completo': lambda students : [student for student in students if student.has_two_acoles() and student.has_m3],\n 'Fig32_m3_completo_acoles_completas': lambda students : [student for student in students if student.has_two_complete_acoles() and student.has_m3],\n 'Fig32_m3_completo_primeira_acole_incompleta': lambda students : [student for student in students if student.has_two_acoles_first_incomplete() and student.has_m3],\n\n 'Fig34_has_first_acole_incomplete': lambda students : [student for student in students if student.has_two_acoles_first_incomplete()],\n 'Fig33_has_two_acoles': lambda students : [student for student in students if student.has_two_acoles()],\n 
'Fig35_has_two_complete_acoles': lambda students : [student for student in students if student.has_two_complete_acoles()],\n}\n\ndef bar_plot(students, use_boxplot, filename):\n if use_boxplot:\n figure_name = filename+'_boxplot'\n else:\n figure_name = filename\n opt.set_filename(figure_name)\n reading = []\n composition = []\n manuscript = []\n\n ACOLE_1 = ACOLE1.create()\n ACOLE_2 = ACOLE2.create()\n\n for student in filters[filename](students):\n ac1, ac2 = student.get_first_and_second_acoles()\n for block, student_block in zip(ACOLE_1.blocks, student.acoles[ac1].blocks):\n for key, data in student_block.data.items():\n if len(data) > 0:\n for d in data:\n block.data[key].append(d)\n\n for block, student_block in zip(ACOLE_2.blocks, student.acoles[ac2].blocks):\n for key, data in student_block.data.items():\n if len(data) > 0:\n for d in data:\n block.data[key].append(d)\n\n # df = ACOLE_2.days_per_week()\n # min_ = df['mean_days_per_week'].min() # 0.07317073170731707\n # max_ = df['mean_days_per_week'].max() # 2.4615384615384617\n values = [0.72, 1.30, 1.50, 1.70, 2.5]\n\n for r in ACOLE2.custom_range(values):\n blocks1 = ACOLE_1.by_frequency(r)\n blocks2 = ACOLE_2.by_frequency(r)\n reading.append(blocks2.LEITURA.delta(blocks1.LEITURA))\n reading.append(blocks2.LEITURA_DIFICULDADES.delta(blocks1.LEITURA_DIFICULDADES))\n composition.append(blocks2.DITADO_COMPOSICAO.delta(blocks1.DITADO_COMPOSICAO))\n composition.append(blocks2.DITADO_COMPOSICAO_DIFICULDADES.delta(blocks1.DITADO_COMPOSICAO_DIFICULDADES))\n manuscript.append(blocks2.DITADO_MANUSCRITO.delta(blocks1.DITADO_MANUSCRITO))\n manuscript.append(blocks2.DITADO_MANUSCRITO_DIFICULDADES.delta(blocks1.DITADO_MANUSCRITO_DIFICULDADES))\n\n fig, axs = plt.subplots(3, 1, sharey=True)\n fig.set_size_inches(5, 10)\n fig.set_dpi(100)\n # fig.suptitle(title, y=1.035, fontsize=14)\n\n groups = [reading, composition, manuscript]\n for group in groups:\n for block in group:\n block.legend = str([block.frequency_range.low, block.frequency_range.high])\n\n axs[1].set_ylabel('Diferença da porcentagem média de acertos')\n bpg1 = []\n bpg2 = []\n for (ax, title, data) in zip(axs, top_labels, groups):\n if use_boxplot:\n bp1, bp2, _ = boxplot_blocks_pairs(ax, data, title, title_y=1.0, limity=False, data='deltas')\n bpg1.append(bp1)\n bpg2.append(bp2)\n else:\n plot_blocks(ax, data, title)\n\n fig.tight_layout()\n\n fig.text(0.5, -0.02, 'Faixa de frequência semanal\\n(Dias/Semana)', ha='center', va='center', fontsize=12)\n\n x1 = 0.5\n y1 = 1.05\n if use_boxplot:\n fig.legend([bpg1[0][\"boxes\"][0], bpg2[0][\"boxes\"][0]], inner_labels, loc='upper center', bbox_to_anchor=(x1, y1), ncol=2)\n else:\n handles, labels = ax.get_legend_handles_labels()\n fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(x1, y1), ncol=2)\n\n plt.savefig(opt.output_path(), bbox_inches='tight')\n plt.close()\n\ndef plot():\n \"\"\"\n Diferença entre a porcentagem de acertos na ACOLE final e inicial,\n \"\"\"\n for filename in filters.keys():\n bar_plot(students, use_boxplot=False, filename=filename)\n bar_plot(students, use_boxplot=True, filename=filename)\n\n # schools = sorted([k for k in students.schools(True).keys()])\n # for school in schools:\n # for filename in filters.keys():\n # students_by_school = students.by_school(school)\n # bar_plot(students_by_school, use_boxplot=False, filename=filename+'_'+school)\n # bar_plot(students_by_school, use_boxplot=True, filename=filename+'_'+school)\nif __name__ == \"__main__\":\n 
plot()","repo_name":"cpicanco/alfatech-analysis","sub_path":"figures/Fig30_frequency_deltas.py","file_name":"Fig30_frequency_deltas.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"6971542035","text":"import lxml.html\n\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.contrib.loader.processor import Compose\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.selector import HtmlXPathSelector\n\nfrom rho_blogs.loaders import (BlogPostLoader, BlogAuthorLoader,\n CommentPostLoader, CommentAuthorLoader)\nfrom rho_blogs.processors import StringToDatetime\n\n\ndef strip_profile_url(value):\n \"\"\"strip /weblog/ from profile url\"\"\"\n if value.endswith('weblog/'):\n value = value[:-7]\n return value\n\ndef clean_post(value):\n \"\"\"Remove unwanted elements in post content\"\"\"\n doc = lxml.html.fragment_fromstring(value)\n doc.tag = 'div' # replaces
\n doc.attrib.clear()\n\n # remove comment owner info\n for e in doc.xpath('//div[@class=\"weblog_keywords\"]'):\n e.drop_tree()\n\n return lxml.html.tostring(doc)\n\ndef clean_comment(value):\n \"\"\"Remove unwanted elements in comment content\"\"\"\n doc = lxml.html.fragment_fromstring(value)\n doc.tag = 'div' # replaces \n doc.attrib.clear()\n\n # remove empty links without childrens. e.g. name anchors\n for e in doc.xpath('//a'):\n if not e.getchildren() and not e.text:\n e.drop_tag()\n\n # remove comment owner info\n for e in doc.xpath('//div[@class=\"comment-owner\"]'):\n e.drop_tree()\n\n return lxml.html.tostring(doc)\n\n\nclass ElggBlogAuthorLoader(BlogAuthorLoader):\n profile_url_out = Compose(BlogAuthorLoader.default_output_processor,\n strip_profile_url)\n\n\nclass ElggBlogPostLoader(BlogPostLoader):\n content_out = Compose(BlogPostLoader.default_output_processor,\n clean_post)\n posted_out = Compose(BlogPostLoader.default_output_processor,\n StringToDatetime('%B %d, %Y'))\n\n\nclass ElggCommentAuthorLoader(CommentAuthorLoader):\n profile_url_out = Compose(CommentAuthorLoader.default_output_processor,\n strip_profile_url)\n\n\nclass ElggCommentPostLoader(CommentPostLoader):\n content_out = Compose(CommentPostLoader.default_output_processor,\n clean_comment)\n posted_out = Compose(CommentPostLoader.default_output_processor,\n lambda s: s.split(' on ')[1],\n StringToDatetime('%A, %d %B %Y, %H:%M %Z |'))\n\n\n\nclass ElggBlogArchiveSpider(CrawlSpider):\n\n username = None\n domain = None\n\n archive_url = 'http://%(domain)s/%(username)s/weblog/archive/'\n\n content_selector_id = ''\n content_selector_xpath = ''\n\n post_loader = ElggBlogPostLoader\n author_loader = ElggBlogAuthorLoader\n comment_loader = ElggCommentPostLoader\n comment_author_loader = ElggCommentAuthorLoader\n\n def __init__(self):\n assert self.username and self.domain\n self.allowed_domains = [self.domain]\n self.start_urls = [self.archive_url % {'username': self.username,\n 'domain': self.domain}]\n\n archives_le = SgmlLinkExtractor(allow=self.get_archive_links_re(),\n restrict_xpaths=self.get_archive_links_xpath())\n posts_le = SgmlLinkExtractor(allow=self.get_post_links_re(),\n restrict_xpaths=self.get_post_links_xpath())\n\n self.rules = (\n Rule(archives_le, follow=True),\n Rule(posts_le, callback='parse_post'),\n )\n\n super(ElggBlogArchiveSpider, self).__init__()\n\n def get_content_xpath(self):\n if self.content_selector_xpath:\n return self.content_selector_xpath\n else:\n return '//div[@id=\"%s\"]' % self.content_selector_id\n\n def get_archive_links_re(self):\n return r'/archive/\\d{4}/\\d{2}/'\n\n def get_archive_links_xpath(self):\n return '%s/ul/li' % self.get_content_xpath()\n\n def get_post_links_re(self):\n return r'/weblog/.+'\n\n def get_post_links_xpath(self):\n return '%s//div[@class=\"weblog-title\"]' % self.get_content_xpath()\n\n def get_post_author_xpaths(self):\n \"\"\"\n Returns a tuple of xpath rules (container, name, profile_url, avatar_url)\n \"\"\"\n return ('%s//div[@class=\"user\"]' % self.get_content_xpath(),\n './/a[2]/text()',\n './/a[2]/@href',\n './/img/@src')\n\n def get_post_xpaths(self):\n \"\"\"\n Returns a tuple of xpath rules:\n (container, title, content, tags, posted)\n\n Does not return origin_url because is taked from response.url\n \"\"\"\n return (self.get_content_xpath(),\n './/div[@class=\"weblog-title\"]//text()',\n './div[@class=\"weblog-post\"]/div[@class=\"post\"]',\n # only extract tags with links\n './/div[@class=\"weblog_keywords\"]//a/text()',\n 
'.//h2[@class=\"weblog_dateheader\"]/text()',\n )\n\n def get_comments_xpath(self):\n return '%s//div[@id=\"comments\"]/ol/li' % self.get_content_xpath()\n\n def get_comment_author_xpath(self):\n \"\"\"\n Returns a tuple of relative xpath rules to each comment xpath rule:\n (container, name, profile_url, avatar_url)\n \"\"\"\n return ('./div[@class=\"comment-owner\"]/p',\n './a[2]/text()',\n './a[2]/@href',\n './a[1]/img/@src',\n )\n\n def get_comment_post_xpath(self):\n \"\"\"\n Returns a tuple of relative xpath rules to each comment xpath rule:\n (container, content, posted, origin_url)\n \"\"\"\n return ('.',\n '.',\n './div[@class=\"comment-owner\"]/p/text()',\n './div[@class=\"comment-owner\"]/p/a[3]/@href',\n )\n\n def parse_post_author(self, response):\n hxs = HtmlXPathSelector(response)\n container, name, profile_url, avatar_url = self.get_post_author_xpaths()\n\n author = self.author_loader(selector=hxs.select(container))\n author.add_xpath('name', name)\n author.add_xpath('profile_url', profile_url)\n author.add_xpath('avatar_url', avatar_url)\n\n return author.load_item()\n\n def parse_post_comments(self, response):\n hxs = HtmlXPathSelector(response)\n\n (author_container,\n author_name,\n author_profile_url,\n author_avatar_url) = self.get_comment_author_xpath()\n\n (container, content,\n posted, origin_url) = self.get_comment_post_xpath()\n\n comment_list = []\n for comment in hxs.select(self.get_comments_xpath()):\n author = self.comment_author_loader(selector=comment.select(author_container))\n author.add_xpath('name', author_name)\n author.add_xpath('profile_url', author_profile_url)\n author.add_xpath('avatar_url', author_avatar_url)\n\n comment = self.comment_loader(selector=comment.select(container))\n comment.add_xpath('content', content)\n comment.add_xpath('posted', posted)\n comment.add_xpath('origin_url', origin_url)\n\n comment.add_value('author', [author.load_item()])\n\n comment_list.append(comment.load_item())\n\n return comment_list\n\n def parse_post(self, response):\n hxs = HtmlXPathSelector(response)\n container, title, content, tags, posted = self.get_post_xpaths()\n\n post = self.post_loader(selector=hxs.select(container))\n post.add_value('origin_url', [unicode(response.url)])\n post.add_xpath('title', title)\n post.add_xpath('content', content)\n post.add_xpath('tags', tags)\n post.add_xpath('posted', posted)\n\n author = self.parse_post_author(response)\n post.add_value('author', [author])\n\n comments = self.parse_post_comments(response)\n post.add_value('comments', comments)\n\n return post.load_item()\n","repo_name":"rmax/rho-blogs-crawler","sub_path":"rho_blogs/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"74544429236","text":"import ubluetooth\nfrom micropython import const\nimport ubinascii\n\nclass TEST_BLE:\n def __init__(self):\n self.ble = ubluetooth.BLE()\n if self.ble.active() == False:\n self.ble.active(True)\n self.show_bt_mac()\n\n def show_bt_mac(self):\n address_str = ubinascii.hexlify(self.ble.config(\"mac\")[1]).decode()\n print(\"BLE_MAC : \" + address_str)\n \n def adv(self):\n print(\"ble_advertise_start\")\n send_str = \"BLE,\"\n send_data = send_str.encode()\n self.ble.gap_advertise(20000 , adv_data=send_data)\n\nif __name__ == \"__main__\":\n # BLE通信\n print(\"adv start\")\n gps_ble = TEST_BLE() \n gps_ble.adv()","repo_name":"cdsl-research/C0119360_B4","sub_path":"late_term/rssi_test/ble_adv.py","file_name":"ble_adv.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"73550798196","text":"import sys\nfrom numpy import load, std, mean, vstack, array, reshape, save\nfrom sklearn import preprocessing\nfrom operator import itemgetter\nfrom constants import NUMBER_OF_FEATURES, NUMBER_OF_TRAINING_FEATURES, SIMILARITY_WEIGHT, SCORE_EXP, ALL_TRAINING_FEATURES_DATA_DIR, OPTIMAL_TRAINING_FEATURES_DIR, OPTIMAL_TRAINING_FEATURES_DATA_DIR\nfrom scale_data import scale_data_single\nfrom k_nearest_neighbor import find_optimal_k\n\ndef get_features_sorted_by_same_genre_similarity(all_training_features):\n genre_stds = []\n for feature_index in range(NUMBER_OF_FEATURES):\n genre_stds.append(({\n 'min': [],\n 'mean': [],\n 'max': []\n }))\n\n for genre, features in all_training_features.items():\n for feature_index, feature in enumerate(features):\n for prop, prop_values in feature.items():\n genre_stds[feature_index][prop].append(std(prop_values))\n\n average_genre_stds = {\n 'min': [0] * NUMBER_OF_FEATURES,\n 'mean': [0] * NUMBER_OF_FEATURES,\n 'max': [0] * NUMBER_OF_FEATURES\n }\n\n for feature_index, feature in enumerate(genre_stds):\n for prop, prop_stds in feature.items():\n average_genre_stds[prop][feature_index] = mean(prop_stds)\n\n scaled_average_genre_stds = {}\n for prop, stds in average_genre_stds.items():\n stds = array(stds).reshape(-1, 1)\n scaler = preprocessing.MinMaxScaler(feature_range = (0, 1)).fit(stds)\n scaled_average_genre_stds[prop] = scaler.transform(stds)\n\n feature_props = []\n for prop, scaled_average_genre_feature_stds in scaled_average_genre_stds.items():\n for feature_index, scaled_average_genre_feature_std in enumerate(scaled_average_genre_feature_stds):\n feature_id = feature_index + 1\n feature_name = str(feature_id) + '_' + prop\n feature_props.append({\n 'feature_id': feature_id,\n 'feature_prop': prop,\n 'feature_name': feature_name,\n 'std': scaled_average_genre_feature_std[0]\n })\n\n features_sorted_by_same_genre_similarity = sorted(feature_props, key=itemgetter('std'))\n return features_sorted_by_same_genre_similarity\n\n\ndef get_features_sorted_by_genre_difference(all_training_features):\n combined_genre_feature_prop_averages = []\n for feature_index in range(NUMBER_OF_FEATURES):\n combined_genre_feature_prop_averages.append({\n 'min': [],\n 'mean': [],\n 'max': []\n })\n\n for genre, features in all_training_features.items():\n for feature_index, feature in enumerate(features):\n for prop, prop_values in feature.items():\n combined_genre_feature_prop_averages[feature_index][prop].append(mean(prop_values))\n\n feature_props = []\n for feature_index, feature in enumerate(combined_genre_feature_prop_averages):\n for prop, prop_averages in feature.items():\n feature_id = feature_index + 1\n feature_name = str(feature_id) + '_' + prop\n feature_props.append({\n 'feature_id': feature_id,\n 'feature_prop': prop,\n 'feature_name': feature_name,\n 'std': std(prop_averages)\n })\n\n features_sorted_by_genre_difference = sorted(feature_props, key=itemgetter('std'), reverse=True)\n return features_sorted_by_genre_difference\n\ndef get_sorted_features(features_sorted_by_same_genre_similarity, features_sorted_by_genre_difference):\n scored_features = []\n for index, feature in enumerate(features_sorted_by_same_genre_similarity):\n same_genre_similarity_score = (index ** SCORE_EXP) * SIMILARITY_WEIGHT\n genre_difference_score = (list(map(itemgetter('feature_name'), features_sorted_by_genre_difference)).index(feature['feature_name']) ** SCORE_EXP) * (1 - SIMILARITY_WEIGHT)\n scored_features.append({\n 'feature_id': 
feature['feature_id'],\n 'feature_prop': feature['feature_prop'],\n 'score': same_genre_similarity_score + genre_difference_score\n })\n\n sorted_features = sorted(scored_features, key=itemgetter('score'))\n return sorted_features\n\n\ndef find_optimal_training_features(all_training_features):\n features_sorted_by_same_genre_similarity = get_features_sorted_by_same_genre_similarity(all_training_features)\n features_sorted_by_genre_difference = get_features_sorted_by_genre_difference(all_training_features)\n sorted_features = get_sorted_features(features_sorted_by_same_genre_similarity, features_sorted_by_genre_difference)\n return sorted_features[0:NUMBER_OF_TRAINING_FEATURES]\n\ndef save_optimal_training_features(all_training_features, optimal_training_features):\n optimal_training_features_data = {\n 'X_train': [],\n 'y_train': [],\n 'k': 0\n }\n\n for genre, features in all_training_features.items():\n genre_X_train = []\n genre_y_train = []\n for song_index in range(len(features[0]['min'])):\n genre_X_train.append([])\n genre_y_train.append(genre)\n\n for feature_index, feature in enumerate(features):\n feature_id = feature_index + 1\n for prop, prop_values in feature.items():\n if any(f['feature_id'] == feature_id and f['feature_prop'] == prop for f in optimal_training_features):\n for song_index, prop_value in enumerate(prop_values):\n genre_X_train[song_index].append(prop_value)\n\n optimal_training_features_data['X_train'].extend(genre_X_train)\n optimal_training_features_data['y_train'].extend(genre_y_train)\n\n X_train_scale = scale_data_single(optimal_training_features_data['X_train'])\n optimal_training_features_data['k'] = find_optimal_k(X_train_scale, optimal_training_features_data['y_train'])\n\n save(OPTIMAL_TRAINING_FEATURES_DIR, optimal_training_features)\n save(OPTIMAL_TRAINING_FEATURES_DATA_DIR, optimal_training_features_data)\n\n\ndef main(argv):\n all_training_features = load(ALL_TRAINING_FEATURES_DATA_DIR).item()\n optimal_training_features = find_optimal_training_features(all_training_features)\n save_optimal_training_features(all_training_features, optimal_training_features)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"arvidede/music-genre-classification","sub_path":"generate_optimal_training_features.py","file_name":"generate_optimal_training_features.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"18135327178","text":"# coding: utf-8\n\n\"\"\"\nData pre-processing\n##########################\n\"\"\"\nfrom logging import getLogger\nfrom collections import Counter\nimport os, copy\nimport pandas as pd\nimport numpy as np\nimport time\n#from torch.utils.data import Dataset\n\n\nclass RecDataset(object):\n def __init__(self, config, df=None):\n self.config = config\n self.dataset_path = os.path.abspath(config['data_path'])\n self.preprocessed_dataset_path = os.path.abspath(config['preprocessed_data'])\n self.preprocessed_loaded = False # if preprocessed data loaded?\n self.logger = getLogger()\n self.dataset_name = config['dataset']\n\n # dataframe\n self.uid_field = self.config['USER_ID_FIELD']\n self.iid_field = self.config['ITEM_ID_FIELD']\n self.ts_id = self.config['TIME_FIELD']\n\n if df is not None:\n self.df = df\n return\n self.ui_core_splitting_str = self._k_core_and_splitting()\n self.processed_data_name = '{}_{}_processed.inter'.format(self.dataset_name, self.ui_core_splitting_str)\n # load from preprocessed path?\n if self.config['load_preprocessed'] and self._load_preprocessed_dataset():\n self.preprocessed_loaded = True\n self.logger.info('\\nData loaded from preprocessed dir: ' + self.preprocessed_dataset_path + '\\n')\n return\n # load dataframe\n self._from_scratch()\n # pre-processing\n self._data_processing()\n\n def _k_core_and_splitting(self):\n user_min_n = 1\n item_min_n = 1\n if self.config['min_user_inter_num'] is not None:\n user_min_n = max(self.config['min_user_inter_num'], 1)\n if self.config['min_item_inter_num'] is not None:\n item_min_n = max(self.config['min_item_inter_num'], 1)\n # splitting\n ratios = self.config['split_ratio']\n tot_ratio = sum(ratios)\n # remove 0.0 in ratios\n ratios = [i for i in ratios if i > .0]\n ratios = [str(int(_ * 10 / tot_ratio)) for _ in ratios]\n s = ''.join(ratios)\n return 'u{}i{}_s'.format(user_min_n, item_min_n) + s\n\n def _load_preprocessed_dataset(self):\n file_path = os.path.join(self.preprocessed_dataset_path, self.processed_data_name)\n if not os.path.isfile(file_path):\n return False\n # load\n self.df = self._load_df_from_file(file_path, self.config['load_cols']+[self.config['preprocessed_data_splitting']])\n return True\n\n def _from_scratch(self):\n \"\"\"Load dataset from scratch.\n Initialize attributes firstly, then load data from atomic files, pre-process the dataset lastly.\n \"\"\"\n self.logger.info('Loading {} from scratch'.format(self.__class__))\n # get path\n file_path = os.path.join(self.dataset_path, '{}.inter'.format(self.dataset_name))\n if not os.path.isfile(file_path):\n raise ValueError('File {} not exist'.format(file_path))\n self.df = self._load_df_from_file(file_path, self.config['load_cols'])\n\n def _load_df_from_file(self, file_path, load_columns):\n # read header(user_id:token item_id:token rating:float timestamp:float) for ml-10k\n cnt = 0\n with open(file_path, 'r') as f:\n head = f.readline()[:-1]\n field_separator = self.config['field_separator']\n # only use [user_id, item_id, timestamp]\n for field_type in head.split(field_separator):\n if field_type in load_columns:\n cnt += 1\n # all cols exist\n if cnt != len(load_columns):\n raise ValueError('File {} lost some required columns.'.format(file_path))\n\n df = pd.read_csv(file_path, sep=self.config['field_separator'], usecols=load_columns)\n return df\n\n def _data_processing(self):\n \"\"\"Data preprocessing, including:\n - K-core data filtering\n - Remap ID\n \"\"\"\n # drop N/A value\n 
self.df.dropna(inplace=True)\n # remove duplicate rows\n self.df.drop_duplicates(inplace=True)\n # perform k-core\n self._filter_by_k_core(self.df)\n # remap ID\n self._reset_index(self.df)\n\n def _filter_by_k_core(self, df):\n \"\"\"Filter by number of interaction.\n\n Upper/Lower bounds can be set, only users/items between upper/lower bounds can be remained.\n See :doc:`../user_guide/data/data_args` for detail arg setting.\n\n Note:\n Lower bound is also called k-core filtering, which means this method will filter loops\n until all the users and items has at least k interactions.\n \"\"\"\n while True:\n ban_users = self._get_illegal_ids_by_inter_num(df, field=self.uid_field,\n max_num=self.config['max_user_inter_num'],\n min_num=self.config['min_user_inter_num'])\n ban_items = self._get_illegal_ids_by_inter_num(df, field=self.iid_field,\n max_num=self.config['max_item_inter_num'],\n min_num=self.config['min_item_inter_num'])\n\n if len(ban_users) == 0 and len(ban_items) == 0:\n return\n\n dropped_inter = pd.Series(False, index=df.index)\n if self.uid_field:\n dropped_inter |= df[self.uid_field].isin(ban_users)\n if self.iid_field:\n dropped_inter |= df[self.iid_field].isin(ban_items)\n # self.logger.info('[{}] dropped interactions'.format(len(dropped_inter)))\n df.drop(df.index[dropped_inter], inplace=True)\n\n def _get_illegal_ids_by_inter_num(self, df, field, max_num=None, min_num=None):\n \"\"\"Given inter feat, return illegal ids, whose inter num out of [min_num, max_num]\n\n Args:\n field (str): field name of user_id or item_id.\n feat (pandas.DataFrame): interaction feature.\n max_num (int, optional): max number of interaction. Defaults to ``None``.\n min_num (int, optional): min number of interaction. Defaults to ``None``.\n\n Returns:\n set: illegal ids, whose inter num out of [min_num, max_num]\n \"\"\"\n self.logger.debug('\\n get_illegal_ids_by_inter_num:\\n\\t field=[{}], max_num=[{}], min_num=[{}]'.format(\n field, max_num, min_num\n ))\n\n if field is None:\n return set()\n if max_num is None and min_num is None:\n return set()\n\n max_num = max_num or np.inf\n min_num = min_num or -1\n\n ids = df[field].values\n inter_num = Counter(ids)\n ids = {id_ for id_ in inter_num if inter_num[id_] < min_num or inter_num[id_] > max_num}\n\n self.logger.debug('[{}] illegal_ids_by_inter_num, field=[{}]'.format(len(ids), field))\n return ids\n\n def _reset_index(self, df):\n if df.empty:\n raise ValueError('Some feat is empty, please check the filtering settings.')\n df.reset_index(drop=True, inplace=True)\n\n def split(self, ratios):\n \"\"\"Split interaction records by ratios.\n\n Args:\n ratios (list): List of split ratios. 
No need to be normalized.\n group_by (str, optional): Field name that interaction records should grouped by after splitting.\n Defaults to ``None``\n\n Returns:\n list: List of :class:`~Dataset`, whose interaction features has been splitted.\n\n Note:\n Other than the first one, each part is rounded down.\n \"\"\"\n if self.preprocessed_loaded:\n dfs = []\n splitting_label = self.config['preprocessed_data_splitting']\n # splitting into training/validation/test\n for i in range(3):\n temp_df = self.df[self.df[splitting_label] == i].copy()\n temp_df.drop(splitting_label, inplace=True, axis=1)\n dfs.append(temp_df)\n # wrap as RecDataset\n full_ds = [self.copy(_) for _ in dfs]\n return full_ds\n\n tot_ratio = sum(ratios)\n # remove 0.0 in ratios\n ratios = [i for i in ratios if i > .0]\n ratios = [_ / tot_ratio for _ in ratios]\n\n # get split global time\n split_ratios = np.cumsum(ratios)[:-1]\n split_timestamps = list(np.quantile(self.df[self.ts_id], split_ratios))\n\n # get df training dataset unique users/items\n df_train = self.df.loc[self.df[self.ts_id] < split_timestamps[0]]\n self.logger.info('==Splitting: 1. Reindexing and filtering out new users/items not in train dataset...')\n\n uni_users = pd.unique(df_train[self.uid_field])\n uni_items = pd.unique(df_train[self.iid_field])\n # re_index users & items\n u_id_map = {k: i for i, k in enumerate(uni_users)}\n i_id_map = {k: i for i, k in enumerate(uni_items)}\n self.df[self.uid_field] = self.df[self.uid_field].map(u_id_map)\n self.df[self.iid_field] = self.df[self.iid_field].map(i_id_map)\n # filter out Nan line\n self.df.dropna(inplace=True)\n # as int\n self.df = self.df.astype(int)\n\n # split df based on global time\n self.logger.info('==Splitting: 2. Train/Valid/Test.')\n dfs = []\n start = 0\n for i in split_timestamps:\n dfs.append(self.df.loc[(start <= self.df[self.ts_id]) & (self.df[self.ts_id] < i)].copy())\n start = i\n # last\n dfs.append(self.df.loc[start <= self.df[self.ts_id]].copy())\n\n # save to disk\n self.logger.info('==Splitting: 3. 
Dumping...')\n self._save_dfs_to_disk(u_id_map, i_id_map, dfs)\n # self._drop_cols(dfs+[self.df], [self.ts_id])\n\n # wrap as RecDataset\n full_ds = [self.copy(_) for _ in dfs]\n return full_ds\n\n def _save_dfs_to_disk(self, u_map, i_map, dfs):\n if self.config['load_preprocessed'] and not self.preprocessed_loaded:\n dir_name = self.preprocessed_dataset_path\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n # save id mapping\n u_df = pd.DataFrame(list(u_map.items()), columns=[self.uid_field, 'new_id'])\n i_df = pd.DataFrame(list(i_map.items()), columns=[self.iid_field, 'new_id'])\n u_df.to_csv(os.path.join(self.preprocessed_dataset_path,\n '{}_u_{}_mapping.csv'.format(self.dataset_name, self.ui_core_splitting_str)),\n sep=self.config['field_separator'], index=False)\n i_df.to_csv(os.path.join(self.preprocessed_dataset_path,\n '{}_i_{}_mapping.csv'.format(self.dataset_name, self.ui_core_splitting_str)),\n sep=self.config['field_separator'], index=False)\n # 0-training/1-validation/2-test\n for i, temp_df in enumerate(dfs):\n temp_df[self.config['preprocessed_data_splitting']] = i\n temp_df = pd.concat(dfs)\n temp_df.to_csv(os.path.join(self.preprocessed_dataset_path, self.processed_data_name),\n sep=self.config['field_separator'], index=False)\n self.logger.info('\\nData saved to preprocessed dir: \\n' + self.preprocessed_dataset_path)\n\n # def _drop_cols(self, dfs, col_names):\n # for _df in dfs:\n # _df.drop(col_names, inplace=True, axis = 1)\n\n def copy(self, new_df):\n \"\"\"Given a new interaction feature, return a new :class:`Dataset` object,\n whose interaction feature is updated with ``new_df``, and all the other attributes the same.\n\n Args:\n new_df (pandas.DataFrame): The new interaction feature need to be updated.\n\n Returns:\n :class:`~Dataset`: the new :class:`~Dataset` object, whose interaction feature has been updated.\n \"\"\"\n nxt = RecDataset(self.config, new_df)\n return nxt\n\n def num(self, field):\n \"\"\"Given ``field``, for token-like fields, return the number of different tokens after remapping,\n for float-like fields, return ``1``.\n\n Args:\n field (str): field name to get token number.\n\n Returns:\n int: The number of different tokens (``1`` if ``field`` is a float-like field).\n \"\"\"\n if field not in self.config['load_cols']:\n raise ValueError('field [{}] not defined in dataset'.format(field))\n uni_len = len(pd.unique(self.df[field]))\n return uni_len\n\n def shuffle(self):\n \"\"\"Shuffle the interaction records inplace.\n \"\"\"\n self.df = self.df.sample(frac=1, replace=False).reset_index(drop=True)\n\n def sort_by_chronological(self):\n self.df.sort_values(by=[self.ts_id], inplace=True, ignore_index=True)\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n # Series result\n return self.df.iloc[idx]\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n info = [self.dataset_name]\n self.inter_num = len(self.df)\n uni_u = pd.unique(self.df[self.uid_field])\n uni_i = pd.unique(self.df[self.iid_field])\n if self.uid_field:\n self.user_num = len(uni_u)\n self.avg_actions_of_users = self.inter_num/self.user_num\n info.extend(['The number of users: {}'.format(self.user_num),\n 'Average actions of users: {}'.format(self.avg_actions_of_users)])\n if self.iid_field:\n self.item_num = len(uni_i)\n self.avg_actions_of_items = self.inter_num/self.item_num\n info.extend(['The number of items: {}'.format(self.item_num),\n 'Average actions of items: {}'.format(self.avg_actions_of_items)])\n 
info.append('The number of inters: {}'.format(self.inter_num))\n if self.uid_field and self.iid_field:\n sparsity = 1 - self.inter_num / self.user_num / self.item_num\n info.append('The sparsity of the dataset: {}%'.format(sparsity * 100))\n return '\\n'.join(info)\n","repo_name":"enoche/ImRec","sub_path":"utils/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":14272,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"4"}
+{"seq_id":"32126875440","text":"import numpy as np\nfrom ballet import Feature\nfrom ballet.eng import SimpleFunctionTransformer\n\ninput = ['Lot Area', 'Lot Frontage']\ndef fill_frontage(df):\n mask = df['Lot Frontage'].isnull()\n df['Lot Frontage'][mask] = np.sqrt(df['Lot Area'])[mask]\n return df['Lot Frontage']\ntransformer = SimpleFunctionTransformer(fill_frontage)\nname = 'Lot Frontage Fill'\nfeature = Feature(input=input, transformer=transformer, name=name)\n","repo_name":"micahjsmith/ballet-ames-notebooks","sub_path":"features/user_08/feature_02.py","file_name":"feature_02.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"31948321157","text":"import math\nimport matplotlib.pyplot as plt\n\n\ndef plot(x: list, y: list, x_new: list, y_new: list):\n plt.scatter(x, y, c='red')\n plt.scatter(x_new, y_new, c='blue')\n plt.legend(['old point', 'new point'])\n\n plt.axhline(0, c='gray')\n plt.axvline(0, c='gray')\n\n max_val = max(x + y + x_new + y_new) + 1\n min_val = min(x + y + x_new + y_new) - 1\n min_val = min(min_val, 0)\n\n plt.ylim((min_val, max_val))\n plt.xlim((min_val, max_val))\n plt.xlabel('x - axis')\n plt.ylabel('y - axis')\n plt.title('Point Transformation')\n plt.show()\n\n\ndef translate(x: int, y: int, tx: int, ty: int):\n x_new = [x + tx]\n y_new = [y + ty]\n plot([x], [y], x_new, y_new)\n\n\ndef scale(x: int, y: int, sx: float, sy: float):\n x_new = [x * sx]\n y_new = [y * sy]\n plot([x], [y], x_new, y_new)\n\n\ndef rotate(x: int, y: int, angle: float):\n x_new = [x * math.cos(angle) - y * math.sin(angle)]\n y_new = [x * math.sin(angle) + y * math.cos(angle)]\n plot([x], [y], x_new, y_new)\n\n\nx = int(input('Enter x: '))\ny = int(input('Enter y: '))\n\nprint('1. Translation\\n2. Scaling\\n3. Rotation')\nchoice = int(input('Enter a choice: '))\n\nassert (choice >= 1 and choice <= 3)\n\nif choice == 1:\n tx = int(input('Enter tx: '))\n ty = int(input('Enter ty: '))\n translate(x, y, tx, ty)\nelif choice == 2:\n sx = float(input('Enter sx: '))\n sy = float(input('Enter sy: '))\n scale(x, y, sx, sy)\nelse:\n angle = int(input('Enter theta in degrees: '))\n angle = (angle * math.pi) / 180\n rotate(x, y, angle)\n","repo_name":"rohitmalik776/cg-lab-dtu","sub_path":"point_transformations.py","file_name":"point_transformations.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"29852448071","text":"from pms.models import Notification\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n\tpath('', views.IndexView, name = \"index\"),\n\tpath('home/', views.HomeView, name = \"home\"),\n\tpath('createproject/', views.CreateProjectView, name = \"createproject\"),\n\tpath('projects/', views.ProjectsView, name = \"projects\"),\n\tpath('projects//', views.ProjectDetailsView, name = \"projectdetails\"),\n\tpath('projects/addnewlevel//', views.AddNewLevelView, name = \"addnewlevel\"),\n\tpath('projects/moveproject///', views.MoveProjectView, name = \"moveproject\"),\n\tpath('projects/changedetails//', views.ChangeProjectDetailsView, name = \"changedetails\"),\n\tpath('projects/addworker///', views.AddWorkersView, name = \"addworker\"),\n\tpath('projects/relieveworker///', views.RelieveFromProjectView, name = \"relieveworker\"),\n\tpath('notifications/', views.NotificationsView, name = \"notifications\"),\n\tpath('notifications/markasread//', views.NotificationMarkAsReadView, name = \"markasread\"),\n\n\n\t# AUTHENTICATION\n\tpath('login/', views.LoginView, name = \"login\"),\n\tpath('logout/', views.LogoutView, name = \"logout\"),\n\tpath('register/', views.RegisterView, name = \"register\"),\n]\n","repo_name":"j-yeskay/pms-django","sub_path":"pms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"20246390819","text":"#!/usr/bin/env python\n\nimport rospy\nfrom race.msg import drive_param\nimport curses\n#import signal\n#TIMEOUT = 0.1 # number of seconds your want for timeout\nforward = 0;\nleft = 0;\n\n# def interrupted(signum, frame):\n# \"called when read times out\"\n# global forward\n# forward = 0\n# global left\n# left = 0\n# stdscr.addstr(2, 20, \"Stop\")\n# stdscr.addstr(2, 25, '%.2f' % forward)\n# stdscr.addstr(3, 20, \"Stop\")\n# stdscr.addstr(3, 25, '%.2f' % left)\n# signal.signal(signal.SIGALRM, interrupted)\n\n# def input():\n# try:\n# foo = stdscr.getch()\n# return foo\n# except:\n# # timeout\n# return\n\nstdscr = curses.initscr()\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyboard_talker')\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)\n\n# set alarm\n#signal.alarm(TIMEOUT)\n#s = input()\n# disable the alarm after success\n#signal.alarm(0)\n#print 'You typed', s\n\nstdscr.refresh()\n\nkey = ''\nwhile key != ord('q'):\n#\tsignal.setitimer(signal.ITIMER_REAL,0.05)\n#\tkey = input()\n\tkey = stdscr.getch()\n\tstdscr.refresh()\n#\tsignal.alarm(0)\n\tif key == curses.KEY_UP: \n\t\tforward = forward + 1;\n\t\tstdscr.addstr(2, 20, \"Up \")\n\t\tstdscr.addstr(2, 25, '%.2f' % forward)\n\t\tstdscr.addstr(5, 20, \" \")\n\telif key == curses.KEY_DOWN:\n\t\tforward = forward - 1; \n\t\tstdscr.addstr(2, 20, \"Down\")\n\t\tstdscr.addstr(2, 25, '%.2f' % forward)\n\t\tstdscr.addstr(5, 20, \" \")\n\tif key == curses.KEY_LEFT:\n\t\tleft = left - 1; \n\t\tstdscr.addstr(3, 20, \"left\")\n\t\tstdscr.addstr(3, 25, '%.2f' % left)\n\t\tstdscr.addstr(5, 20, \" \")\n\telif key == curses.KEY_RIGHT:\n\t\tleft = left + 1; \n\t\tstdscr.addstr(3, 20, \"rgt \")\n\t\tstdscr.addstr(3, 25, '%.2f' % left)\n\t\tstdscr.addstr(5, 20, \" \")\n\tif key == curses.KEY_DC:\n\t\tleft = 0\n\t\tforward = 0\n\t\tstdscr.addstr(5, 20, \"Stop\")\n\tmsg = drive_param()\n\tmsg.velocity = forward\n\tmsg.angle = left\n\tpub.publish(msg)\ncurses.endwin()\n","repo_name":"f1tenth/F110CPSWeek2018","sub_path":"Unimore/drivebox-tenth/src/f1tenth/race/src/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"4"}
+{"seq_id":"29353905081","text":"import csv\n\ndef new_user():\n with open(\"user.csv\",\"a\",newline=\"\") as f: \n read = csv.reader(f)\n for i in csv.read:\n print(i)\n try:\n user = [i for i in read]\n except:\n id = 0\n\n for i in user:\n print (i)\n if id != 0:\n id = user[0] + 1\n\n nam = input(\"Enter name: \")\n\n score = 0\n\n n_user = [id,nam,score]\n\n with open('user.csv', 'a', newline = '') as f:\n write = csv.writer(f)\n write.writerow(n_user)\n\nnew_user()","repo_name":"DarkGamer1507/Project","sub_path":"quiz/main_menu.py/new_user.py","file_name":"new_user.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"70458831797","text":"from unittest import TestCase\nfrom parameterized import parameterized\nfrom TattooSalon.salon.validator import Validator\n\nclass TestValidatorTDD(TestCase):\n\n @parameterized.expand([\n [\"Кристина123\", True],\n [\"kristina@gmail.com\", True],\n [\"+34567567kris\", True],\n [\"main_admin\", True],\n [\"super-admin\", True],\n [\"#Admin123\", True],\n [\"Костя%1%1%\", True],\n [\"1*2*3*4\", True],\n [\"kristina/Mironenko\", True],\n [\"\", False],\n ])\n def test_check_login(self, input_string, expected):\n\n actual = Validator.check_login(input_string)\n\n self.assertEqual(actual, expected)\n\n\nclass TestIntegrated(TestCase):\n\n def test_addToCart_AddToCart_DeleteFromCart_ExceptedTrue(self):\n self.assertEqual(True, True)\n def test_CheckOnValidLoginAndValidPasswordExceptTrue(self):\n self.assertEqual(True, True)","repo_name":"xlebyshek17/TattoSalon","sub_path":"TattooSalon/salon/tests/test_tdd.py","file_name":"test_tdd.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"37848335882","text":"import matplotlib.pyplot as plt\nf = open('mouse.csv', 'r').read()\ndatasets = f.split('\\n')\n# get 4 corners \nfor i in range(6,len(datasets)- 2,2):\n ydata = datasets[i].split(',')\n ylabel = ydata[0]\n y = list(map(float, ydata[1:]))\n \n i = i + 1\n \n xdata = datasets[i].split(',')\n xlabel = xdata[0]\n x = list(map(float, xdata[1:]))\n line, = plt.plot(x,y, color='g')\n line.set_label(ylabel)\n\n if \"Bottom Right\" in ylabel:\n peaks = []\n threshold= -0.7\n ind = 0\n for j in range(len(y)):\n if y[j] > threshold:\n print(x[j], y[j])\n peaks.append(x[j])\n \n \nmouse_data = datasets[len(datasets)-2].split(',')\nmouse_label = mouse_data[0]\nmouse_timestamps = list(map(float, mouse_data[1:]))\nplt.axhline(threshold, color = 'r', label = \"Threshold\")\ndistances = []\nimport math\nmouse_label_added = False\nfor m in mouse_timestamps:\n d = 10000\n r_d = 0\n for p in peaks:\n _d = math.sqrt((m -p)**2)\n if d > _d:\n d = _d\n r_d = p-m\n distances.append(r_d)\n if mouse_label_added:\n plt.axvline(m)\n else:\n plt.axvline(m, label = mouse_label)\n mouse_label_added = True\n\nplt.legend()\nprint(distances)\nm = sum(distances)/len(distances)\ndeviations = list(map(lambda x : (x - m) **2, distances))\nprint(deviations)\nstd = math.sqrt(sum(deviations)/len(deviations))\nprint(m, std)#-36.61435555749055 16.73778306287719\nplt.show()\n\n\n\n","repo_name":"colonbrack3t/Final-Year-Project","sub_path":"VR Balance Board Calibration project/Assets/BalanceBoard/plot_mouse.py","file_name":"plot_mouse.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"33721663376","text":"import torch\nimport torch.nn as nn\n\n# vae更新代码,更新点在于 mu与logvar 通过两个神经网络分别得出\nclass VAE_new(nn.Module):\n def __init__(self):\n super(VAE_new, self).__init__()\n self.fc1 = nn.Linear(784,256)\n self.fc2 = nn.Linear(256,64)\n self.fc31 = nn.Linear(64,10)\n self.fc32 = nn.Linear(64,10)\n self.relu = nn.ReLU()\n self.decoder = nn.Sequential(\n nn.Linear(10,64),\n nn.ReLU(),\n nn.Linear(64,256),\n nn.ReLU(),\n nn.Linear(256,784),\n nn.Sigmoid()\n )\n\n def encoder(self,x):\n h1 = self.relu(self.fc1(x))\n h2 = self.relu(self.fc2(h1))\n\n return self.relu(self.fc31(h2)),self.relu(self.fc32(h2))\n\n def reparamtrize(self,mu,logvar):\n return mu + logvar * torch.randn_like(logvar)\n\n def forward(self,x):\n batch_size = x.size(0)\n # flatten\n x = x.view(batch_size,784)\n mu,logvar = self.encoder(x)\n h_ = self.reparamtrize(mu,logvar)\n x_hat = self.decoder(h_)\n x_hat = x_hat.view(batch_size,1,28,28)\n\n # KL divergence\n # 因为我们想要逼近的是 N~(0,1) 所以 mu2=0,sigma2 = 1\n kld = 0.5 * torch.sum(\n torch.pow(mu, 2) +\n torch.pow(logvar, 2) -\n torch.log(1e-8 + torch.pow(logvar, 2)) - 1\n ) / (batch_size * 28 * 28)\n\n return x_hat,kld\n\n\n\n","repo_name":"sjt5285126/GridVGAE_system","sub_path":"autocoder/vae_update.py","file_name":"vae_update.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"17166870690","text":"import tkinter as tk\n\nclass Tag(tk.Tk):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.title(\"Tags\")\n\t\tself.text = tk.Text(self)\n\t\tself.text.pack(side=tk.TOP,fill='y')\n\t\t\n\t\tself.text.bind('',self.configuring_tags,'')# the empty string option means that the function should replace any other binding of the same shorcut key\n\n\t\tself.text.bind('',self.raise_selected)\n\t\tself.text.bind('',self.underline_selected)\n\t\tself.text.bind('',self.duplicate_text)\n\n\t\tself.text.bind('',self.search_word)\n\t\t\n\n\tdef configuring_tags(self,event = None):\n\t\tlineend = self.text.index(tk.INSERT)\n\t\tlineend = lineend.split('.')\n\t\tlineend = int(lineend[1])\n\t\tfor i in range(lineend):\n\t\t\tindex = '1.'+str(i)\n\t\t\tend= '1.end'\n\t\t\tself.text.tag_add('even',index,end)# args are: call tag,start_point of the tag, end point of the tag\n\t\tself.text.tag_configure('even',foreground= 'green')# args are: create tag,what the tag should affect \n\n\tdef raise_selected(self,event=None):\n\t\t\n\t\tselected_pos = self.text.tag_ranges('sel')# the range of the selection u made. returns a list of elements: starting index and ending index of the selected area\n\n\t\ttry:\n\t\t\tself.text.tag_add('raise',selected_pos[0],selected_pos[1])\n\t\texcept:#to prevent an error incase there is no selected area\n\t\t\tpass \n\t\tself.text.tag_configure('raise',offset=5)# the offset raises the selected text\n\n\t\treturn 'break' # overwrites any default event of the selected shorcut key\n\tdef underline_selected(self,event=None):\n\t\tself.text.tag_configure('underline',underline=1)\n\t\tselected_pos = self.text.tag_ranges('sel')# the range of the selection u made. returns a list of elements: starting index and ending index of the selected area\n\n\t\ttry:\n\t\t\tself.text.tag_add('underline',selected_pos[0],selected_pos[1])\n\t\texcept:\n\t\t\tpass\n\t\t\n\n\t\treturn 'break' # overwrites any default event of the selected shorcut key\n\n\t# duplicating a text\n\tdef duplicate_text(self,event):\n\t\tcursor_pos = self.text.index(tk.INSERT)\n\t\tcursor_pos = cursor_pos.split('.')\n\t\t\n\t\tselected_area = self.text.tag_ranges('sel')\n\t\tnew_pos = str(selected_area[1])\n\t\t\n\t\tnew_pos = new_pos.split('.')\n\t\t\n\t\tnew_pos = str(int(new_pos[0])+1)+'.0'\n\t\t\n\n\t\ttext_copy = self.text.get(selected_area[0],selected_area[1]) # returns the text within this indexes\n\t\t\n\n\t\tself.text.insert(new_pos,'\\n'+text_copy)\n\t\treturn 'break'\n\n\tdef search_word(self,event):\n\t\tself.text.tag_configure('color',foreground='purple')\n\t\tstart = 1.0\n\t\tidx = self.text.search('python',start,stopindex=tk.END)\n\t\twhile idx:\n\t\t\ttag_begin = idx\n\t\t\ttag_end = f'{idx}+6c'\n\t\t\tself.text.tag_add('color',tag_begin,tag_end)\n\n\t\t\tstart = tag_end\n\t\t\tidx = self.text.search('python',start,stopindex = tk.END)\n\n\n\t\nif __name__ == '__main__':\n\ttag = Tag()\n\ttag.mainloop()\n","repo_name":"jaykayudo/Flotex","sub_path":"demos/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"9888255306","text":"'''\nRestricted Boltzmann Machines\n'''\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\nfrom utils import sample_from_bernoulli, tf_xavier_init\n\n\nclass RBM():\n\n def __init__(self, vis_dim, hid_dim, w=None, vis_b=None, hid_b=None):\n self.vis_dim = vis_dim\n self.hid_dim = hid_dim\n if w is not None:\n self.w = w\n else:\n self.w = tfe.Variable(\n tf_xavier_init(self.vis_dim, self.hid_dim, const=4.0),\n name='rbm.w')\n if hid_b is not None:\n self.hid_b = hid_b\n else:\n self.hid_b = tfe.Variable(\n tf.zeros([self.hid_dim]), dtype=tf.float32, name='rbm.hid_b')\n if vis_b is not None:\n self.vis_b = vis_b\n else:\n self.vis_b = tfe.Variable(\n tf.zeros([self.vis_dim]), dtype=tf.float32, name='rbm.vis_b')\n\n # conditional distributions\n def vis2hid(self, v):\n return tf.nn.sigmoid(tf.matmul(v, self.w) + self.hid_b)\n\n def hid2vis(self, h):\n return tf.nn.sigmoid(tf.matmul(h, tf.transpose(self.w)) + self.vis_b)\n\n # Gibbs steps\n def gibbs_vhv(self, v_0):\n h_1 = sample_from_bernoulli(self.vis2hid(v_0))\n v_1 = sample_from_bernoulli(self.hid2vis(h_1))\n return h_1, v_1\n\n def gibbs_hvh(self, h_0):\n v_1 = sample_from_bernoulli(self.hid2vis(h_0))\n h_1 = sample_from_bernoulli(self.vis2hid(v_1))\n return v_1, h_1\n\n # marginalization\n def ulogprob_vis(self, v):\n wx_b = tf.matmul(v, self.w) + self.hid_b\n vbias_term = tf.einsum('ij,j->i', v, self.vis_b)\n hidden_term = tf.reduce_sum(tf.nn.softplus(wx_b), axis=1)\n return hidden_term + vbias_term\n\n def ulogprob_hid(self, h):\n wx_b = tf.matmul(h, tf.transpose(self.w)) + self.vis_b\n hbias_term = tf.einsum('ij,j->i', h, self.hid_b)\n vis_term = tf.reduce_sum(tf.nn.softplus(wx_b), axis=1)\n return vis_term + hbias_term\n\n # log partiation function\n def log_z_summing_h(self):\n assert (self.hid_dim <= 20)\n h_all = np.arange(2**self.hid_dim, dtype=np.int32)\n h_all = ((h_all.reshape(-1, 1) &\n (2**np.arange(self.hid_dim))) != 0).astype(np.float32)\n h_all = tf.constant(h_all[:, ::-1], dtype=tf.float32)\n log_p_h = self.ulogprob_hid(h_all)\n log_z = tf.reduce_logsumexp(log_p_h, axis=0)\n return log_z\n\n def log_z_summing_v(self):\n assert (self.vis_dim <= 20)\n v_all = np.arange(2**self.vis_dim, dtype=np.int32)\n v_all = ((v_all.reshape(-1, 1) &\n (2**np.arange(self.vis_dim))) != 0).astype(np.float32)\n v_all = tf.constant(v_all[:, ::-1], dtype=tf.float32)\n log_p_v = self.ulogprob_vis(v_all)\n log_z = tf.reduce_logsumexp(log_p_v, axis=0)\n return log_z\n\n # likelihood\n def logprob_vis(self, v, log_z):\n return self.ulogprob_vis(v) - log_z\n\n def logprob_hid(self, h, log_z):\n return self.ulogprob_hid(h) - log_z\n\n # energy function\n def energy(self, h, v):\n hbias_term = tf.einsum('ij,j->i', h, self.hid_b)\n vbias_term = tf.einsum('ij,j->i', v, self.vis_b)\n weight_term = tf.reduce_sum(tf.matmul(v, self.w) * h, axis=1)\n return -(hbias_term + vbias_term + weight_term)\n\n # free energy\n def free_energy(self, v):\n return -self.ulogprob_vis(v)\n\n # free energy for debug\n def _debug_free_energy(self, v):\n assert (self.hid_dim <= 20)\n assert (v.numpy().shape == (1, self.vis_dim))\n\n h_all = np.arange(2**self.hid_dim, dtype=np.int32)\n h_all = ((h_all.reshape(-1, 1) &\n (2**np.arange(self.hid_dim))) != 0).astype(np.float32)\n h_all = tf.constant(h_all[:, ::-1], dtype=tf.float32)\n v_dup = tf.tile(v, [2**self.hid_dim, 1])\n return -tf.reduce_logsumexp(-self.energy(h_all, v), axis=0)\n\n # get samples\n def get_h_from_v(self, v, burn_in_steps=100):\n for i in 
xrange(burn_in_steps):\n h, v = self.gibbs_vhv(v)\n return h.numpy()\n\n def get_h(self, num_samples, burn_in_steps=1000, random=True):\n v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)\n if random:\n v = sample_from_bernoulli(v + 0.5) # data average\n for i in xrange(burn_in_steps):\n h, v = self.gibbs_vhv(v)\n return h.numpy()\n\n def get_independent_samples(self,\n num_samples,\n burn_in_steps=100000,\n random=True,\n initial_v=None):\n if initial_v is not None:\n v = initial_v\n else:\n v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)\n if random:\n v = sample_from_bernoulli(v + 0.2) # data average\n\n for i in xrange(burn_in_steps):\n _, v = self.gibbs_vhv(v)\n return v.numpy()\n\n def get_independent_means(self,\n num_samples,\n burn_in_steps=100000,\n random=True):\n v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)\n if random:\n v = sample_from_bernoulli(v + 0.2) # data average\n for i in xrange(burn_in_steps):\n _, v = self.gibbs_vhv(v)\n h_1 = sample_from_bernoulli(self.vis2hid(v))\n v_1 = self.hid2vis(h_1)\n return v_1.numpy()\n\n def get_samples_single_chain(self,\n num_samples,\n adjacent_samples=10,\n steps_between_samples=1000,\n burn_in_steps=100000,\n random=True):\n assert num_samples % adjacent_samples == 0\n v = tf.zeros([1, self.vis_dim], dtype=tf.float32)\n if random:\n v = sample_from_bernoulli(v + 0.2) # data average\n for i in xrange(burn_in_steps):\n _, v = self.gibbs_vhv(v)\n sample_list = []\n for i in xrange(num_samples / adjacent_samples):\n for j in xrange(adjacent_samples):\n _, v = self.gibbs_vhv(v)\n sample_list.append(v.numpy())\n for i in xrange(steps_between_samples):\n _, v = self.gibbs_vhv(v)\n return np.vstack(sample_list)\n\n # for constrastive divergence training\n def cd_step(self, v, train_mc_steps):\n h = sample_from_bernoulli(self.vis2hid(v))\n h_list = [\n h,\n ]\n v_list = []\n for i in xrange(train_mc_steps):\n new_v, new_h = self.gibbs_hvh(h_list[-1])\n v_list.append(new_v)\n h_list.append(new_h)\n chain_end = tf.stop_gradient(v_list[-1])\n return chain_end\n\n def pcd_step(self, v, train_mc_steps, persistent):\n h_list = [\n persistent,\n ]\n v_list = []\n for i in xrange(train_mc_steps):\n new_v, new_h = self.gibbs_hvh(h_list[-1])\n v_list.append(new_v)\n h_list.append(new_h)\n chain_end = tf.stop_gradient(v_list[-1])\n return chain_end, tf.stop_gradient(h_list[-1])\n\n def cd_loss(self, v_0, v_n):\n return tf.reduce_mean(\n self.free_energy(v_0), axis=0) - tf.reduce_mean(\n self.free_energy(v_n), axis=0)\n\n # reconstruction\n def reconstruction_error(self, v_0):\n h_1 = sample_from_bernoulli(self.vis2hid(v_0))\n v_1_logits = tf.matmul(h_1, tf.transpose(self.w)) + self.vis_b\n return tf.reduce_mean(\n tf.reduce_sum(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=v_0, logits=v_1_logits),\n axis=1),\n axis=0)\n\n def params(self):\n return (self.hid_b, self.vis_b, self.w)\n\n\n# base rate RBM for AIS\n\n\nclass BRRBM(RBM):\n\n def __init__(self, vis_dim, hid_dim, data):\n self.vis_dim = vis_dim\n self.hid_dim = hid_dim\n self.w = tfe.Variable(tf.zeros([vis_dim, hid_dim]), dtype=tf.float32)\n self.hid_b = tfe.Variable(tf.zeros([self.hid_dim]), dtype=tf.float32)\n # MLE for the value of vis_b\n sample_mean = tf.reduce_mean(data, axis=0)\n # Smooth to make sure p(v) > 0 for every v\n sample_mean = tf.clip_by_value(sample_mean, 1e-5, 1 - 1e-5)\n self.vis_b = -tf.log(1. 
/ sample_mean - 1.)\n self.log_z = tf.reduce_sum(\n tf.nn.softplus(self.vis_b), axis=0) + self.hid_dim * np.log(2.)\n\n # get tf samples\n def get_independent_samples_tf(self, num_samples, burn_in_steps=100):\n v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)\n for i in xrange(burn_in_steps):\n _, v = self.gibbs_vhv(v)\n return v\n\n\n# Mix RBM for AIS\n\n\nclass MIXRBM(RBM):\n\n def tune(self, brrbm, rbm, weight):\n # adjust parameters of the mixed RBM\n n = brrbm.hid_dim\n self.vis_b = (1. - weight) * brrbm.vis_b + weight * rbm.vis_b\n self.hid_b = tf.concat(\n [(1. - weight) * brrbm.hid_b, weight * rbm.hid_b], axis=0)\n self.w = tf.concat([(1. - weight) * brrbm.w, weight * rbm.w], axis=1)\n","repo_name":"DoubleBlindReviewShareCode/anonymous_link_AdVIL_code","sub_path":"RBM.py","file_name":"RBM.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"8170625834","text":"import os, requests, time\n\nimport datetime\nfrom google.transit import gtfs_realtime_pb2\nfrom dotenv import load_dotenv, find_dotenv\nfrom protobuf_to_dict import protobuf_to_dict\n\n\nMTA_URL = 'http://datamine.mta.info/mta_esi.php'\nTIMES_TO_GET = 6\n\n\nclass TrainInfo:\n def __init__(self, api_key, feed_id, station):\n self.api_key = api_key\n self.feed_id = feed_id\n self.station = station\n self.feed_message = gtfs_realtime_pb2.FeedMessage()\n \n @staticmethod\n def get_train_time_with_label(train, arrival_time, now):\n minutes_until_train = (arrival_time - int(now)) // 60\n minutes = \"{}\".format(minutes_until_train)\n return \"{}: {}\".format(train, minutes)\n\n @staticmethod\n def get_train_time_minutes(arrival_time, now):\n minutes_until_train = (arrival_time - int(now)) // 60\n return \"{}\".format(minutes_until_train)\n\n @staticmethod\n def format_train_time(arrival_time):\n arrival_time = time.localtime(arrival_time)\n return time.strftime(\"%H:%M\", arrival_time)\n\n def get_train_time_data(self, train_data):\n train_time_data = list()\n for trains in train_data:\n trip_update = trains.get('trip_update')\n if not trip_update:\n continue\n\n route_id = trip_update['trip']['route_id']\n\n stop_time_update = trip_update['stop_time_update']\n for stop_info in stop_time_update:\n if stop_info.get('stop_id') == self.station:\n arrival = stop_info.get('arrival')\n if not arrival:\n continue\n train_time_data.append((route_id, arrival['time']))\n return train_time_data\n\n def get_train_time_strings(self, train_time_data):\n if len(train_time_data) < 1:\n return 'no times'\n\n train_time_data.sort(key=lambda route_time: route_time[1])\n\n now = time.time()\n\n train_output = list()\n\n for i, train_arrival_time in enumerate(train_time_data[:TIMES_TO_GET]):\n train, arrival_time = train_arrival_time\n minutes_until_arrival = (arrival_time - int(now)) / 60\n if minutes_until_arrival < 1:\n continue\n\n train_output.append(self.format_train_time(arrival_time))\n\n return ' '.join(train_output) + ' '\n\n def get_train_identifiers_for_all_feeds(self):\n def get_train_ids(feed_entities):\n for entity in feed_entities:\n trip_update = entity.get('trip_update')\n if not trip_update:\n continue\n trip = trip_update['trip']\n if not trip:\n continue\n route_id = trip.get('route_id')\n if route_id:\n yield route_id\n\n possible_feed_ids = range(1, 60)\n\n for feed_id in possible_feed_ids:\n feed = self.get_feed(feed_id=feed_id)\n if feed:\n train_ids = ','.join(set(get_train_ids(feed)))\n yield 'feed_id={}: {}'.format(feed_id, train_ids)\n\n def get_train_text(self):\n feed = self.get_feed()\n if not feed:\n # TODO log an exception\n return\n train_time_data = self.get_train_time_data(feed)\n return self.get_train_time_strings(train_time_data)\n\n def get_feed(self, feed_id=None):\n feed_id = feed_id or self.feed_id\n query_str = '?key={}&feed_id={}'.format(\n self.api_key, feed_id\n )\n response = requests.get(MTA_URL + query_str)\n\n try:\n self.feed_message.ParseFromString(response.content)\n subway_feed = protobuf_to_dict(self.feed_message)\n return subway_feed['entity']\n except Exception:\n return\n\n\nif __name__ == \"__main__\":\n load_dotenv(find_dotenv())\n MTA_API_KEY = os.environ['MTA_API_KEY']\n FEED_IDS = os.environ['FEED_IDS'].split(',')\n STATIONS = os.environ['STOPS'].split(',')\n\n if True: # TODO add flag\n for feed_id, station in zip(FEED_IDS, STATIONS):\n ti = TrainInfo(api_key=MTA_API_KEY,\n feed_id=feed_id,\n station=station)\n 
print(ti.get_train_text())\n else:\n ti = TrainInfo(api_key=MTA_API_KEY, feed_id=None, station=None)\n print('\\n'.join(list(ti.get_train_identifiers_for_all_feeds())))\n\n\n","repo_name":"redSlug/weather-reporter","sub_path":"server/client/train_info.py","file_name":"train_info.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"}
+{"seq_id":"19884063900","text":"#!/usr/bin/env python\n\"\"\"This plugin adds artifact functionality to the UI.\"\"\"\n\nimport itertools\nimport StringIO\n\nfrom grr.gui import renderers\nfrom grr.gui.plugins import fileview\nfrom grr.gui.plugins import forms\nfrom grr.gui.plugins import semantic\nfrom grr.lib import aff4\nfrom grr.lib import artifact\nfrom grr.lib import artifact_lib\nfrom grr.lib import parsers\nfrom grr.lib import rdfvalue\n\n\nclass ArtifactListRenderer(forms.MultiSelectListRenderer):\n \"\"\"Renderer for listing the available Artifacts.\"\"\"\n\n type = rdfvalue.ArtifactName\n\n artifact_template = (\"\"\"\n \n
\n
\n
\n | Labels | |
\n | Platforms | |
\n | Conditions | |
\n | Dependencies | |
\n | Links | |
\n | Output Type | |
\n
\n
Artifact Collectors
\n
\n
Artifact Processors
\n
\n
\"\"\")\n\n layout_template = (\n \"\"\"\n\"\"\")\n\n def Layout(self, request, response):\n \"\"\"Get available artifact information for display.\"\"\"\n # Get all artifacts that aren't Bootstrap and aren't the base class.\n self.artifacts = {}\n artifact.LoadArtifactsFromDatastore(token=request.token)\n for name, artifact_val in artifact_lib.ArtifactRegistry.artifacts.items():\n if set([\"Bootstrap\"]).isdisjoint(artifact_val.labels):\n self.artifacts[name] = artifact_val\n self.labels = artifact_lib.ARTIFACT_LABELS\n\n # Convert artifacts into a dict usable from javascript.\n artifact_dict = {}\n for artifact_name, artifact_val in self.artifacts.items():\n artifact_dict[artifact_name] = artifact_val.ToExtendedDict()\n processors = []\n for processor in parsers.Parser.GetClassesByArtifact(artifact_name):\n processors.append({\"name\": processor.__name__,\n \"output_types\": processor.output_types,\n \"doc\": processor.GetDescription()})\n artifact_dict[artifact_name][\"processors\"] = processors\n\n # Skip the our parent and call the TypeDescriptorFormRenderer direct.\n response = renderers.TypeDescriptorFormRenderer.Layout(self, request,\n response)\n return self.CallJavascript(response, \"ArtifactListRenderer.Layout\",\n prefix=self.prefix,\n artifacts=artifact_dict,\n supported_os=artifact_lib.SUPPORTED_OS_LIST,\n labels=self.labels)\n\n\nclass ArtifactRDFValueRenderer(semantic.RDFValueRenderer):\n \"\"\"A special renderer for ArtifactRDFValues.\"\"\"\n\n classname = \"Artifact\"\n\n layout_template = renderers.Template(\n \"\"\"\n\"\"\"\n + ArtifactListRenderer.artifact_template + \"\"\"\n
\n\"\"\")\n\n def Layout(self, request, response):\n self.artifact_str = self.proxy.ToPrettyJson()\n response = super(ArtifactRDFValueRenderer, self).Layout(request, response)\n return self.CallJavascript(response, \"ArtifactRDFValueRenderer.Layout\",\n artifact_str=self.artifact_str)\n\n\nclass ArtifactRawRDFValueRenderer(semantic.RDFValueRenderer):\n \"\"\"A renderer for showing JSON format for ArtifactRDFValues.\"\"\"\n\n classname = \"Artifact\"\n\n layout_template = renderers.Template(\n \"{{this.artifact_str|escape}}\")\n\n def Layout(self, request, response):\n self.artifact_str = self.proxy.ToPrettyJson(extended=True)\n super(ArtifactRawRDFValueRenderer, self).Layout(request, response)\n\n\nclass ArtifactManagerView(renderers.TableRenderer):\n \"\"\"Artifact Manager table with toolbar.\"\"\"\n\n description = \"Artifact Manager\"\n behaviours = frozenset([\"Configuration\"])\n order = 50\n\n toolbar = \"ArtifactManagerToolbar\"\n\n def __init__(self, **kwargs):\n super(ArtifactManagerView, self).__init__(**kwargs)\n self.AddColumn(semantic.RDFValueColumn(\"Artifact Name\", width=\"5%\"))\n self.AddColumn(semantic.RDFValueColumn(\n \"Artifact Details\", width=\"50%\", renderer=ArtifactRDFValueRenderer))\n self.AddColumn(semantic.RDFValueColumn(\n \"Artifact Raw\", width=\"40%\", renderer=ArtifactRawRDFValueRenderer))\n\n def BuildTable(self, start_row, end_row, request):\n \"\"\"Builds table artifacts.\"\"\"\n artifact_urn = rdfvalue.RDFURN(\"aff4:/artifact_store\")\n try:\n collection = aff4.FACTORY.Open(artifact_urn,\n aff4_type=\"RDFValueCollection\",\n token=request.token)\n except IOError:\n return\n\n self.size = len(collection)\n row_index = start_row\n for value in itertools.islice(collection, start_row, end_row):\n self.AddCell(row_index, \"Artifact Name\", value.name)\n self.AddCell(row_index, \"Artifact Details\", value)\n self.AddCell(row_index, \"Artifact Raw\", value)\n row_index += 1\n\n def Layout(self, request, response):\n \"\"\"Populate the table state with the request.\"\"\"\n if self.toolbar:\n tb_cls = renderers.Renderer.classes[self.toolbar]\n tb_cls().Layout(request, response)\n return super(ArtifactManagerView, self).Layout(request, response)\n\n\nclass ArtifactManagerToolbar(renderers.TemplateRenderer):\n \"\"\"A navigation enhancing toolbar.\n\n Internal State:\n - aff4_path: The path we are viewing now in the table.\n \"\"\"\n post_parameters = [\"aff4_path\"]\n event_queue = \"file_select\"\n\n layout_template = renderers.Template(\"\"\"\n\n\n\n\n\n
\n\n\"\"\")\n\n def Layout(self, request, response):\n response = super(ArtifactManagerToolbar, self).Layout(request, response)\n return self.CallJavascript(response, \"ArtifactManagerToolbar.Layout\")\n\n\nclass DeleteArtifactsConfirmationDialog(renderers.ConfirmationDialogRenderer):\n \"\"\"Dialog that asks for confirmation to delete uploaded artifacts.\n\n Note that this only deletes artifacts that have been uploaded via the\n ArtifactManager. Artifacts loaded from the artifacts directory are\n unaffected.\n \"\"\"\n\n content_template = renderers.Template(\"\"\"\nAre you sure you want to delete all\nuploaded artifacts?
\n\"\"\")\n\n ajax_template = renderers.Template(\"\"\"\nUploaded artifacts were deleted successfully.
\n\"\"\")\n\n def RenderAjax(self, request, response):\n aff4.FACTORY.Delete(\"aff4:/artifact_store\", token=request.token)\n return self.RenderFromTemplate(self.ajax_template, response,\n unique=self.unique, this=self)\n\n\nclass ArtifactJsonUploadView(fileview.UploadView):\n \"\"\"Renders a binary upload page.\"\"\"\n post_parameters = []\n upload_handler = \"ArtifactUploadHandler\"\n storage_path = \"aff4:/artifact_store\"\n\n\nclass ArtifactUploadHandler(fileview.UploadHandler):\n \"\"\"Handles upload of a binary config file such as a driver.\"\"\"\n\n def RenderAjax(self, request, response):\n \"\"\"Handle the upload via ajax.\"\"\"\n try:\n self.uploaded_file = request.FILES.items()[0][1]\n content = StringIO.StringIO()\n for chunk in self.uploaded_file.chunks():\n content.write(chunk)\n self.dest_path = artifact.UploadArtifactYamlFile(\n content.getvalue(), token=request.token)\n\n return renderers.TemplateRenderer.Layout(self, request, response,\n self.success_template)\n except (IOError, artifact_lib.ArtifactDefinitionError) as e:\n self.error = \"Could not write artifact to database %s\" % e\n return renderers.TemplateRenderer.Layout(self, request, response,\n self.error_template)\n","repo_name":"ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert","sub_path":"gui/plugins/artifact_view.py","file_name":"artifact_view.py","file_ext":"py","file_size_in_byte":10809,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"}
+{"seq_id":"20435045990","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# coding=utf8\n\n\"\"\"\nPython协程示例\n\"\"\"\n\n\ndef display():\n r = ''\n while True:\n n = yield r # 获取send的参数n,并且返回r\n\n if not n:\n return\n print('[----]display %d' % n)\n r = 'Next'\n print('You can not see me.')\n\n\ndef sendMessage(c):\n c.send(None) # 启动生成器\n n = 0\n while n < 5:\n n = n + 1\n print('[SEND]send %d' % n)\n r = c.send(n)\n print('[SEND]get: %s' % r)\n c.close()\n\n\ndef demo1():\n d = display # 和协程用法无关,仅为对比\n print(d) # \n\n c = display() # 因为函数里包含yield,因此这里解释器并不会掉用(也并不是获取函数指针)\n print(c) # \n sendMessage(c)\n\n\nif __name__ == '__main__':\n demo1()\n","repo_name":"pengyuwei/learning-backend","sub_path":"python/thread/coroutines.py","file_name":"coroutines.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"19414389966","text":"import numpy as np\r\nimport pickle\r\nimport time\r\n\r\n\r\ndef pq(data, P, init_centroids, max_iter):\r\n def Split_P_data(Data_File, p):\r\n data = np.array(Data_File)\r\n data = np.array_split(data, p, axis=1)\r\n return data\r\n\r\n def Update_centr(p, centroids, data, N_cluster):\r\n for i_p in range(p):\r\n for j_index, j_row in enumerate(data[i_p]):\r\n # one of data to all centers distance\r\n one_L1_dis = np.sum(abs(centroids[i_p] - j_row), axis=1)\r\n one_L1 = np.where(one_L1_dis == np.min(one_L1_dis, axis=0))\r\n N_cluster[i_p][j_index] = one_L1[0][0]\r\n for i_p in range(p):\r\n for j_index, j_row in enumerate(centroids[i_p]):\r\n indexs = np.where(N_cluster[i_p] == j_index)\r\n if len(indexs[0]) == 0:\r\n continue\r\n else:\r\n for d_index, d in enumerate(indexs[0]):\r\n if d_index == 0:\r\n line = np.array([data[i_p][d]])\r\n else:\r\n line = np.vstack((line, data[i_p][d]))\r\n # update onelne of centroid\r\n centroids[i_p][j_index] = np.median(line, axis=0)\r\n return centroids, N_cluster\r\n\r\n def K_means(p, centroids, data, N_cluster, max_iter):\r\n for i in range(max_iter):\r\n centroids, N_cluster = Update_centr(p, centroids, data, N_cluster)\r\n # final half update N_cluster\r\n for i_p in range(p):\r\n for j_index, j_row in enumerate(data[i_p]):\r\n # one of data to all centers distance\r\n one_L1_dis = np.sum(abs(centroids[i_p] - j_row), axis=1, dtype='float32')\r\n # find the min one for this row\r\n one_L1 = np.where(one_L1_dis == np.min(one_L1_dis, axis=0))\r\n N_cluster[i_p][j_index] = one_L1[0][0]\r\n return centroids, N_cluster\r\n data = Split_P_data(data, P)\r\n centroids = np.array(init_centroids, dtype='float32')\r\n # we can get p*N*1 array\r\n N_cluster = np.zeros([len(data[0]), P], dtype='uint8')\r\n N_cluster = np.array_split(N_cluster, P, axis=1)\r\n # assign value of N_cluster\r\n centroids, N_cluster = K_means(P, centroids, data, N_cluster, max_iter)\r\n a = N_cluster[0]\r\n for i in range(len(N_cluster)):\r\n if i == 0:\r\n continue\r\n a = np.hstack((a, N_cluster[i]))\r\n N_cluster = a\r\n # codebooks code\r\n return centroids, N_cluster\r\n\r\n\r\ndef query(queries, codebooks, codes, T):\r\n def caculate_one_p_line_dis(queries, codebooks, p):\r\n queries = np.array_split(queries, p, axis=0)\r\n a = np.zeros([256, p])\r\n for i in range(p):\r\n a[:, i] = np.sum(abs(codebooks[i] - queries[i]), axis=1)\r\n return a\r\n\r\n p = len(codes[0])\r\n QKP_dis_table = np.zeros([len(queries), 256, p])\r\n test = np.zeros([len(queries), 256, p])\r\n h = 0\r\n for i in range(len(queries)):\r\n one_line_queries = queries[i].T\r\n QKP_dis_table[i] = caculate_one_p_line_dis(one_line_queries, codebooks, p)\r\n\r\n dis_query_n = np.zeros([len(codes), 1])\r\n q_dis_query_n = np.zeros([len(queries), len(codes), 1])\r\n\r\n for q_index in range(len(queries)):\r\n for i in range(len(codes)):\r\n one_line_codes = []\r\n p_index = 0\r\n for j in codes[i]:\r\n one_line_codes.append(QKP_dis_table[q_index][j][p_index])\r\n p_index += 1\r\n dis_query_n[i] = sum(one_line_codes)\r\n q_dis_query_n[q_index] = dis_query_n\r\n sort_q_n = np.zeros([len(queries), len(codes), 1])\r\n for i in range(len(queries)):\r\n sort_q_dis_query_n = np.argsort(q_dis_query_n[i].T)\r\n sort_q_n[i] = sort_q_dis_query_n.T\r\n answer = set()\r\n answers = []\r\n for q_index in range(len(queries)):\r\n answer = set()\r\n t = 1\r\n extra = T\r\n for i in range(T):\r\n answer.add(int(sort_q_n[q_index][i][0]))\r\n last_one = sort_q_n[q_index][i][0]\r\n\r\n while 
t:\r\n\r\n next_one = sort_q_n[q_index][extra][0]\r\n if q_dis_query_n[q_index][int(next_one)][0] == q_dis_query_n[q_index][int(last_one)][0]:\r\n answer.add(int(next_one))\r\n extra += 1\r\n else:\r\n t = 0\r\n\r\n answers.append(answer)\r\n return answers\r\n\r\n\r\n# How to run your implementation for Part 1\r\nwith open('./toy_example/Data_File', 'rb') as f:\r\n data = pickle.load(f, encoding='bytes')\r\nwith open('./toy_example/Centroids_File', 'rb') as f:\r\n centroids = pickle.load(f, encoding='bytes')\r\nstart = time.time()\r\ncodebooks, codes = pq(data, P=2, init_centroids=centroids, max_iter=20)\r\nend = time.time()\r\ntime_cost_1 = end - start\r\nprint(f\"Part1: {time_cost_1} s\")\r\n\r\n# How to run your implementation for Part 2\r\nwith open('./toy_example/Query_File', 'rb') as f:\r\n queries = pickle.load(f, encoding='bytes')\r\nstart = time.time()\r\ncandidates = query(queries, codebooks, codes, T=10)\r\nend = time.time()\r\ntime_cost_2 = end - start\r\nprint(f\"Part2: {time_cost_2} s\")\r\nprint(candidates)\r\n","repo_name":"RJY66/COMP9318-20T1","sub_path":"9318Project1.py","file_name":"9318Project1.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"17614398070","text":"import configparser\nimport gzip\nimport json\nimport os\nimport shutil\nimport time\n\nfrom TwitterAPI import TwitterAPI\n\n# Set up constants\n\nconfig = configparser.ConfigParser()\nconfig.read('./config/api_auth.cf')\nconsumer_key = config['AUTH']['consumer_key']\nconsumer_secret = config['AUTH']['consumer_secret']\naccess_token_key = config['AUTH']['access_token_key']\naccess_token_secret = config['AUTH']['access_token_secret']\n\nconfig.read('./config/conf.cf')\nwrite_file_name = config['FILE LOCS']['dirty_dataset_dir'] + '/dirty'\n# Get auth tokens, etc., initialize Twitter API connections\napi = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)\n\n# Get from the specified endpoint\nreq = api.request('statuses/sample', {})\n\n# Function to scrape, used to help restart scrapinging in case of error\ntime_start = time.time()\nnum_file = int(float(config['DATA INFO']['start_num']))\n\n\ndef scrape_stuff():\n counter = 0\n time_file = time.time()\n global num_file\n write_file = open(write_file_name + str(num_file) + '.txt', 'a+')\n time_elapsed = time.time()\n\n # Iterates over items given in request\n for item in req:\n if 'delete' in item or ('lang' in item and item['lang'] != 'en'):\n continue\n\n # Log progress\n if counter % 100 == 0:\n print('Seconds since start/last file/last 100: ' + str(time.time() - time_start) + ' // ' + str(\n time.time() - time_file) + ' // ' + str(time.time() - time_elapsed))\n print('Current progress: ' + str(counter))\n time_elapsed = time.time()\n counter += 1\n write_file.write(json.dumps(item) + '\\n')\n\n # Every 100,000 good tweets transfer them to a zip file\n if counter % 100000 == 0 and counter > 0:\n write_file.close()\n with open(write_file_name + str(num_file) + '.txt', 'rb') as f_in:\n with gzip.open(write_file_name + str(num_file) + '.txt.gz', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n # Remove uncompressed\n os.remove(write_file_name + str(num_file) + '.txt')\n num_file += 1\n write_file = open(write_file_name + str(num_file) + '.txt', 'a+')\n time_file = time.time()\n\n\n# Loop to ensure continuous scraping\nwhile True:\n try:\n print('scraping')\n scrape_stuff()\n except Exception:\n req = api.request('statuses/sample', {})\n continue\n","repo_name":"ultraeric/TwitterBot","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"18611406136","text":"# -*- coding:utf-8 -*-\n\nimport socket\n\ndef verify(protocol,ip,port):\n url = protocol+'://'+ip+':'+str(port)\n timeout = 10\n print('testing if web container arbitrary file read vul')\n try:\n socket.setdefaulttimeout(timeout)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, int(port)))\n flag = b\"GET /../../../../../../../../../etc/passwd HTTP/1.1\\r\\n\\r\\n\"\n s.send(flag)\n data = s.recv(1024)\n s.close()\n if b'root:' in data and b'nobody:' in data:\n msg = 'There is web container arbitrary file read vul on url: ' +url+ ' .'\n number = 'v30'\n print(msg)\n return True,url,number,msg\n else:\n msg = 'There is no web container arbitrary file read vul'\n number = 'v0'\n return False,url,number,msg\n except Exception as e:\n msg = str(e)\n number = 'v0'\n return False,url,number,msg\n\n","repo_name":"7hang/Python-crack","sub_path":"V-Scrack/exp/payload/webfileread.py","file_name":"webfileread.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"}
+{"seq_id":"37638792891","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass FPN(nn.Module):\n def __init__(self, C3_inplanes, C4_inplanes, C5_inplanes, planes=256):\n super(FPN, self).__init__()\n # planes = 256 channels\n self.P3_1 = nn.Conv2d(C3_inplanes, planes, kernel_size=1, padding=0)\n self.P3_2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)\n self.P4_1 = nn.Conv2d(C4_inplanes, planes, kernel_size=1, padding=0)\n self.P4_2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)\n self.P5_1 = nn.Conv2d(C5_inplanes, planes, kernel_size=1, padding=0)\n self.P5_2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)\n self.P6 = nn.Conv2d(C5_inplanes, planes, kernel_size=3, stride=2, padding=1)\n self.P7 = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(planes, planes, kernel_size=3, stride=2, padding=1))\n\n def forward(self, inputs):\n [C3, C4, C5] = inputs\n P5 = self.P5_1(C5)\n P4 = self.P4_1(C4)\n P4 = F.interpolate(P5, size=(P4.shape[2], P4.shape[3]),\n mode='nearest') + P4\n P3 = self.P3_1(C3)\n P3 = F.interpolate(P4, size=(P3.shape[2], P3.shape[3]),\n mode='nearest') + P3\n P6 = self.P6(C5)\n P7 = self.P7(P6)\n\n P5 = self.P5_2(P5)\n P4 = self.P4_2(P4)\n P3 = self.P3_2(P3)\n\n del C3, C4, C5\n return [P3, P4, P5, P6, P7]\n\n\nif __name__ == \"__main__\":\n # Img size 672*640 -> C1 168*160 -> C2 168*160\n # -> C3 84*80 -> C4 42*40 -> C5 21*20\n # -> P3 84*80 -> P4 42*40 -> P5 21*20 -> P6 11*10 -> P7 6*5\n C3 = torch.randn([2, 128 * 4, 84, 80])\n C4 = torch.randn([2, 256 * 4, 42, 40])\n C5 = torch.randn([2, 512 * 4, 21, 20])\n\n model = FPN(128 * 4, 256 * 4, 512 * 4)\n out = model([C3, C4, C5])\n print(\"len(out):\", len(out))\n for i in range(len(out)):\n print(i + 1, out[i].shape)\n print(out[i])\n # torch.Size([2, 256, 84, 80])\n # torch.Size([2, 256, 42, 40])\n # torch.Size([2, 256, 21, 20])\n # torch.Size([2, 256, 11, 10])\n # torch.Size([2, 256, 6, 5])\n","repo_name":"HanXiaoyiGitHub/Simple-CV-Pytorch-master","sub_path":"models/detection/RetinaNet/neck/FPN.py","file_name":"FPN.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"4"}
+{"seq_id":"21322869730","text":"import unittest\nimport sys\nimport copy\nsys.path.append(\"../src\")\n\nfrom contextShim import *\nfrom util.util import *\n\nclass TestContextShim(unittest.TestCase):\n def setUp(self):\n self.c = ContextShim()\n \n # SETUP\n db = {\"GroupsEnumerated\":3,\n \"Group0\":100,\"Group1\":102,\"Group2\":103,\n \"IdsAggregated\":5,\n \"Id0\":10, \"Id1\":20, \"Id2\":30, \"Id3\":40, \"Id4\":50\n }\n self.summary = ContextSummary(1, db)\n self.s = ContextSummarySerializer()\n \n db1 = {\"GroupsEnumerated\":3,\n \"Group0\":100,\"Group1\":101,\"Group2\":102\n }\n self.summary1 = ContextSummary(2, db1)\n self.summary1.setTimestamp(time.time())\n time.sleep(0.01)\n db2 = {\"GroupsEnumerated\":3,\n \"Group0\":100,\"Group1\":102,\"Group2\":103\n }\n self.summary2 = ContextSummary(3, db2)\n self.summary2.setTimestamp(time.time())\n \n self.group = None\n \n def test_getContextBytes(self):\n \"\"\"\n Set contextHandler for the shim, and the shim will give you\n correct bytes to send\n \"\"\"\n contextHandler = self.c.getContextHandler()\n contextHandler.setMyContext(self.summary)\n contextHandler.setReceivedSummaries({2:self.summary1, 3:self.summary2})\n self.summary1.setHops(1) # only shorter hops can be included\n self.summary2.setHops(1) # only shorter hops can be included\n \n # 100 is a group, and 1 has group0(100)\n # so addGroupDefinition adds 1 into the member of 100\n g = GroupDefinition(100)\n contextHandler.addGroupDefinition(g)\n self.group = contextHandler.get(100)\n \n numberToSend = contextHandler.getSummariesToSend()\n self.assertEqual(4, len(numberToSend))\n res = self.c.getContextBytes()\n self.assertEqual(448, len(res))\n return res\n \n def test_setprocessContextBytes(self):\n # get the stream buffer\n res = self.test_getContextBytes()\n summaries = self.c.processContextBytes(res)\n expecteds = [self.summary, self.summary2, self.summary1, self.group]\n hit = 0\n for summary in summaries:\n for expected in expecteds:\n # We can't compare summary and expected one by one\n # as summary has +1 in hops because processContextBytes increases it by 1\n \n #print summary\n #print expected\n if summary.getId() == expected.getId():\n hit += 1\n\n self.assertTrue(hit == 4)\n \n def test_sameExceptHops(self):\n summary = copy.deepcopy(self.summary)\n summary.setHops(100)\n self.assertTrue(summary.sameExceptHops(self.summary))\n \nif __name__ == \"__main__\":\n unittest.main(verbosity=2)","repo_name":"prosseek/GrapevinePython","sub_path":"test/testContextShim.py","file_name":"testContextShim.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"17106230085","text":"#MERGE THE HESSIAN FILTERED + THRESHOLDED IMAGES\nimport numpy as np\nimport nibabel as nib\nimport os\nfrom os import path\n\nLOWER_HESS_DIR = \"\" #PATH TO RESAMPLED + (LOWER SIGMA) HESSIAN FILTERED + THRESHOLDED DATA\nUPPER_HESS_DIR = \"\" #PATH TO RESAMPLED + (UPPER SIGMA) HESSIAN FILTERED + THRESHOLDED DATA\nMERGED_DIR = \"\" ##PATH TO RESAMPLED + COMPLETE HESSIAN FILTERED + THRESHOLDED DATA\n\nLOWER_HESS_IMG = sorted(os.listdir(LOWER_HESS_DIR))\nUPPER_HESS_IMG = sorted(os.listdir(UPPER_HESS_DIR))\n\nnew = list(zip(LOWER_HESS_IMG, UPPER_HESS_IMG))\n\nfor i, j in new:\n if \".nii.gz\" in i and j:\n lower_hess_img_dir = path.join(LOWER_HESS_DIR, i) #create directory for each file name as iterates through 'if' command\n upper_hess_img_dir = path.join(UPPER_HESS_DIR, j)\n MRA_l_hess = nib.load(lower_hess_img_dir) #load nifty file\n lower_hess = MRA_l_hess.get_fdata() #numpy array\n MRA_u_hess = nib.load(upper_hess_img_dir) # load nifty file\n upper_hess = MRA_u_hess.get_fdata() # numpy array\n full_img = lower_hess + upper_hess\n binary_img = np.where(full_img > 0.0, 1.0, 0.0)\n split_file_name = i.split('.nii')\n new_file_name = (split_file_name[0] + \"_merged\" + \".nii.gz\")\n new_out_dir = path.join(MERGED_DIR, new_file_name)\n new_img_nii = nib.Nifti1Image(binary_img, MRA_u_hess.affine) # produces new nifti file from array\n nib.save(new_img_nii, new_out_dir) # saves the nifti file in folder\n print(f'Done merging of: {i} and {j}')\n\n\n\n","repo_name":"georgiakenyon/Segmentation-method-for-cerebral-blood-vessels-from-MRA-using-hysteresis","sub_path":"MRA_Segmentation_Github/merge_hess_thresh.py","file_name":"merge_hess_thresh.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"72466677556","text":"\"\"\"\nCreated on Thu Jun 4 16:38:47 2020\n\n@author: antoinecollin\n\"\"\"\n\nfrom typing import Literal\n\nimport anndata\nimport numpy as np\nimport pandas as pd\n\ntry:\n import compute # Prevents circular import\nexcept ImportError:\n from . import compute\n\n\ndef mean_celltype(\n adata: anndata, partition_key: str = \"CellType\", add_adata: bool = True\n):\n \"\"\"\n Computes the average gene expression by celltypes\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n\n partition_key\n The key in adata.obs corresponding to the annotations to be used.\n\n gene_id_key\n The key in adata.obs corresponding to the gene ID column. Default\n will use adata.var.index.\n\n add_adata\n Indicate if the average matrix should be added to the varm field\n and its index to the uns field of adata.\n\n Returns\n -------\n average_by_celltype\n The mean expression by celltype matrix with format celltypes x genes.\n \"\"\"\n celltypes = adata.obs[partition_key].cat.categories\n average_by_celltype = pd.DataFrame([], columns=list(adata.var.index))\n i = 0\n idx = []\n for cell in celltypes:\n if sum(adata.obs[partition_key] == cell) != 0:\n reduced_adata = adata[adata.obs[partition_key] == cell, :]\n mean_expr = np.asarray(reduced_adata.X.mean(axis=0))\n mean_expr = mean_expr.flatten()\n average_by_celltype.loc[i] = mean_expr\n idx.append(cell)\n i += 1\n average_by_celltype.index = idx\n if add_adata:\n adata.varm[f\"ave_celltype_counts_{partition_key}\"] = np.array(\n average_by_celltype.transpose()\n )\n adata.uns[\n f\"ave_celltype_index_{partition_key}\"\n ] = average_by_celltype.index\n return average_by_celltype\n\n\ndef get_average_celltype_counts(adata, partition_key: str = \"CellType\"):\n \"\"\"\n Gets the mean expression by celltype matrix of adata. If it's already\n in the adata object, fetches it. If it's not, computes it and adds it\n to the adata object in varm with labels in uns\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n\n partition_key\n The key in adata.obs corresponding to the annotations to be used.\n\n gene_id_key\n The key in adata.obs corresponding to the gene ID column. 
Default\n will use adata.var.index.\n\n Returns\n -------\n average_by_celltype\n The mean expression by celltype matrix.\n \"\"\"\n try:\n adata.varm[f\"ave_celltype_counts_{partition_key}\"]\n except KeyError:\n average_by_celltype = mean_celltype(\n adata, partition_key=partition_key, add_adata=True\n )\n else:\n average_by_celltype = pd.DataFrame(\n adata.varm[f\"ave_celltype_counts_{partition_key}\"].transpose(),\n columns=adata.var.index,\n )\n average_by_celltype.index = adata.uns[\n f\"ave_celltype_index_{partition_key}\"\n ]\n return average_by_celltype\n\n\ndef get_anndata(adata_filename: str):\n \"\"\"\n Fetches the anndata file\n\n Parameters\n ----------\n adata_filename\n\n Returns\n -------\n The Anndata object\n\n \"\"\"\n adata = anndata.read_h5ad(\"\")\n return adata\n\n\ndef get_markers(markers_filename: str):\n \"\"\"\n Fetches the markers file\n\n Parameters\n markers_filename\n\n\n ----------\n markers_filename\n\n Returns\n -------\n A dict containing the markers list with their corresponding celltype\n\n \"\"\"\n # markers = dict(csv.read(...))\n # return markers\n\n\ndef get_spe(\n adata, spe_metric: Literal[\"shannon\", \"tau\", \"gini\"], partition_key\n):\n specs = {\n \"shannon\": compute.shannon_average,\n \"tau\": compute.tau_average,\n \"gini\": compute.gini_average,\n }\n try:\n adata.var[f\"{spe_metric}_{partition_key}\"]\n except KeyError:\n spe_func = specs[spe_metric]\n spe_list = spe_func(adata, partition_key)\n adata.var[f\"{spe_metric}_{partition_key}\"] = spe_list\n else:\n spe_list = adata.var[f\"{spe_metric}_{partition_key}\"]\n return spe_list\n","repo_name":"becavin-lab/checkatlas","sub_path":"checkatlas/metrics/specificity/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"}
+{"seq_id":"3988813611","text":"from collections import defaultdict\nclass Graph:\n\n def __init__(self):\n self.graph = defaultdict(list)\n\n def addEdge(self, u, v):\n self.graph[u].append(v)\n # self.graph[v].append(u)\n\n return self.graph\n\ndef dfsutil(graph,v,visited):\n\n print(v,end=\" \")\n\n for neighbor in graph[v]:\n if neighbor not in visited:\n visited.add(neighbor)\n dfsutil(graph,neighbor,visited)\n\ndef dfs(graph):\n visited = set()\n\n for i in range(1,len(graph)+1):\n if i not in visited:\n visited.add(i)\n dfsutil(graph,i,visited)\n\ng = Graph()\ng.addEdge(1, 2)\ng.addEdge(2, 4)\ng.addEdge(2, 7)\ng.addEdge(4, 6)\ng.addEdge(6, 7)\ngraph = g.addEdge(3, 5)\ndfs(graph)","repo_name":"vikramiiitm/CP","sub_path":"graph/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"34272876587","text":"import os\nfrom pickle import Pickler, Unpickler\nfrom typing import Dict, Tuple\nimport numpy as np\n\nfrom typing.io import BinaryIO\n\nfrom opinions.graph.graphs import GraphManager\nfrom opinions.interfaces.interfaces import SimulationListener\nfrom opinions.objects.opinion import OpinionManager\nfrom opinions.objects.reference import ReferenceManager\n\n\nclass OpinionsIO (SimulationListener):\n structurePickler: Pickler = None\n structureUnpickler: Unpickler = None\n topologyPickler: Pickler = None\n topologyUnpickler: Unpickler = None\n xPickler: Pickler = None\n xUnpickler: Unpickler = None\n\n outfiles: Dict[str, BinaryIO]\n\n def __init__(self):\n pass\n\n def open_input_files(self, args: Dict) -> Dict:\n try:\n in_folder_arg = args['--inFolder']\n if not os.path.exists(in_folder_arg):\n raise FileNotFoundError(f'Folder not found {in_folder_arg}')\n\n run_id = str(args['--id'])\n # run_folder = os.path.join(in_folder_arg, run_id)\n # if not os.path.exists(run_folder):\n # raise FileNotFoundError(f'Folder not found {run_folder}')\n\n topology_file_path = os.path.join(in_folder_arg, 'topology-%s.log' % (run_id,))\n # Ugly solution that works only on windows\n # topology_file_path = '\\\\\\\\?\\\\'+topology_file_path\n topology_file = open(topology_file_path, 'rb')\n structure_file_path = os.path.join(in_folder_arg, 'structure-%s.log' % (run_id,))\n structure_file = open(structure_file_path, 'rb')\n x_file_path = os.path.join(in_folder_arg, 'x-%s.log' % (run_id,))\n x_file = open(x_file_path, 'rb')\n # d_file # Do you really want it?\n except FileNotFoundError as err:\n raise RuntimeError(\n # f\"Error: {err}\\n\"\n f\"If you are using windows. This may be caused by a problem related to long path names.\\n\"\n f\"To fix it on windows 10 1607 and later, The registry key \"\n f\"HKLM\\\\SYSTEM\\\\CurrentControlSet\\\\Control\\\\FileSystem LongPathsEnabled \"\n f\"(Type: REG_DWORD) must exist and be set to 1.\\n\"\n f\"Refer to https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?#enable-long-paths-in-windows-10-version-1607-and-later for details.\"\n )\n\n ret = dict()\n ret['topologyFile'] = topology_file\n ret['structureFile'] = structure_file\n ret['xFile'] = x_file\n # ret['dFile'] = d_file # Really need it?\n\n self.structureUnpickler = Unpickler(structure_file)\n self.topologyUnpickler = Unpickler(topology_file)\n self.xUnpickler = Unpickler(x_file)\n\n return ret\n\n def open_output_files(self, args: Dict, protocol=4) -> Dict:\n out_folder_arg = args['--outFolder']\n if not os.path.exists(out_folder_arg):\n os.makedirs(out_folder_arg, exist_ok=True)\n\n run_id = args['--id']\n topology_file_path = os.path.join(out_folder_arg, 'topology-%s.log' % (run_id,))\n topology_file = open(topology_file_path, 'wb')\n structure_file_path = os.path.join(out_folder_arg, 'structure-%s.log' % (run_id,))\n structure_file = open(structure_file_path, 'wb')\n x_file_path = os.path.join(out_folder_arg, 'x-%s.log' % (run_id,))\n x_file = open(x_file_path, 'wb')\n # d_file # Do you really want it?\n\n ret = dict()\n ret['topologyFile'] = topology_file\n ret['structureFile'] = structure_file\n ret['xFile'] = x_file\n # ret['dFile'] = d_file # Really need it?\n self.outfiles = ret\n\n self.structurePickler = Pickler(structure_file, protocol=protocol)\n self.topologyPickler = Pickler(topology_file, protocol=protocol)\n self.xPickler = Pickler(x_file, protocol=protocol)\n\n return ret\n\n def simulation_starting(self, state):\n \"\"\"Save graph_manager, 
reference_manager, and opinion_manager\"\"\"\n self.topologyPickler.dump(state[0])\n self.structurePickler.dump(state[1])\n self.structurePickler.dump(state[2])\n\n def retrieve_structure_and_topology(self) -> Tuple[GraphManager, ReferenceManager, OpinionManager]:\n \"\"\"retrieve graph_manager, reference_manager, and opinion_manager\"\"\"\n return self.topologyUnpickler.load(), self.structureUnpickler.load(), self.structureUnpickler.load()\n\n def simulation_started(self, state):\n self.xPickler.dump(state)\n\n def retrieve_step_delta_and_x(self) -> Tuple[int, float, np.ndarray]:\n return self.xUnpickler.load()\n\n def update(self, state):\n # later add the ability to store change in topology graph\n if len(state) == 3:\n self.xPickler.dump(state)\n elif len(state) > 3:\n self.xPickler.dump((state[0], state[1], state[2]))\n self.topologyPickler.dump(state[3])\n else:\n raise RuntimeError('Unknown state length / structure : ' + str(state))\n\n def simulation_ending(self, state):\n self.xPickler.dump(state)\n\n def simulation_ended(self, state):\n self.outfiles['xFile'].close()\n self.outfiles['topologyFile'].close()\n self.outfiles['structureFile'].close()\n","repo_name":"PyOpinions/pyOpinions","sub_path":"pyOpinions-io/src/opinions/io/opinionsIO.py","file_name":"opinionsIO.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"1245963122","text":"\"\"\"add uploaded to vogue column to analysis\n\nRevision ID: 49ded71bd1a1\nRevises: 1dadcefd3bbf\nCreate Date: 2021-03-10 13:32:40.247574\n\n\"\"\"\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"49ded71bd1a1\"\ndown_revision = \"1dadcefd3bbf\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column(\"analysis\", sa.Column(\"uploaded_to_vogue_at\", sa.DateTime(), nullable=True))\n\n\ndef downgrade():\n op.drop_column(\"analysis\", \"uploaded_to_vogue_at\")\n","repo_name":"Clinical-Genomics/cg","sub_path":"alembic/versions/49ded71bd1a1_add_uploaded_to_vogue_column_to_analysis.py","file_name":"49ded71bd1a1_add_uploaded_to_vogue_column_to_analysis.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"}
+{"seq_id":"13810011682","text":"#!/usr/bin/env python\n# coding=utf-8\n#示例属性如下:\n #cookedLevel: 这是数字:\n #0-3表示还是生的,超过3表示半生不熟,超过5表示已经考好了,超过8表示已经烤过头了\n #cookedString:这是字符串:描述地瓜的生熟程度\n #condiments:这是地瓜的配料列表,比如番茄酱,芥末酱等\n#示例方法如下:\n#cook():把地瓜烤一段时间\n#addCondiments():给地瓜添加配料\n#__init__():设置默认的属性\n#__str__():让pint的结果看起来好一些\n\n\n#定义类,并且定义__init__()方法\n\n#定义“地瓜”类\n\nclass SweerPotato:\n '这是地瓜类'\n\n\n #定义初始化方法\n\n def __init__(self):\n self.cookedLevel = 0\n self.cookedString= \"生的\"\n self.codiments= []\n\n\n#添加“烤地瓜”方法\n '烤地瓜方法'\n def cook(self,time):\n self.cookedLevel += time\n if self.cookedLevel > 8:\n self.cookedString = \"烤成灰了\"\n elif self.cookedLevel >5:\n self.cookedString = \"烤好了\"\n elif self.cookedLevel > 3:\n self.cookedString = \"半生不熟\"\n else:\n self.cookedStrinh = \"生的\"\n\n\n\n#测试\n\nmySweetPotato = SweerPotato()\nprint(mySweetPotato.cookedLevel)\nprint(mySweetPotato.cookedString)\nprint(mySweetPotato.codiments)\n\n#测试cook方法\nprint(\"开始烤............\\n\")\nmySweetPotato.cook(4) #烤了4分钟了\nprint(mySweetPotato.cookedLevel)\nprint(mySweetPotato.cookedString)\n","repo_name":"VictorSSH/Python","sub_path":"Code/OOP/oop_3_class.py","file_name":"oop_3_class.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"33738750256","text":"#!/usr/bin/env python3\n\nimport sys\nimport socket\nimport sys\nimport requests\nimport subprocess\nimport os\nimport uuid\nimport json\nfrom dotenv import load_dotenv\n\nhomedir = str(os.getenv('HOME'))\nload_dotenv(homedir+'/recon/config.env')\n\ntarget = sys.argv[1]\nheaders = {'Accept' : 'application/json', 'Content-Type' : 'application/json'}\nauth = (str(os.getenv('USERNAME')), str(os.getenv('PASSWORD')))\nlista_ips = []\nlista_index = ['subdomain','portscan','webenum','webvuln','infravuln']\njson_parse = ''\ndic_ip = {}\nlist_vulns = []\nlist_sistemas = []\n\ndef consulta_bases(index):\n\tdata = {\"size\":10000}\n\turl = str(os.getenv('HOST'))+target+'-'+index+'/_search'\n\tget_doc = requests.get(url, headers=headers, auth=auth, data=json.dumps(data), verify=False)\n\tparse_scan = json.loads(get_doc.text)\n\treturn(parse_scan)\n\nif len(sys.argv) != 3:\n\tfor i in lista_index:\n\t\tind = consulta_bases(i)\n\n\t\tif ind not in list_sistemas:\n\t\t\ttry:\n\t\t\t\tfor x in ind['hits']['hits']:\n\t\t\t\t\tlist_sistemas.append(x['_source']['url.original'])\n\t\t\texcept:\n\t\t\t\tpass\n\n\tos.system('clear')\n\tprint(list_sistemas)\n\texit()\nelse:\n\tsistema = sys.argv[2]\n\ndef consulta_diretorios(sistema):\n\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\tfile.write('\\n[*] Directories\\n')\n\n\tlist_sis = []\n\tfor index in lista_index:\n\t\tjson_parse = consulta_bases(index)\n\t\tfor x in json_parse['hits']['hits']:\n\t\t\ttry:\n\t\t\t\tif(x['_source']['url.original'] == sistema):\n\t\t\t\t\tif(x['_source']['url.full'] not in list_sis):\n\t\t\t\t\t\tlist_sis.append(x['_source']['url.full'])\n\t\t\t\t\t\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\t\t\t\t\t\tfile.write(x['_source']['url.full']+'\\n')\n\t\t\texcept:\n\t\t\t\tpass\n\ndef consulta_ip(ip):\n\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\tfile.write('\\n[*] Ports\\n')\n\tdic_ip[ip] = []\n\tfor index in lista_index:\n\t\tjson_parse = consulta_bases(index)\n\t\ttry:\n\t\t\tfor x in json_parse['hits']['hits']:\n\t\t\t\tif(x['_source']['server.ip'] == ip):\n\t\t\t\t\tif(x['_source']['server.port'] not in dic_ip[ip]):\n\t\t\t\t\t\tdic_ip[ip].append(x['_source']['server.port'])\n\t\t\t\t\t\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\t\t\t\t\t\t\tfile.write(str(ip)+' '+str(x['_source']['server.port'])+'\\n')\n\t\texcept:\n\t\t\tpass\n\tconsulta_diretorios(sistema)\ndef consulta_vuln():\n\tlist_vulns = []\n\tip = ''\n\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\tfile.write('[*] Vulnerabilities\\n')\n\tfor index in lista_index:\n\t\tjson_parse = consulta_bases(index)\n\t\tfor x in json_parse['hits']['hits']:\n\t\t\ttry:\n\t\t\t\tfor x in json_parse['hits']['hits']:\n\t\t\t\t\tif(x['_source']['url.original'] == sistema):\n\t\t\t\t\t\tif(x['_source']['server.ip'] != '0.0.0.0'):\n\t\t\t\t\t\t\tip = x['_source']['server.ip']\n\t\t\t\t\t\tif(x['_source']['vulnerability.name'] not in list_vulns):\n\t\t\t\t\t\t\tlist_vulns.append(x['_source']['vulnerability.name'])\n\t\t\t\t\t\t\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\t\t\t\t\t\t\tfile.write(x['_source']['url.full']+' - '+x['_source']['vulnerability.name']+'\\n')\n\t\t\texcept:\n\t\t\t\tpass\n\n\tconsulta_ip(ip)\n\ndef main():\n\tos.system('rm -rf '+homedir+'/recon/data/'+target+'/result.txt')\n\tconsulta_vuln()\n\twith open(homedir+'/recon/data/'+target+'/result.txt', 'r') as 
file:\n\t\tos.system('clear')\n\t\tdata = file.read()\n\t\tprint(data)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"vida003/AutoRecon","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"73769420276","text":"\"\"\"\nUnit tests go here\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas._testing import assert_frame_equal\n\nfrom spc.helpers import (\n flatten_list,\n get_df_with_sample_id,\n get_num_of_PCs_to_retain,\n multiply_matrices,\n standardize_and_PCA,\n)\n\n\ndef test_flatten_list():\n assert flatten_list([[1, 1], [2, 2], [3, 3]]) == [1, 1, 2, 2, 3, 3]\n assert flatten_list([[], []]) == []\n assert flatten_list([[]]) == []\n\n\ndef test_get_df_with_sample_id():\n df_test_input = pd.DataFrame({\"x1\": [1, 2, 2, 3, 3, 4, 5]})\n df_expected1 = pd.DataFrame(\n {\"sample_id\": [1, 2, 3, 4, 5, 6, 7], \"x1\": [1, 2, 2, 3, 3, 4, 5]}\n )\n df_expected2 = pd.DataFrame(\n {\"sample_id\": [1, 1, 2, 2, 3, 3, 4], \"x1\": [1, 2, 2, 3, 3, 4, 5]}\n )\n df_expected3 = pd.DataFrame(\n {\"sample_id\": [1, 1, 1, 1, 1, 1, 1], \"x1\": [1, 2, 2, 3, 3, 4, 5]}\n )\n df_output1 = get_df_with_sample_id(df_test_input, n_sample_size=1)\n df_output2 = get_df_with_sample_id(df_test_input, n_sample_size=2)\n df_output3 = get_df_with_sample_id(df_test_input, n_sample_size=7)\n\n assert_frame_equal(df_output1, df_expected1, check_dtype=False)\n assert_frame_equal(df_output2, df_expected2, check_dtype=False)\n assert_frame_equal(df_output3, df_expected3, check_dtype=False)\n with pytest.raises(Exception):\n get_df_with_sample_id(df_test_input, n_sample_size=0)\n\n\ndef test_multiply_matrices():\n sigma = np.array(\n [\n [1, 0.7, 0.9, 0.3, 0.2, 0.3],\n [0.7, 1, 0.8, 0.1, 0.4, 0.2],\n [0.9, 0.8, 1, 0.1, 0.2, 0.1],\n [0.3, 0.1, 0.1, 1, 0.2, 0.1],\n [0.2, 0.4, 0.2, 0.2, 1, 0.1],\n [0.3, 0.2, 0.1, 0.1, 0.1, 1],\n ]\n )\n delta_output = np.sqrt(\n multiply_matrices(np.array([1] * 6), np.linalg.inv(sigma), np.array([1] * 6))\n )\n delta_expected = 1.86\n assert np.round(delta_output, 2) == delta_expected\n\n\n@pytest.mark.parametrize(\n \"variance_explain_min, expected_num_of_PCs_to_retain\",\n [[0.1, 1], [0.55, 2], [0.9, 3], [0.95, 3]],\n)\ndef test_get_num_of_PCs_to_retain(\n dataframe_for_PCAModel_phase1, variance_explain_min, expected_num_of_PCs_to_retain\n):\n _, PCA_object, _ = standardize_and_PCA(df=dataframe_for_PCAModel_phase1)\n num_of_PCs_to_retain, _ = get_num_of_PCs_to_retain(\n PCA_object=PCA_object, PC_variance_explained_min=variance_explain_min\n )\n assert num_of_PCs_to_retain == expected_num_of_PCs_to_retain\n","repo_name":"hviidhenrik/SPC","sub_path":"tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"4"}
+{"seq_id":"28359882828","text":"n=int(input())\na=list(map(int,input().split()))\nb=max(a)\ni=1\nwhile True:\n c=0\n for j in a:\n if b*i%j==0:\n c+=1\n if c==len(a):\n print(b*i)\n break\n i+=1","repo_name":"Abhishekvaranasi07/codemind-python","sub_path":"LCM_of_n_numbers.py","file_name":"LCM_of_n_numbers.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"43541473494","text":"arr = [1, 2, 3]\nN = 3\n\nsel = [0] * N\n\n\ndef perm(idx, check):\n if idx == N:\n print(sel)\n return\n\n for i in range(N):\n if (check & (1 << i)) != 0:\n continue\n\n sel[idx] = arr[i]\n perm(idx + 1, check | (1 << i))\n\n\nperm(0, 0)\n","repo_name":"kimchaelin13/Algorithm","sub_path":"lesson/0901/순열_비트마스크.py","file_name":"순열_비트마스크.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"25147700030","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport mock\nimport os\nimport giit.python_environment\n\n\ndef test_python_environment(testdirectory):\n\n prompt = mock.Mock()\n virtual_environment = mock.Mock()\n log = mock.Mock()\n requirements = testdirectory.write_text(\n filename=\"requirements.txt\", data=\"sphinx\", encoding=\"utf-8\"\n )\n\n env = {\"PATH\": \"/oki/doki\"}\n virtual_environment.create_environment.side_effect = lambda name: env\n\n python_environment = giit.python_environment.PythonEnvironment(\n prompt=prompt, virtual_environment=virtual_environment, log=log\n )\n\n venv = python_environment.from_requirements(\n requirements=requirements, pip_packages=None\n )\n\n assert venv == env\n\n command = \"python -m pip install -U -r {}\".format(\n os.path.join(testdirectory.path(), \"requirements.txt\")\n )\n prompt.run.assert_called_once_with(command=command, env=env)\n","repo_name":"steinwurf/giit","sub_path":"test/test_python_environment.py","file_name":"test_python_environment.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"7551086400","text":"def read_data(filename: str) -> list[int]:\n with open(filename) as file:\n n = int(file.readline())\n data = [0] * 10_000_000\n for _ in range(n):\n key, val = map(int, file.readline().strip().split())\n data[key] = val\n return data\n\n\ndef solution(data: list[int]) -> int:\n data = list(map(lambda x: x // 48 + int(x % 48 != 0), data))\n ans = 100 ** 100\n sm = sum(data)\n c = 0\n for i in range(1, len(data)):\n c += data[i] * i\n b = data[0]\n for i in range(1, len(data)):\n c += 2 * b - sm\n if data[i] != 0:\n ans = min(ans, c)\n b += data[i]\n return ans\n\n\nprint(solution(read_data('27A.txt')), end=' ')\nprint(solution(read_data('27B.txt')))\n","repo_name":"vlad-marlo/algorithms","sub_path":"school/ege/vars/25021990/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"28380243287","text":"from flask import Flask\nfrom flask_restful import Api, reqparse\nfrom flask_swagger_ui import get_swaggerui_blueprint\nfrom app.routes.hello_world import HelloWorld\nfrom app.routes.random_message import RandomMessage\n\napp = Flask(__name__)\napi = Api(app)\nprefix=\"/api/v1\"\nparser = reqparse.RequestParser()\n\n### swagger specific ###\nSWAGGER_URL = '/swagger'\nAPI_URL = '/static/swagger.json'\nSWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(\n SWAGGER_URL,\n API_URL,\n config={\n 'app_name': \"Example REST API\"\n }\n)\n\napp.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)\n\napi.add_resource(HelloWorld, prefix + '/hello-world')\napi.add_resource(RandomMessage, prefix + '/message')\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"rodrigomkd/example-rest-api","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"74005309237","text":"\"\"\" This function calculates de Omega matrix in Caliendo - Parro (2009)\n Inputs are A = alphas, B = bethas, G = I-O matrix, Dinp = trade shares,\n tarifap = tarifs, Fp = trade weighted tariffs \"\"\"\n\nimport numpy as np\nfrom numpy.linalg import matrix_power\n\n\ndef Expenditure(alphas, B, G, Dinp, taup, Fp, VAn, wf0, Sn, J, N):\n\n # [J, N] = np.shape(A)\n IA = np.zeros((J * N, J * N))\n I_F = 1 - Fp\n\n for n in range(N):\n IA[n * J: (n + 1) * J, n * J: (n + 1) * J] = np.kron(alphas[:, n], I_F[:, n].T).reshape(40, 40)\n\n Pit = Dinp/taup\n Bt = 1 - B\n BP = np.zeros(np.shape(Pit))\n\n for j in range(J):\n BP[j * N: (j + 1) * N, :] = np.kron(np.ones(N).reshape(N, 1), Bt[j, :]) * Pit[j * N: (j + 1) * N, :]\n\n NBP = np.zeros(np.shape(BP.T))\n\n for j in range(N):\n for n in range(N):\n NBP[j, n * J: (n + 1) * J] = BP[np.arange(n, N, J * N), j]\n\n NNBP = np.kron(NBP, np.ones((J, 1)))\n GG = np.kron(np.ones((1, N)), G)\n GP = GG * NNBP\n\n OM = np.eye(J * N, J * N) - (GP + IA)\n Vb = alphas * np.kron(np.ones((J, 1)), (wf0 * VAn).T)\n Vb = Vb.reshape(J * N, 1, order='F').copy()\n Bb = -alphas * (Sn * np.ones((1, J))).T\n Bb = Bb.reshape(J * N, 1, order='F').copy()\n\n temp = matrix_power(OM, -1)\n DD1 = temp.dot(Vb)\n DD2 = temp.dot(Bb)\n PQ = DD1 + DD2\n PQ = PQ.reshape(J, N, order='F').copy()\n\n return PQ\n","repo_name":"BAFurtado/AberturaComercial","sub_path":"expenditure.py","file_name":"expenditure.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"9200570345","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch import LaunchDescription\nfrom launch.actions import IncludeLaunchDescription\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration\n\nfrom pysdf import SDF\nimport numpy as np\nimport random\n\nclass SelectRandomPose:\n\n def __init__(self, \n sdl_path, \n min_x,\n min_y,\n max_x,\n max_y): \n\n self.parsed_sdl = SDF.from_file(sdl_path, remove_blank_text=True)\n self.parsed_sdl.to_file(sdl_path, pretty_print=True)\n self.objects_to_ignore = [\"head\", \"left_hand\", \"right_hand\", \"left_foot\", \"right_foot\", \"body\"]\n self.cylinder_radius = 0.2\n self.min_x = min_x\n self.min_y = min_y\n self.max_x = max_x\n self.max_y = max_y\n\n def __cylinder_detect(self):\n self.cylinder_poses = []\n for visual in self.parsed_sdl.iter(\"visual\"):\n if visual.name not in self.objects_to_ignore:\n pose = np.fromstring(visual.pose.text, count=6, sep=\" \")\n self.cylinder_poses.append(pose)\n\n def get_random_pose(self):\n self.__cylinder_detect()\n while True:\n x = random.uniform(self.min_x, self.max_x)\n y = random.uniform(self.min_y, self.max_y)\n self.pose = np.array([x, y])\n self.valid_pose_x_list = []\n self.valid_pose_y_list = []\n\n for cylinder_pose in self.cylinder_poses:\n valid_pose_x = not cylinder_pose[0] - self.cylinder_radius < self.pose[0] < cylinder_pose[0] + self.cylinder_radius \n valid_pose_y = not cylinder_pose[1] - self.cylinder_radius < self.pose[1] < cylinder_pose[1] + self.cylinder_radius \n self.valid_pose_x_list.append(valid_pose_x)\n self.valid_pose_y_list.append(valid_pose_y)\n\n if False not in self.valid_pose_x_list and False not in self.valid_pose_y_list: \n return self.pose\n\n\ndef generate_launch_description():\n launch_file_dir = os.path.join(get_package_share_directory('turtlebot3_gazebo'), 'launch')\n pkg_gazebo_ros = get_package_share_directory('gazebo_ros')\n\n world = random.randint(1, 4)\n\n path_sdf_model = f\"~/../../opt/ros/humble/share/turtlebot3_gazebo/models/g2w{world}/model.sdf\"\n path_sdf_model = os.path.expanduser(path_sdf_model)\n selectRandomPose = SelectRandomPose(sdl_path = path_sdf_model, \n min_x = -1.3,\n min_y = -1.5,\n max_x = 2.0,\n max_y = 1.5)\n \n pose = selectRandomPose.get_random_pose() \n\n \n\n use_sim_time = LaunchConfiguration('use_sim_time', default='true')\n x_pose = LaunchConfiguration('x_pose', default=f'{pose[0]}')\n y_pose = LaunchConfiguration('y_pose', default=f'{pose[1]}')\n \n\n if world == 1: \n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'g2w1.world'\n ) \n elif world == 2: \n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'g2w2.world'\n ) \n elif world == 3: \n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'g2w3.world'\n ) \n elif world == 4: \n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'g2w4.world'\n ) \n\n gzserver_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')\n ),\n launch_arguments={'world': world}.items()\n )\n\n gzclient_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(pkg_gazebo_ros, 'launch', 'gzclient.launch.py')\n )\n )\n\n robot_state_publisher_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n 
os.path.join(launch_file_dir, 'robot_state_publisher.launch.py')\n ),\n launch_arguments={'use_sim_time': use_sim_time}.items()\n )\n\n spawn_turtlebot_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(launch_file_dir, 'spawn_turtlebot3.launch.py')\n ),\n launch_arguments={\n 'x_pose': f'{pose[0]}',\n 'y_pose': f'{pose[1]}',\n }.items()\n )\n\n ld = LaunchDescription()\n\n # Add the commands to the launch description\n ld.add_action(gzserver_cmd)\n ld.add_action(gzclient_cmd)\n ld.add_action(robot_state_publisher_cmd)\n ld.add_action(spawn_turtlebot_cmd)\n\n return ld\n","repo_name":"autonomous-robots/worlds_gazebo","sub_path":"launch/turtlebot3_world.launch.py","file_name":"turtlebot3_world.launch.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"41138375440","text":"import psycopg2 as psycopg2\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_theme(style=\"whitegrid\")\nimg_path = '/Users/tim/master-thesis/latex/img/experiments/'\n# sns.set(rc = {'figure.figsize':(11,8)})\nalgorithms_order = ['Seeded DynaMOSA', 'DynaMOSA', 'Seeded Random Search', 'Random Search']\ncrates_order = ['time', 'gamie', 'lsd', 'humantime', 'quick_xml', 'tight']\n\nwith psycopg2.connect(\"dbname=rustyunit user=rust password=Lz780231Ray\") as conn:\n sql_seeded_random = \"select * from experiments_seeded_random;\"\n seeded_random_data = pd.read_sql_query(sql_seeded_random, conn)\n seeded_random_data['Algorithm'] = 'Seeded Random Search'\n seeded_random_data = seeded_random_data[seeded_random_data['crate'] != 'toycrate']\n\n sql_random = \"select * from experiments_random;\"\n random_data = pd.read_sql_query(sql_random, conn)\n random_data['Algorithm'] = 'Random Search'\n random_data = random_data[random_data['crate'] != 'toycrate']\n\n sql_seeded_dynamosa = \"select * from experiments_seeded_dynamosa;\"\n seeded_dynamosa_data = pd.read_sql_query(sql_seeded_dynamosa, conn)\n seeded_dynamosa_data['Algorithm'] = 'Seeded DynaMOSA'\n seeded_dynamosa_data = seeded_dynamosa_data[seeded_dynamosa_data['crate'] != 'toycrate']\n\n sql_dynamosa = \"select * from experiments_dynamosa;\"\n dynamosa_data = pd.read_sql_query(sql_dynamosa, conn)\n dynamosa_data['Algorithm'] = 'DynaMOSA'\n dynamosa_data = dynamosa_data[dynamosa_data['crate'] != 'toycrate']\n\n data = pd.concat([seeded_random_data, random_data, seeded_dynamosa_data, dynamosa_data])\n\n plot_data = data.groupby(['Algorithm', 'gen']).mean()\n fig = plt.figure(1)\n # mir_coverage, tests_length, tests, covered_targets\n\n coverage_plot = sns.lineplot(x=\"gen\", y=\"mir_coverage\",\n hue=\"Algorithm\", # style=\"event\",\n data=plot_data, hue_order=algorithms_order)\n coverage_plot.get_legend().set_title(None)\n coverage_plot.set(xlabel=\"Generation\", ylabel=\"Basic block coverage\")\n plt.show()\n fig.savefig(img_path + 'coverage-over-time-crates.png', dpi=300, format='png', bbox_inches='tight')\n","repo_name":"foxycom/rusty-unit","sub_path":"experiments/coverage_lineplot.py","file_name":"coverage_lineplot.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"}
+{"seq_id":"75183676917","text":"# Написать класс Vect, унаследованный от list, со следующими изменениями:\r\n# ◦\tПри создании экземпляра должно проверяться, что\r\n# 1\tВсе элементы последовательности одного типа\r\n# 2\tЭтот тип поддерживает сложение и умножение на число\r\n# ◦\tВ противном случае возникает исключение Type Error\r\n# ◦\tКласс поддерживает поэлементное умножение на число: vect @ 5\r\n# ◦\tКласс поддерживает поэлементное сложение (вместо конкатенации): vect1 + vect2\r\n# ◦\tКласс поддерживает запись в файл vect.write(\"файл\") и чтение из файла vect.read(\"файл\") (имя файла любое)\r\n# ◦\tПример:\r\n#\r\n# 1 >>> v, w = Vect(range(9)), Vect(range(10,16))\r\n# 2 >>> v, w\r\n# 3 ([0, 1, 2, 3, 4, 5, 6, 7, 8], [10, 11, 12, 13, 14, 15])\r\n# 4 >>> v+w\r\n# 5 [10, 12, 14, 16, 18, 20]\r\n# 6 >>> v@3\r\n# 7 [0, 3, 6, 9, 12, 15, 18, 21, 24]\r\n# 8 >>> s = Vect(\"QWER\")\r\n# 9 >>> s\r\n# 10 ['Q', 'W', 'E', 'R']\r\n# 11 >>> s+\"ASDFG\"\r\n# 12 ['QA', 'WS', 'ED', 'RF']\r\n# 13 >>> s@4\r\n# 14 ['QQQQ', 'WWWW', 'EEEE', 'RRRR']\r\n# 15 >>> l = Vect(i*2+1 for i in range(6))\r\n# 16 >>> l\r\n# 17 [1, 3, 5, 7, 9, 11]\r\n# 18 >>> e = Vect((1,2,3,4.4,5,6))\r\n\r\nclass Vect(list):\r\n def __add__(self, other):\r\n sum = Vect()\r\n size = min(len(self), len(other))\r\n for i in range(size):\r\n sum.append(other[i] + self[i])\r\n return sum\r\n\r\n def __mul__(self, other):\r\n for i in range(len(self)):\r\n self[i] *= other\r\n return self\r\n\r\n def write(self, path):\r\n output = open(path, 'w')\r\n output.write(str(self))\r\n\r\n def read(self, path):\r\n input = open(path, 'r')\r\n str = input.read()\r\n lst = list(eval(str))\r\n self.clear()\r\n for i in range(len(lst)):\r\n self.append(lst[i])\r\nv = Vect(range(9))\r\n# v.write('output1.txt')\r\nv.read('output1.txt')\r\nprint('bye')\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"cryptoEcho/MSU_python2021","sub_path":"04.24_2.py","file_name":"04.24_2.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"13833223632","text":"def longest_valid(s):\n match = [0] * (len(s) + 1)\n for i in range(1, len(s)):\n if s[i] in '{':\n continue\n open = '{'['}'.index(s[i])]\n start = i - 1 - match[i - 1]\n if start < 0:\n continue\n if s[start] != open:\n continue\n match[i] = i - start + 1 + match[start - 1]\n best = max(match)\n end = match.index(best)\n return s[end + 1 - best:end + 1]\n\n\nprint(len(longest_valid(input())))\n\nt = int(input())\npoint = []\nfor i in range(t):\n point.append(list(map(int, input().split())))\n","repo_name":"lalit21-logico/programming","sub_path":"brac.py","file_name":"brac.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"24849543033","text":"while True:\r\n try:\r\n N = int(input('Введите N: '))\r\n if 1<=N<=1000: break\r\n print('1≤N≤1000 !')\r\n except:\r\n print('Неверный ввод!')\r\n\r\nсловарь = {k[0]:' '.join(k[1:]) for k in [input(f'Введите {i+1}-ю запись: ').split() for i in range(N)]}\r\n\r\nwhile True:\r\n try:\r\n M = int(input('Введите M: '))\r\n if 1<=M<=100: break\r\n print('1≤M≤100 !')\r\n except:\r\n print('Неверный ввод!')\r\n\r\nслова = [input(f'Введите {i+1}-е слово: ') for i in range(M)]\r\n\r\n[print(f'{i}:', словарь. setdefault(i,'Нет в словаре')) for i in слова]","repo_name":"Irina-Sinyukova/PY2","sub_path":"PY/Ddictionary.py","file_name":"Ddictionary.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"42506501283","text":"#!/usr/bin/evn python2.7\n# -*- coding: utf-8 -*-\n\"\"\"\n特征向量制作\n\n\"\"\"\nimport tldextract\nimport numpy as np\n\nfrom get_lexical import UrlLexical\nfrom get_whois import extract_feature_whois\nfrom get_rank import get_scheme_domain\n\n\nclass build_feature(object):\n def __init__(self, data_obj, url_ip_map, url_ip_dmap, pdomain_whois_map, domain_rank_map, domain_cert_map, \\\n domain_pr_map, sus_domain):\n self.data_obj = data_obj\n self.url_ip_map = url_ip_map\n self.url_ip_dmap = url_ip_dmap\n self.pdomain_whois_map = pdomain_whois_map\n self.domain_rank_map = domain_rank_map\n self.domain_cert_map = domain_cert_map\n self.domain_pr_map = domain_pr_map\n self.sus_domain = sus_domain\n\n def __getkey(self, url):\n # 获取词汇特征\n lex_obj = UrlLexical(url)\n filename, filepath, pdomain_token, sdomain_token = lex_obj.do_extract()\n\n # 获取rank\n scheme_domain = get_scheme_domain(url)\n rank = self.domain_rank_map.get(scheme_domain, None)\n\n # 获取whois信息\n whois_obj = extract_feature_whois(url, self.pdomain_whois_map)\n exp_timedelta = whois_obj.get_exp_timedelta()\n reg_timedelta = whois_obj.get_reg_timedelta()\n upd_timedelta = whois_obj.get_update_timedelta()\n timedelta = whois_obj.get_timedelta()\n registrar = whois_obj.get_registrar()\n\n # 获取pdomain\n tld_obj = tldextract.extract(lex_obj.url_domain)\n url_pdomain = \".\".join([tld_obj.domain, tld_obj.suffix])\n url_domain = lex_obj.url_domain\n url_path = lex_obj.url_path\n url_netloc = lex_obj.url_netloc\n\n return filename, filepath, pdomain_token, sdomain_token, rank, exp_timedelta, reg_timedelta, upd_timedelta,\\\n timedelta, registrar, url_pdomain, url_domain, url_path, url_netloc\n\n def build_feature(self, url):\n \"\"\"\n :param url:\n :return:\n \"\"\"\n filename, filepath, pdomain_token, sdomain_token, rank, exp_timedelta, reg_timedelta, upd_timedelta,\\\n timedelta, registrar, url_pdomain, url_domain, url_path, url_netloc = self.__getkey(url)\n\n feature = np.zeros((1, 30), dtype=np.float32)\n\n # feature 1: domain path length\n if url_domain in self.data_obj.domain_path_map:\n feature[0][0] = len(self.data_obj.domain_path_map[url_domain])\n \n # feature 2: filename\n if filename and filename in self.data_obj.mal_filename:\n feature[0][1] = 1\n \n# if filename and filename in self.data_obj.ben_filename:\n# feature[0][2] = 1\n\n # feature 3: filepath\n if filepath and filepath in self.data_obj.mal_filepath:\n feature[0][3] = 1\n \n# if filename and filename in self.data_obj.ben_filepath:\n# feature[0][4] = 1\n\n # feature 4: domain tokens\n for token in pdomain_token:\n if token in self.data_obj.mal_pdomain_tokens:\n feature[0][5] = 1\n if token in self.data_obj.ben_pdomain_tokens:\n feature[0][7] = 1\n\n for token in sdomain_token:\n if token in self.data_obj.mal_sdomain_tokens:\n feature[0][6] = 1\n if token in self.data_obj.ben_sdomain_tokens:\n feature[0][8] = 1\n\n # feature 5: ip\n if url in self.url_ip_map and self.url_ip_map[url].rsplit(\".\", 1)[0] in self.data_obj.mal_ips:\n feature[0][9] += 1\n\n # feature 6: regisration time\n if reg_timedelta is not None:\n feature[0][10] = reg_timedelta\n\n # feature 7 reg exp time\n if timedelta is not None:\n feature[0][11] = timedelta\n\n # feature 8: update time\n if upd_timedelta is not None:\n feature[0][12] = upd_timedelta\n\n # feature 9 rank:\n if rank is None or rank > 10000000:\n feature[0][13] = 1\n\n if rank is not None and rank < 100000:\n feature[0][14] = 1\n\n # feature 10 cert:\n if url_domain in self.domain_cert_map and not 
isinstance(self.domain_cert_map[url_domain], int):\n feature[0][15] = 1\n\n # feature 11 maltrail:\n if url_domain in self.sus_domain:\n feature[0][16] = 1\n\n # feature 12 port num\n if url_pdomain in self.data_obj.domain_port_map:\n feature[0][17] = len(self.data_obj.domain_port_map[url_pdomain])\n\n # feature 15-17 filename lexical\n if filename and filename.count(\"%\") / float(len(filename)) > 0.2 and filename[-3:] == \"exe\":\n feature[0][18] = 1\n\n if filename and filename.count(\"@\") > 0:\n feature[0][19] = 1\n\n # feature 19 path level count\n if url_path:\n feature[0][20] = url_path.count(\"/\")\n\n # feature 20-21\n if url_domain in self.url_ip_dmap:\n feature[0][21] = len(self.url_ip_dmap[url_domain][1])\n feature[0][22] = len(self.url_ip_dmap[url_domain][2])\n\n if url_domain:\n for char in url_domain:\n if char.isdigit():\n feature[0][23] += 1\n\n feature[0][24] = len(url_domain) / url_domain.count(\".\")\n feature[0][25] = max([len(_) for _ in url_domain.split(\".\")])\n\n # feature 22 name servers count\n if url_pdomain in self.pdomain_whois_map and 'name_servers' in self.pdomain_whois_map and \\\n self.pdomain_whois_map[url_pdomain]['name_servers']:\n feature[0][26] = len(self.pdomain_whois_map[url_pdomain]['name_servers'])\n\n if url_pdomain in self.pdomain_whois_map and 'status' in self.pdomain_whois_map and \\\n self.pdomain_whois_map[url_pdomain]['status']:\n feature[0][27] = len(self.pdomain_whois_map[url_pdomain]['status'])\n\n if url_domain in self.domain_pr_map and self.domain_pr_map[url_domain] > 0:\n feature[0][28] = 1\n\n# deep digger into geo info\n\n# if domain in url_ip_dmap:\n# asn_set = set()\n# ips = url_ip_dmap[domain][2]\n# for ip in ips:\n# asn_set.add(asndb.asn_by_addr(ip))\n# feature[0][25] = len(asn_set)\n\n# if domain in url_ip_dmap:\n# geo_set = set()\n# ips = url_ip_dmap[domain][2]\n# for ip in ips:\n# if citydb.record_by_name(ip):\n# geo_set.add(citydb.record_by_name(ip)['city'])\n# feature[0][26] = len(geo_set)\n\n# feature filename\n\n# if filename:\n# feature[0][29] = filename.count(\"_\")\n\n# feature whois emails\n\n# if pdomain in whois_dict and 'emails' in whois_dict and whois_dict[pdomain]['emails']:\n# feature[0][23] = len(whois_dict[pdomain]['emails'])\n\n# feature whois country\n\n# if pdomain in whois_dict and 'country' in whois_dict and whois_dict[pdomain]['country']!=\"CN\":\n# feature[0][23] = 1\n\n# feature registrar\n\n# vector_obj = HashingVectorizer(n_features=64)\n# if registrar:\n# ff_add = vector_obj.fit_transform([registrar]).toarray()\n# else:\n# ff_add = np.zeros((1, 64), dtype = np.float32)\n# feature = np.concatenate((feature, ff_add), axis = 1)\n\n# feature asn ratio\n\n# if url in url_ip_map and url_ip_map[url] !='unknown':\n# asn = asndb.lookup(url_ip_map[url])[0]\n# if asn is not None and asn in asn_score_map:\n# feature[0][22] = asn_score_map[asn]\n\n return feature\n\n\n def build_feature_set(self, urls):\n feature = []\n for url in urls:\n feature.append(self.build_feature(url))\n feature = np.concatenate(feature, axis=0)\n return feature","repo_name":"angelfish91/URL-XGB","sub_path":"make_feature.py","file_name":"make_feature.py","file_ext":"py","file_size_in_byte":7952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"2325691455","text":"def group_values_by_indices(values, indices):\n groups = {}\n for value, index in zip(values, indices):\n if index not in groups:\n groups[index] = []\n groups[index].append(value)\n\n # Sort the dictionary by its keys and return the values\n return [groups[key] for key in sorted(groups)]\n\n\n# Example usage\nvalues = [1, 2, 3, 4, 5, 6, 7, 8]\nindices = [3, 3, 1, 1, 2, 2, 2, 4]\nprint(group_values_by_indices(values, indices))","repo_name":"jidec/bigcrittercolor","sub_path":"tests/scratch_experiments/group_by_indices.py","file_name":"group_by_indices.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"28958375553","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\nclass MapVisualizer:\n def __init__(self,size_meters,pixels):\n # size_meters: estimated map size, e.g. 10m x 10m\n # pixels: map matrix size, e.g. [1000,1000]\n self.size_meters=size_meters\n self.pixels=pixels\n self.resolution=size_meters/pixels\n self.vehicle=None\n self.img=None\n self.previous_X = [-1, -1]\n self.setlabels()\n\n def setlabels(self):\n fig=plt.figure(figsize=(7,7))\n mng = plt.get_current_fig_manager()\n plt.ion()\n self.ax = fig.gca()\n self.ax.set_xlabel('X (m)')\n self.ax.set_ylabel('Y (m)')\n self.ax.grid(False)\n plt.title('GMAPPING')\n # self.ax.set_xlim([0, self.pixels])\n self.ax.set_ylim([0, self.pixels])\n ticks=np.arange(0,self.size_meters+1)\n labels = [str(tick) for tick in ticks]\n self.ax.set_xticks(ticks/self.resolution)\n self.ax.set_yticks(ticks/self.resolution)\n self.ax.set_xticklabels(labels)\n self.ax.set_yticklabels(labels)\n\n def visualize(self,X,map_matrix):\n # X: [x,y,theta]\n # map_matrix: same as hw1\n\n # If pre-processing on input values is needed:(from hw1)\n # map_matrix[map_matrix < 0] = -1\n # map_matrix[map_matrix > 0] = 1 - map_matrix[map_matrix > 0]\n\n if self.vehicle:\n self.vehicle.remove()\n self.vehicle=self.ax.arrow(X[0]/self.resolution, X[1]/self.resolution,\n 0.1*np.cos(X[2]), 0.1*np.sin(X[2]), head_width=2, fc='r', ec='r')\n if (self.previous_X[0] != -1):\n self.ax.plot([self.previous_X[0]/self.resolution, X[0]/self.resolution], [self.previous_X[1]/self.resolution, X[1]/self.resolution], \"r\")\n if self.img is None:\n self.img = self.ax.imshow(map_matrix.T, cmap='Greys')\n else:\n self.img.set_data(map_matrix.T)\n self.previous_X = X\n plt.pause(0.0001)\n plt.draw()\n\n\n\n# # Example in 25 steps:\n# map_visualizer=MapVisualizer(8,1000)\n# for i in range(25):\n# X = [0.2*i, 0.3*i, i*np.pi / 4]\n# test=np.random.rand(1000000).reshape((1000,1000))\n# map_visualizer.visualize(X,test)\n\n\n","repo_name":"Zhefan-Xu/gmapping","sub_path":"MapVisualization.py","file_name":"MapVisualization.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"}
+{"seq_id":"33317835282","text":"import collections\nfrom tree import TreeNode, make_binary_tree\n\n\ndef first_solution(root: TreeNode) -> int:\n if not root:\n return 0\n depths = []\n\n def dfs(node, depth):\n if node.left is None and node.right is None:\n depths.append(depth)\n return depth\n depth += 1\n\n for child in [node.left, node.right]:\n if child is not None:\n dfs(child, depth)\n return depth\n\n dfs(root, 1)\n return max(depths)\n\n\ndef second_solution(root: TreeNode) -> int:\n if not root:\n return 0\n max_depth = 0\n\n def dfs(node, depth, maximum):\n if node.left is None and node.right is None:\n return max(maximum, depth)\n depth += 1\n\n for child in [node.left, node.right]:\n if child is not None:\n maximum = dfs(child, depth, maximum)\n return maximum\n\n return dfs(root, 1, max_depth)\n\n\ndef third_solution(root: TreeNode) -> int:\n queue = collections.deque([root])\n depth = 0\n\n while queue:\n depth += 1\n for _ in range(len(queue)):\n node = queue.popleft()\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return depth\n\n\nif __name__ == \"__main__\":\n tree = [3, 9, 20, None, None, 15, 7]\n root = make_binary_tree(tree)\n print(third_solution(root))\n","repo_name":"youngbin-ro/problem-solving","sub_path":"python-algorithm-interview-book/chapter14. tree/maximum-depth-of-binary-tree.py","file_name":"maximum-depth-of-binary-tree.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"9421405742","text":"from typing import Any, Optional, Sequence, Union\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.regression.r2 import _r2_score_update\nfrom torchmetrics.functional.regression.rse import _relative_squared_error_compute\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"RelativeSquaredError.plot\"]\n\n\nclass RelativeSquaredError(Metric):\n r\"\"\"Computes the relative squared error (RSE).\n\n .. math:: \\text{RSE} = \\frac{\\sum_i^N(y_i - \\hat{y_i})^2}{\\sum_i^N(y_i - \\overline{y})^2}\n\n Where :math:`y` is a tensor of target values with mean :math:`\\overline{y}`, and\n :math:`\\hat{y}` is a tensor of predictions.\n\n If num_outputs > 1, the returned value is averaged over all the outputs.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): Predictions from model in float tensor with shape ``(N,)``\n or ``(N, M)`` (multioutput)\n - ``target`` (:class:`~torch.Tensor`): Ground truth values in float tensor with shape ``(N,)``\n or ``(N, M)`` (multioutput)\n\n As output of ``forward`` and ``compute`` the metric returns the following output:\n\n - ``rse`` (:class:`~torch.Tensor`): A tensor with the RSE score(s)\n\n Args:\n num_outputs: Number of outputs in multioutput setting\n squared: If True returns RSE value, if False returns RRSE value.\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> from torchmetrics.regression import RelativeSquaredError\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> relative_squared_error = RelativeSquaredError()\n >>> relative_squared_error(preds, target)\n tensor(0.0514)\n\n \"\"\"\n is_differentiable = True\n higher_is_better = False\n full_state_update = False\n sum_squared_error: Tensor\n sum_error: Tensor\n residual: Tensor\n total: Tensor\n\n def __init__(\n self,\n num_outputs: int = 1,\n squared: bool = True,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n\n self.num_outputs = num_outputs\n\n self.add_state(\"sum_squared_error\", default=torch.zeros(self.num_outputs), dist_reduce_fx=\"sum\")\n self.add_state(\"sum_error\", default=torch.zeros(self.num_outputs), dist_reduce_fx=\"sum\")\n self.add_state(\"residual\", default=torch.zeros(self.num_outputs), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n self.squared = squared\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n sum_squared_error, sum_error, residual, total = _r2_score_update(preds, target)\n\n self.sum_squared_error += sum_squared_error\n self.sum_error += sum_error\n self.residual += residual\n self.total += total\n\n def compute(self) -> Tensor:\n \"\"\"Computes relative squared error over state.\"\"\"\n return _relative_squared_error_compute(\n self.sum_squared_error, self.sum_error, self.residual, self.total, squared=self.squared\n )\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is 
provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> from torch import randn\n >>> # Example plotting a single value\n >>> from torchmetrics.regression import RelativeSquaredError\n >>> metric = RelativeSquaredError()\n >>> metric.update(randn(10,), randn(10,))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> from torch import randn\n >>> # Example plotting multiple values\n >>> from torchmetrics.regression import RelativeSquaredError\n >>> metric = RelativeSquaredError()\n >>> values = []\n >>> for _ in range(10):\n ... values.append(metric(randn(10,), randn(10,)))\n >>> fig, ax = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n","repo_name":"Lightning-AI/torchmetrics","sub_path":"src/torchmetrics/regression/rse.py","file_name":"rse.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":1718,"dataset":"github-code","pt":"4"}
+{"seq_id":"33554070991","text":"import helper\nfrom da_bell_secrets import *\nfrom providers import PROVIDERS\nimport smtplib, ssl\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport pathlib\n\n''' \nThis program sends an SMS/MMS message to the \nowner of a Da Bell device to notify them that \nthe doorbell was pressed along with a photo. \n\nInfo: Attachment can be at most 1MB\nHad to learn that the hard way.\n'''\n\ndef send_text_message(file_path):\n __send_mms_via_email(file_path)\n\n@helper.threaded\ndef __send_mms_via_email(file_path):\n # initialize variables needed\n phone_number: str = PHONE_NUMBER\n door_ring_message: str = helper.DOOR_RING_MESSAGE\n file_path: str = file_path\n mime_maintype: str = helper.FILE_TYPE\n mime_subtype: str = pathlib.Path(file_path).suffix\n file_name: str = pathlib.Path(file_path).name\n phone_provider: str = PHONE_PROVIDER\n sender_credentials: tuple = SENDER_CREDENTIALS\n subject: str = helper.APP_NAME\n smtp_server: str = helper.SMTP_GMAIL\n smtp_port: int = helper.SMTP_PORT\n \n # get/create information needed to send message\n # get gmail and password from da_bell_secrets.py\n sender_email, email_password = sender_credentials\n \n # get message type (sms/mms) based on provider\n # some do not allow mms\n message_type = helper.MESSAGE_TYPE[0] \\\n if PROVIDERS.get(phone_provider).get(helper.MMS_SUPPORT_KEY) \\\n else helper.MESSAGE_TYPE[0]\n \n # create receiver email based on their phone number and carrier\n receiver_email = f'{phone_number}@{PROVIDERS.get(phone_provider).get(message_type)}'\n \n # create gmail body\n email_message = MIMEMultipart()\n email_message[\"Subject\"] = subject\n email_message[\"From\"] = sender_email\n email_message[\"To\"] = receiver_email\n email_message.attach(MIMEText(door_ring_message, helper.TEXT_TYPE))\n \n # open file being sent and attach to email_message\n with open(file_path, helper.READ_BINARY) as attachment:\n part = MIMEBase(mime_maintype, mime_subtype)\n part.set_payload(attachment.read())\n encoders.encode_base64(part)\n part.add_header(\n \"Content-Disposition\", f\"attachment; filename={file_name}\",\n )\n email_message.attach(part)\n\n # send the message\n with smtplib.SMTP_SSL(smtp_server, smtp_port, context = ssl.create_default_context()) as email:\n # securely login to gmail\n email.login(sender_email, email_password)\n # send email with body and attachment\n email.sendmail(sender_email, receiver_email, email_message.as_string())\n print(\"Da Bell owner notified that doorbell was pressed\")\n ","repo_name":"Amark18/Da-Bell","sub_path":"mms.py","file_name":"mms.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"}
+{"seq_id":"18753118228","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth import get_user_model, forms as user_forms\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass UserCreateForm(user_forms.UserCreationForm):\n email = forms.EmailField()\n\n class Meta:\n model = get_user_model()\n fields = (\"username\", \"email\", )\n field_classes = {\"username\": user_forms.UsernameField}\n\n\nclass UserUpdateForm(forms.ModelForm):\n email = forms.EmailField()\n\n class Meta:\n model = get_user_model()\n fields = (\"username\", \"email\", \"first_name\", \"last_name\", )\n field_classes = {\"username\": user_forms.UsernameField}\n\n def clean_email(self):\n email = self.cleaned_data['email']\n username = self.cleaned_data['username']\n user_with_email = get_user_model().objects.filter(email=email).exclude(username=username)\n if not user_with_email.exists():\n return email\n else:\n raise ValidationError(_('User with this email address already exists'))\n","repo_name":"infohata/ptu2_adboard","sub_path":"adboard/user_menu/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"69970251672","text":"from Components.config import config\nfrom .Debug import logger\nfrom .FileUtils import readFile, writeFile\nfrom .CutListUtils import ptsToSeconds\n\n\nclass ParserMetaFile():\n\n\tmeta_keys = [\n\t\t\"service_reference\", \"name\", \"description\", \"rec_time\", \"tags\", \"length\", \"size\", \"service_data\"\n\t]\n\n\txmeta_keys = [\n\t\t\"timer_start_time\", \"timer_stop_time\", \"recording_start_time\", \"recording_stop_time\", \"recording_margin_before\",\n\t\t\"recording_margin_after\"\n\t]\n\n\tdef __init__(self, path):\n\t\tself.path = path\n\t\tself.meta_path = path + \".meta\"\n\t\tself.xmeta_path = path + \".xmeta\"\n\t\tself.meta = {}\n\t\tself.xmeta = {}\n\n\t\tself.meta_list = self.readMeta(self.meta_path)\n\t\tif self.meta_list:\n\t\t\tself.meta = self.list2dict(self.meta_list, self.meta_keys)\n\t\t\tif self.meta:\n\t\t\t\tself.meta[\"length\"] = ptsToSeconds(self.meta[\"length\"])\n\t\t\tself.xmeta_list = self.readMeta(self.xmeta_path)\n\t\t\tself.xmeta = self.list2dict(self.xmeta_list, self.xmeta_keys)\n\t\t\tif self.meta and not self.xmeta:\n\t\t\t\tself.xmeta[\"recording_start_time\"] = self.meta[\"rec_time\"]\n\t\t\t\tself.xmeta[\"recording_stop_time\"] = 0\n\t\t\t\tself.xmeta[\"recording_margin_before\"] = config.recording.margin_before.value * 60\n\t\t\t\tself.xmeta[\"recording_margin_after\"] = config.recording.margin_after.value * 60\n\n\tdef list2dict(self, alist, keys):\n\t\tadict = {}\n\t\tfor i, key in enumerate(keys):\n\t\t\tif i < len(alist):\n\t\t\t\ttry:\n\t\t\t\t\tadict[key] = int(alist[i])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tadict[key] = alist[i]\n\t\treturn adict\n\n\tdef dict2list(self, adict, keys):\n\t\tlogger.debug(\"adict: %s\", adict)\n\t\talist = []\n\t\tfor key in keys:\n\t\t\tif key in adict:\n\t\t\t\talist.append(adict[key])\n\t\t\telse:\n\t\t\t\talist.append(\"\")\n\t\treturn alist\n\n\tdef readMeta(self, path):\n\t\tmeta_list = readFile(path).splitlines()\n\t\tmeta_list = [list_item.strip() for list_item in meta_list]\n\t\treturn meta_list\n\n\tdef getMeta(self):\n\t\tself.meta.update(self.xmeta)\n\t\tlogger.debug(\"meta: %s\", self.meta)\n\t\treturn self.meta\n\n\tdef updateXMeta(self, xmeta):\n\t\tlogger.debug(\"xmeta: %s\", xmeta)\n\t\tself.xmeta.update(xmeta)\n\t\tlogger.debug(\"self.xmeta: %s\", self.xmeta)\n\t\tself.saveXMeta()\n\n\tdef saveXMeta(self):\n\t\talist = self.dict2list(self.xmeta, self.xmeta_keys)\n\t\tdata = \"\\n\".join([str(line) for line in alist])\n\t\twriteFile(self.xmeta_path, data)\n","repo_name":"dream-alpha/CacheCockpit","sub_path":"src/ParserMetaFile.py","file_name":"ParserMetaFile.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"18315509217","text":"import numpy as np\nimport os\n\nfrom OpenGL.GL import *\nfrom OpenGL.GL.framebufferobjects import *\nfrom OpenGL.arrays import vbo\n\nfrom ..textures import Texture\nfrom ..shaders import set_uniform, compileShader\nfrom .base import AbstractEffect\n\n\nclass FXAAEffect(AbstractEffect):\n '''Fast Approximate Anti Aliasing. It is an efficient way to add\n anti-aliasing to your scenes. The reason to have it is to\n reduce jagged lines.\n\n The parameters *span_max*, *reduce_mul*, *reduce_min* are\n tweakable even if it is suggested to keep them at their default value.\n\n .. image:: /_static/fxaa_on_off.png\n :width: 800px\n \n '''\n def __init__(self, widget, span_max = 4.0, reduce_mul=1/8.0, reduce_min=1/128.0):\n self.widget = widget\n curdir = os.path.dirname(__file__)\n \n vert = open(os.path.join(curdir, 'shaders', 'noeffect.vert')).read()\n frag = open(os.path.join(curdir, 'shaders', 'fxaa.frag')).read() \n # Compile quad shader\n vertex = compileShader(vert, GL_VERTEX_SHADER)\n fragment = compileShader(frag, GL_FRAGMENT_SHADER)\n \n self.span_max = span_max\n self.reduce_mul = reduce_mul\n self.reduce_min = reduce_min\n \n self.quad_program = shaders.compileProgram(vertex, fragment)\n\n def render(self, fb, texturedict):\n glBindFramebuffer(GL_FRAMEBUFFER, fb)\n glViewport(0, 0, self.widget.width(), self.widget.height())\n \n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glUseProgram(self.quad_program)\n \n set_uniform(self.quad_program, 'FXAA_SPAN_MAX', '1f', self.span_max)\n set_uniform(self.quad_program, 'FXAA_REDUCE_MUL', '1f', self.reduce_mul)\n set_uniform(self.quad_program, 'FXAA_REDUCE_MIN', '1f', self.reduce_min)\n \n qd_id = glGetUniformLocation(self.quad_program, b\"textureSampler\")\n texture = texturedict['color']\n \n # Setting up the texture\n glActiveTexture(GL_TEXTURE0)\n texture.bind()\n \n # Set our \"quad_texture\" sampler to user Texture Unit 0\n glUniform1i(qd_id, 0)\n # Set resolution\n res_id = glGetUniformLocation(self.quad_program, b\"texcoordOffset\")\n glUniform2f(res_id, 1.0/self.widget.width(), 1.0/self.widget.height())\n\n # # Let's render a quad\n quad_data = np.array([-1.0, -1.0, 0.0,\n 1.0, -1.0, 0.0,\n -1.0, 1.0, 0.0,\n -1.0, 1.0, 0.0,\n 1.0, -1.0, 0.0,\n 1.0, 1.0, 0.0],\n dtype='float32')\n \n vboquad = vbo.VBO(quad_data)\n vboquad.bind()\n \n glVertexPointer(3, GL_FLOAT, 0, None) \n glEnableClientState(GL_VERTEX_ARRAY)\n\n # draw \"count\" points from the VBO\n glDrawArrays(GL_TRIANGLES, 0, 6)\n \n vboquad.unbind()\n glDisableClientState(GL_VERTEX_ARRAY)\n \n def on_resize(self, w, h):\n pass","repo_name":"chemlab/chemlab","sub_path":"chemlab/graphics/postprocessing/fxaa.py","file_name":"fxaa.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"5"}
+{"seq_id":"29899910725","text":"import os.path\n\nclass proxy_opts:\n auto = 'auto'\n manual = 'manual'\n noProxy = 'noProxy'\n\nclass schedule_opts:\n manual = 'manual'\n runEvery = 'runEvery'\n days = 'days'\n\nclass upload_opts:\n manual = 'manual'\n auto = 'auto'\n\ntimeFormat = 'hh:mm:ss'\ndateFormat = 'yyyy-MM-ddThh:mm:ss'\nrunEvery_opts = ['hour', 'minute']\nweekDaysStr = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']\ntourDisplayReasons = ['uncoupling', 'publish']\n\naccount_loginType = 'account/loginType'\naccount_rememberLogin = 'account/rememberLogin'\naccount_username = 'account/username'\naccount_password = 'account/password'\naccount_runOnStartup = 'account/runOnStartup'\naccount_sendErrorLog = 'account/sendErrorLog'\naccount_language = 'account/language'\naccount_dropboxPrompted = 'account/dropboxPrompted'\n\noauth_googleExpiry = 'oauth/googleExpiry'\noauth_facebookExpiry = 'oauth/facebookExpiry'\n\ncompute_rootFolder = 'compute/rootFolder'\ncompute_usersFolders = 'compute/usersFolders'\n\nschedule_runEvery_value = 'schedule/runEvery_value'\nschedule_runEvery_unit = 'schedule/runEvery_unit'\nschedule_day_sun = 'schedule/day_sun'\nschedule_day_mon = 'schedule/day_mon'\nschedule_day_tue = 'schedule/day_tue'\nschedule_day_wed = 'schedule/day_wed'\nschedule_day_thu = 'schedule/day_thu'\nschedule_day_fri = 'schedule/day_fri'\nschedule_day_sat = 'schedule/day_sat'\nschedule_day_time = 'schedule/day_time'\nschedule_upload = 'schedule/upload'\nschedule_schedule = 'schedule/schedule'\n\nvcs_installed_version = 'vcs/installedVersion'\nvcs_new_version = 'vcs/newVersionInstalled'\nvcs_new_version_path = 'vcs/newVersionPath'\nvcs_startup_version = 'vcs/startupVersion'\nvcs_events_notified = 'vcs/eventsNotified'\n\nstatus_showFinished = 'status/showFinished'\nstatus_showPath = 'status/showPath'\nstatus_showPending = 'status/showPending'\n\nno_proxy = 'connection/noProxy'\nproxy_server = 'connection/proxyServer'\nproxy_port = 'connection/proxyPort'\nproxy_protocol = 'connection/proxyScheme'\nproxy_is_authenticated = 'connection/isProxyAuthenticated'\nproxy_user = 'connection/proxyUser'\nproxy_password = 'connection/proxyPassword'\nproxy_connection = 'connection/proxyConnection'\n","repo_name":"SimeonRolev/iOS-Windows-settings-parser","sub_path":"config_parser/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"42409314477","text":"class Person(object):\n def __init__(self,name,age):\n self.name =name\n self.age =age\n\n def __str__(self):\n return \"<{} {}>\".format(self.name, self.age)\n\n\np =Person(\"小黑\",18)\nprint(p)\np2 = Person(\"高材生\",9000)\nprint(p2)","repo_name":"leiqing110/Django-Restaurant","sub_path":"APP01/类的__str__方法.py","file_name":"类的__str__方法.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"}
+{"seq_id":"17536019661","text":"from subprocess import call, DEVNULL\n\n\ndef run_editor(cmds):\n for cmd in cmds:\n if program_exists(cmd[0]):\n call(cmd)\n break\n else:\n print('No editors found (tried {})'\n .format(', '.join(x[0] for x in cmds)))\n sys.exit(1)\n\n\ndef program_exists(name):\n return call(['which', name], stdout=DEVNULL, stderr=DEVNULL) == 0\n","repo_name":"chriswatrous/scripts","sub_path":"bin/edit_helpers.py","file_name":"edit_helpers.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"1773972724","text":"import pytest\nfrom .pages.product_page import ProductPage\nfrom .pages.basket_page import BasketPage\nfrom .pages.login_page import LoginPage\nimport time, secrets\n\nlink = 'http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209/?promo=newYear'\n\nclass TestUserAddToBasketFromProductPage():\n\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup(self, browser):\n registration_email = f'{str(time.time())}@fakemail.org'\n registration_password = secrets.token_urlsafe(8)\n self.link = link\n self.browser = browser\n self.page = ProductPage(self.browser, self.link)\n self.page.open()\n self.login_page = self.page.go_to_login_page()\n self.login_page = LoginPage(self.browser, self.link)\n self.login_page.register_new_user(registration_email, registration_password)\n self.login_page.should_be_authorized_user()\n\n def test_user_cant_see_success_message(self):\n self.page.should_not_be_success_message()\n\n @pytest.mark.need_review\n def test_user_can_add_product_to_basket(self):\n self.page.click_basket_button()\n\n@pytest.mark.need_review\ndef test_guest_can_add_product_to_basket(browser):\n browser = browser\n page = ProductPage(browser, link)\n page.open()\n page.click_basket_button()\n page.solve_quiz_and_get_code()\n page.should_be_right_book()\n\n@pytest.mark.skip\ndef test_guest_cant_see_success_message_after_adding_product_to_basket(self):\n browser = browser\n page = ProductPage(browser, link)\n page.open()\n page.click_basket_button()\n page.solve_quiz_and_get_code()\n page.should_not_be_success_message()\n\n\n@pytest.mark.skip\ndef test_message_disappeared_after_adding_product_to_basket(browser):\n browser = browser\n page = ProductPage(browser, link)\n page.open()\n page.click_basket_button()\n page.solve_quiz_and_get_code()\n page.should_disapear()\n\n@pytest.mark.skip\ndef test_guest_should_see_login_link_on_product_page(browser):\n browser = browser\n page = ProductPage(browser, link)\n page.open()\n page.should_be_login_link()\n\n@pytest.mark.need_review\ndef test_guest_can_go_to_login_page_from_product_page(browser):\n browser = browser\n page = ProductPage(browser, link)\n page.open()\n page.go_to_login_page()\n\n@pytest.mark.need_review\ndef test_guest_cant_see_product_in_basket_opened_from_product_page(browser):\n browser = browser\n page = ProductPage(browser, link)\n page.open()\n basket_page = page.go_to_basket_page()\n basket_page = BasketPage(browser, browser.current_url)\n basket_page.should_not_be_items_in_basket()\n basket_page.should_be_empty_basket()\n","repo_name":"Fidochek/SeleniumProject","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"44442903365","text":"from unittest import defaultTestLoader\nfrom nva.universalsearch.tests.base import SolrTestCase\n\nfrom os import environ\nfrom transaction import abort, commit\nfrom zope.component import getUtility\nfrom zope.schema.interfaces import IVocabularyFactory\nfrom collective.solr.interfaces import ISolrConnectionConfig\nfrom collective.solr.interfaces import ISearch\nfrom collective.solr.dispatcher import solrSearchResults\nfrom collective.solr.utils import activate\nfrom nva.universalsearch.interfaces import IUniversalSearchConfig\n\n\ndef indexForDifferentSystem(obj, system='Other'):\n from collective.solr.indexer import SolrIndexProcessor\n original = SolrIndexProcessor.getData\n\n def getData(self, obj, attributes=None):\n data, missing = original(self, obj, attributes)\n data['UID'] = system + '.' + data['UID'] # uid needs to be unique\n data['system'] = system\n return data, missing\n\n SolrIndexProcessor.getData = getData\n obj.processForm()\n commit()\n SolrIndexProcessor.getData = original\n\n\nclass SolrServerTests(SolrTestCase):\n\n def afterSetUp(self):\n activate()\n self.portal.REQUEST.RESPONSE.write = lambda x: x # ignore output\n self.config = getUtility(ISolrConnectionConfig)\n if 'SOLR_PORT' in environ:\n self.config.port = int(environ['SOLR_PORT'])\n self.maintenance = self.portal.unrestrictedTraverse('solr-maintenance')\n self.maintenance.clear()\n self.search = getUtility(ISearch)\n\n def beforeTearDown(self):\n # due to the `commit()` in the tests below the activation of the\n # solr support in `afterSetUp` needs to be explicitly reversed,\n # but first all uncommitted changes made in the tests are aborted...\n abort()\n self.config.active = False\n commit()\n\n def testObjectCanBeSearchedViaSystemIndex(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n system = self.portal.Title()\n self.assertEqual(len(self.search('+system:\"%s\"' % system)), 1)\n # without specifying the 'system' we should get two results\n self.assertEqual(len(self.search('+system:*')), 2)\n\n def testFullUriIsStoredInSolr(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n self.assertEqual([r.uri for r in self.search('*:*')],\n [self.folder.absolute_url()])\n\n def testFullCustomizedUriIsStoredInSolr(self):\n config = getUtility(IUniversalSearchConfig)\n config.site_url = 'http://foo.com'\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n self.assertEqual([r.uri for r in self.search('*:*')],\n ['http://foo.com/Members/' + self.folder.getId()])\n\n def testSystemsVocabulary(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n vocab = getUtility(IVocabularyFactory, name='nva.universalsearch.systems')\n self.assertEqual([i.token for i in vocab(self.portal)],\n ['Other', 'Plone site'])\n\n def testSearchResultsAreFilteredBySystems(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n # by default 'systems' isn't set and we're getting all results\n results = solrSearchResults(SearchableText='Foo')\n self.assertEqual(sorted([(r.Title, r.system) for r in results]),\n [('Foo', 'Other'), ('Foo', 'Plone site')])\n # after setting 'systems' we only get results for those...\n config = getUtility(IUniversalSearchConfig)\n config.systems = 
['Plone site']\n results = solrSearchResults(SearchableText='Foo')\n self.assertEqual(sorted([(r.Title, r.system) for r in results]),\n [('Foo', 'Plone site')])\n\n def testSearchResultsCanBeLimitedViaRequestParameters(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n config = getUtility(IUniversalSearchConfig)\n config.systems = ['Plone site', 'Other'] # all 'systems' are allowed\n # explicitly setting an allowed 'system' limits results\n request = dict(SearchableText='[* TO *]', system='Other')\n results = solrSearchResults(request)\n self.assertEqual(sorted([(r.Title, r.system) for r in results]),\n [('Foo', 'Other')])\n\n def testOtherSystemsCannotBeSearchedViaRequestParameters(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n config = getUtility(IUniversalSearchConfig)\n config.systems = ['Plone site']\n # explicitly setting another 'system' mustn't yield results\n request = dict(SearchableText='[* TO *]', system='Other')\n results = solrSearchResults(request)\n self.assertEqual(sorted([(r.Title, r.system) for r in results]),\n [('Foo', 'Plone site')])\n\n\ndef test_suite():\n return defaultTestLoader.loadTestsFromName(__name__)\n","repo_name":"witsch/novareto","sub_path":"nva.universalsearch/trunk/src/nva/universalsearch/tests/test_indexes.py","file_name":"test_indexes.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"41892482273","text":"from pmd_beamphysics.units import nice_array, nice_scale_prefix\nfrom pmd_beamphysics.labels import mathlabel\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef plot_stats(genesis4_object, keys=[\"beam_xsize\", \"beam_ysize\"], tex=False, **kwargs):\n \"\"\"\n Plots stats\n\n \"\"\"\n nplots = len(keys)\n\n fig, axs = plt.subplots(nplots, **kwargs)\n\n # Make RHS axis for the solenoid field.\n xdat = genesis4_object.stat(\"zplot\")\n xmin = min(xdat)\n xmax = max(xdat)\n for i, key in enumerate(keys):\n ax = axs[i]\n\n ydat = genesis4_object.stat(key)\n ydat = np.mean(ydat, axis=1) # Average over slices\n\n ndat, factor, prefix = nice_array(ydat)\n unit = genesis4_object.units(key)\n units = f\"{prefix}{unit}\"\n # Hangle label\n ylabel = mathlabel(key, units=units, tex=tex)\n ax.set_ylabel(ylabel)\n ax.set_xlim(xmin, xmax)\n ax.plot(xdat, ndat)\n ax.set_xlabel(\"z (m)\")\n\n\ndef add_layout_to_axes(\n genesis4_object,\n *,\n ax=None,\n bounds=None,\n xfactor=1,\n add_legend=False,\n):\n \"\"\"\n Adds undulator layout to an axes.\n\n \"\"\"\n\n if bounds is None:\n zmin, zmax = 0, genesis4_object.stat(\"z\").max()\n else:\n zmin, zmax = bounds\n ax.set_xlim(zmin, zmax)\n\n dat = {}\n ax2 = ax.twinx()\n\n ax.set_xlabel(r\"$z$ (m)\")\n\n zlist = genesis4_object.stat(\"z\")\n\n lines = []\n for ax1, component, color, label, units in (\n (ax, \"aw\", \"red\", r\"$aw$\", \"1\"),\n (ax2, \"qf\", \"blue\", r\"Quad $k$\", r\"$1/m^2$\"),\n ):\n fz = genesis4_object.stat(component)\n\n y, factor, prefix = nice_array(fz)\n\n line = ax1.fill_between(zlist / xfactor, y, color=color, label=label, alpha=0.5)\n # lines += line\n\n ylabel = f\"{label} ({prefix}{units})\"\n ax1.set_ylabel(ylabel)\n\n labels = [line.get_label() for line in lines]\n if add_legend:\n ax.legend(lines, labels)\n\n\nfrom pmd_beamphysics.units import nice_array, nice_scale_prefix\nfrom pmd_beamphysics.labels import mathlabel\n\n\ndef plot_stats_with_layout(\n genesis4_object,\n ykeys=\"field_energy\",\n ykeys2=[],\n xkey=\"zplot\",\n xlim=None,\n ylim=None,\n ylim2=None,\n yscale='linear',\n yscale2='linear',\n nice=True,\n tex=False,\n include_layout=True,\n include_labels=True,\n include_legend=True,\n return_figure=False,\n **kwargs,\n):\n \"\"\"\n Plots stat output multiple keys.\n\n If a list of ykeys2 is given, these will be put on the right hand axis. This can also be given as a single key.\n\n Logical switches:\n nice: a nice SI prefix and scaling will be used to make the numbers reasonably sized. Default: True\n\n tex: use mathtext (TeX) for plot labels. Default: True\n\n include_legend: The plot will include the legend. Default: True\n\n include_layout: the layout plot will be displayed at the bottom. Default: True\n\n return_figure: return the figure object for further manipulation. 
Default: False\n\n \"\"\"\n if include_layout:\n fig, all_axis = plt.subplots(2, gridspec_kw={\"height_ratios\": [4, 1]}, **kwargs)\n ax_layout = all_axis[-1]\n ax_plot = [all_axis[0]]\n else:\n fig, all_axis = plt.subplots(**kwargs)\n ax_plot = [all_axis]\n\n # collect axes\n if isinstance(ykeys, str):\n ykeys = [ykeys]\n\n if ykeys2:\n if isinstance(ykeys2, str):\n ykeys2 = [ykeys2]\n ax_twinx = ax_plot[0].twinx()\n ax_plot.append(ax_twinx)\n\n # No need for a legend if there is only one plot\n if len(ykeys) == 1 and not ykeys2:\n include_legend = False\n\n # assert xkey == 'mean_z', 'TODO: other x keys'\n\n X = genesis4_object.stat(xkey)\n\n # Only get the data we need\n if xlim:\n good = np.logical_and(X >= xlim[0], X <= xlim[1])\n X = X[good]\n else:\n xlim = X.min(), X.max()\n good = slice(None, None, None) # everything\n\n # X axis scaling\n units_x = str(genesis4_object.units(xkey))\n if nice:\n X, factor_x, prefix_x = nice_array(X)\n units_x = prefix_x + units_x\n else:\n factor_x = 1\n\n # set all but the layout\n\n # Handle tex labels\n xlabel = mathlabel(xkey, units=units_x, tex=tex)\n\n for ax in ax_plot:\n ax.set_xlim(xlim[0] / factor_x, xlim[1] / factor_x)\n ax.set_xlabel(xlabel)\n\n # Draw for Y1 and Y2\n\n linestyles = [\"solid\", \"dashed\"]\n\n ii = -1 # counter for colors\n for ix, keys in enumerate([ykeys, ykeys2]):\n if not keys:\n continue\n ax = ax_plot[ix]\n linestyle = linestyles[ix]\n\n # Check that units are compatible\n ulist = [genesis4_object.units(key) for key in keys]\n if len(ulist) > 1:\n for u2 in ulist[1:]:\n assert ulist[0] == u2, f\"Incompatible units: {ulist[0]} and {u2}\"\n # String representation\n unit = str(ulist[0])\n\n # Data\n data = [genesis4_object.stat(key)[good] for key in keys]\n\n if nice:\n factor, prefix = nice_scale_prefix(np.ptp(data))\n unit = prefix + unit\n else:\n factor = 1\n\n # Make a line and point\n for key, dat in zip(keys, data):\n #\n ii += 1\n color = \"C\" + str(ii)\n\n # Handle tex labels\n label = mathlabel(key, units=unit, tex=tex)\n ax.plot(X, dat / factor, label=label, color=color, linestyle=linestyle)\n\n # Handle tex labels\n ylabel = mathlabel(*keys, units=unit, tex=tex)\n ax.set_ylabel(ylabel)\n \n # Scaling(e.g. 
\"linear\", \"log\", \"symlog\", \"logit\")\n if ix == 0:\n ax.set_yscale(yscale)\n else:\n ax_twinx.set_yscale(yscale2)\n\n # Set limits, considering the scaling.\n if ix == 0 and ylim:\n ymin = ylim[0]\n ymax = ylim[1]\n # Handle None and scaling\n if ymin is not None:\n ymin = ymin / factor\n if ymax is not None:\n ymax = ymax / factor\n new_ylim = (ymin, ymax)\n ax.set_ylim(new_ylim)\n # Set limits, considering the scaling.\n if ix == 1 and ylim2:\n pass\n # TODO\n if ylim2:\n ymin2 = ylim2[0]\n ymax2 = ylim2[1]\n # Handle None and scaling\n if ymin2 is not None:\n ymin2 = ymin2 / factor\n if ymax2 is not None:\n ymax2 = ymax2 / factor\n new_ylim2 = (ymin2, ymax2)\n ax_twinx.set_ylim(new_ylim2)\n else:\n pass\n\n # Collect legend\n if include_legend:\n lines = []\n labels = []\n for ax in ax_plot:\n a, b = ax.get_legend_handles_labels()\n lines += a\n labels += b\n ax_plot[0].legend(lines, labels, loc=\"best\")\n\n # Layout\n if include_layout:\n # Gives some space to the top plot\n ax_layout.set_ylim(-1, 1.5)\n\n # if xkey == 'mean_z':\n # ax_layout.set_xlim(xlim[0], xlim[1])\n # else:\n # ax_layout.set_xlabel('mean_z')\n # xlim = (0, I.stop)\n add_layout_to_axes(genesis4_object, ax=ax_layout, bounds=xlim)\n\n if return_figure:\n return fig\n","repo_name":"slaclab/lume-genesis","sub_path":"genesis/version4/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"}
+{"seq_id":"25213796228","text":"\"\"\"\nscatter.py\n==========\n\nThis module provides a function to display a scatter plot using the ClusterFun library.\nIt allows users to create a scatter plot of two columns of data.\n\"\"\"\nfrom typing import Optional\n\nimport pandas as pd\n\nfrom clusterfun.config import Config\nfrom clusterfun.plot import Plot\nfrom clusterfun.plot_types import DOCSTRING_STANDARD\nfrom clusterfun.storage.local.helpers import get_columns_for_db\nfrom clusterfun.validation import validate\n\n\ndef scatter(\n df: pd.DataFrame,\n x: str,\n y: str,\n media: str,\n color: Optional[str] = None,\n bounding_box: Optional[str] = None,\n title: Optional[str] = None,\n show: bool = True,\n): # pylint: disable=too-many-arguments,missing-function-docstring\n cfg = Config(\n type=\"scatter\",\n x=x,\n y=y,\n media=media,\n columns=get_columns_for_db(df, media, \"scatter\", x, y),\n color=color,\n bounding_box=bounding_box,\n title=title,\n )\n validate(df, cfg)\n return Plot.save(df, cfg).show(show)\n\n\nscatter.__doc__ = (\n \"\"\"\n :param df: pd.DataFrame\n The dataframe with the data to plot\n :param x: str\n The column name of the data to plot on the x-axis.\n :param y: str\n The column name of the data to plot on the y-axis.\n \"\"\"\n + DOCSTRING_STANDARD\n)\n","repo_name":"gietema/clusterfun","sub_path":"clusterfun/plot_types/scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"5"}
+{"seq_id":"3639697738","text":"import tigre\nimport numpy as np\nfrom tigre.utilities import sample_loader\nfrom tigre.utilities import CTnoise\nimport tigre.algorithms as algs\nfrom matplotlib import pyplot as plt\n\n#%% Geometry\ngeo = tigre.geometry_default(high_resolution=False)\n\n#%% Load data and generate projections\n# define angles\nangles = np.linspace(0, 2 * np.pi, 100)\n# Load thorax phatom data\nhead = sample_loader.load_head_phantom(geo.nVoxel)\n# generate projections\nprojections = tigre.Ax(head, geo, angles)\n# add noise\nnoise_projections = CTnoise.add(projections, Poisson=1e5, Gaussian=np.array([0, 10]))\n\n#%% Usage CGLS\n#\n#\n# CGLS has the common 4 inputs for iterative algorithms in TIGRE:\n#\n# Projections, geometry, angles, and number of iterations\n#\n# Additionally it contains optional initialization tehcniques, but we\n# reccomend not using them. CGLS is already quite fast and using them may\n# lead to divergence.\n# The options are:\n# 'Init' Describes diferent initialization techniques.\n# - 'none' : Initializes the image to zeros (default)\n# - 'FDK' : intializes image to FDK reconstrucition\n# - 'multigrid': Initializes image by solving the problem in\n# small scale and increasing it when relative\n# convergence is reached.\n# - 'image' : Initialization using a user specified\n# image. Not recomended unless you really\n# know what you are doing.\n# 'InitImg' an image for the 'image' initialization. Avoid.\n\n# # use CGLS\nimgCGLS, normL2CGLS = algs.cgls(noise_projections, geo, angles, 30, computel2=True)\n# use LSQR\nimgLSQR, normL2LSQR = algs.lsqr(noise_projections, geo, angles, 30, computel2=True)\n# use LSMR\nimgLSMR, normL2LSMR = algs.lsmr(noise_projections, geo, angles, 30, computel2=True,lmbda=0)\nimgLSMR2, normL2LSMR2 = algs.lsmr(noise_projections, geo, angles, 30, computel2=True,lmbda=30)\n# use LSQR\nimghLSQR, normhL2LSQR = algs.hybrid_lsqr(noise_projections, geo, angles, 30, computel2=True)\n\n# AB/BA-GMRES\nimgabgmres, normhabgmres = algs.ab_gmres(noise_projections, geo, angles, 30, computel2=True)\nimgbagmres, normhbagmres = algs.ba_gmres(noise_projections, geo, angles, 30, computel2=True)\n# # AB/BA-GMRES with FDK backprojector\nimgabgmresfdk, normhabgmresfdk = algs.ab_gmres(noise_projections, geo, angles, 30, computel2=True,backprojector=\"FDK\")\nimgbagmresfdk, normhbagmresfdk = algs.ba_gmres(noise_projections, geo, angles, 30, computel2=True,backprojector=\"FDK\")\n\n\n# SIRT for comparison.\nimgSIRT, normL2SIRT = algs.sirt(noise_projections, geo, angles, 60, computel2=True)\n\n#%% plot results\n#\n# We can see that CGLS gets to the same L2 error in less amount of\n# iterations.\n\n\n\nplt.plot(np.vstack((normL2CGLS[0, :], normL2SIRT[0, 0:30],normL2LSMR[0, :],normL2LSMR2[0, :],normhL2LSQR[0, :],normhabgmres[0,:],normhbagmres[0,:],normhabgmresfdk[0,:],normhbagmresfdk[0,:])).T)\nplt.title(\"L2 error\")\nplt.xlabel(\"Iteration\")\nplt.ylabel(\"$ |Ax-b| $\")\nplt.gca().legend((\"CGLS\", \"SIRT\",\"LSMR lambda=0\", \"LSMR lambda=30\",\"hybrid LSQR\",\"AB-GMRES\",\"BA-GMRES\",\"AB-GMRES FDK\",\"BA-GMRES FDK\"))\nplt.show()\n# plot images\ntigre.plotimg(np.concatenate([np.concatenate([imgCGLS, imgSIRT, imgLSQR,imgabgmres,imgabgmresfdk],axis=1),np.concatenate([imgLSMR, imgLSMR2, imghLSQR,imgbagmres,imgbagmresfdk], axis=1)], axis=2), dim=\"z\", step=2,clims=[0, 2])\n# plot errors\ntigre.plotimg(np.concatenate([np.concatenate([head-imgCGLS, head-imgSIRT, head-imgLSQR, head-imgabgmres, head-imgabgmresfdk],axis=1),np.concatenate([head-imgLSMR, 
head-imgLSMR2, head-imghLSQR,head-imgbagmres,head-imgbagmresfdk], axis=1)], axis=2), dim=\"z\", slice=32)\n","repo_name":"CERN/TIGRE","sub_path":"Python/demos/d08_Algorithms03.py","file_name":"d08_Algorithms03.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":450,"dataset":"github-code","pt":"5"}
+{"seq_id":"44474578114","text":"from analyze import analyze\nfrom aylienapiclient import textapi\n\nclient = textapi.Client(\"\", \"\")\n\ndef summarizeArticle(url):\n\tsummary = \"\"\n\tsummaryResponse = client.Summarize({'url': url, 'sentences_number': 4})\n\tfor sentence in summaryResponse['sentences']:\n\t\tsummary += \"--\" + sentence +\"\\n\\n\"\n\treturn summary\n","repo_name":"bcaton85/Summary","sub_path":"backend/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"11469682389","text":"n = int(input())\nd={}\nfor i in range(n):\n a = int(input())\n if a in d:\n d[a]+=1\n else:\n d.update({a:1})\na = list(d.keys())\nif(len(d)==2 and d[a[0]]==d[a[1]]):\n print(\"YES\")\n print(\"{} {}\".format(a[0],a[1]))\nelse:\n print(\"NO\")","repo_name":"shaarangg/CP-codes","sub_path":"codeforces/Python/fair_game.py","file_name":"fair_game.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"70878563991","text":"import request\nimport re\n\n\ndef part_one(rules, messages):\n patterns = to_patterns(rules)\n count = 0\n\n for message in messages:\n if re.fullmatch(patterns['0'], message):\n count += 1\n\n return count\n\n\ndef part_two(rules, messages):\n rules['8'] = '(42)+'\n rules['11'] = ' | '.join([' '.join(['42'] * i) + ' ' + ' '.join(['31'] * i) for i in range(1, 10)])\n patterns = to_patterns(rules)\n count = 0\n\n for message in messages:\n if re.fullmatch(patterns['0'], message):\n count += 1\n\n return count\n\n\ndef to_rules(text):\n rules = {}\n\n for line in text.splitlines():\n (key, value) = line.split(': ')\n rules[key] = value\n\n return rules\n\n\ndef to_patterns(rules):\n patterns = {}\n items = list(rules.items())\n\n while items:\n for i in reversed(range(len(items))):\n key, value = items[i]\n\n if not re.search(r'\\d', value):\n if re.search(r'\\|', value):\n patterns[key] = f'({value.replace(\" \", \"\")})'\n else:\n patterns[key] = value.replace(' ', '')\n\n items.pop(i)\n\n for key, value in patterns.items():\n for i in reversed(range(len(items))):\n k, v = items[i]\n m = re.sub(fr'(^|(?<=\\(|\\s)){key}((?=\\s|\\))|$)', value, v)\n\n if m:\n items[i] = (k, m)\n\n return patterns\n\n\ndef main():\n text = request.get('https://adventofcode.com/2020/day/19/input')\n inputs = text.strip().replace('\"', '').split('\\n\\n')\n rules = to_rules(inputs[0])\n messages = inputs[1].splitlines()\n print('* Part One:', part_one(rules, messages))\n print('** Part Two:', part_two(rules, messages))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fredrik-sy/AoC2020","sub_path":"day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"22874300636","text":"import os\nfrom common.request_utils import RequestUtils\nfrom common.testdata_utils import TestdataUtils\n\ncurrent=os.path.dirname(__file__)\ndata_path=os.path.join(current,'../data/test_case1.xlsx')\nprint(data_path)\n\nallcase=TestdataUtils('Sheet1').get_testcase_data_list()\nfor case_info in allcase:\n\n Result=RequestUtils().request_by_step(case_info.get('case_info'))\n print(Result)\n","repo_name":"Mmeiapple/Request_unittest","sub_path":"api_testcase/excel_data.py","file_name":"excel_data.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"74407454872","text":"\nimport random\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n\nnums = [random.uniform(1, 10) for i in range(10)]\n\n\nnums_groups = list(chunks(nums, 4))\n\n\nprint(nums_groups)","repo_name":"benjaminhuanghuang/code-snippets","sub_path":"list-chunk/list-chunk.py","file_name":"list-chunk.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"16305495388","text":"from trawler.browsers import BrowseBing, BrowseStackOverFlow, BrowseStackOverFlowDocumentation\nfrom trawler.browsers.exceptions import BrowerScrapeMethodNotImplemented\nimport pytest\nfrom trawler.settings import DEFAULT_MAX_RESULTS_PER_PAGE\n\n\ndef test_browse_with_bing():\n max_page = 1\n bing = BrowseBing(kw=\"Ravi RT Merugu\", max_page=max_page)\n bing.search()\n result = bing.data\n assert bing.data['result_count'] != 0\n assert bing.data[ 'result_count'] <= DEFAULT_MAX_RESULTS_PER_PAGE * max_page\n assert \"selenium-htmlunit\" == bing.shift_method()\n assert type(result) is dict\n assert \"result\" in result\n assert \"related_keywords\" in result\n bing.close()\n\n\ndef test_browse_with_bing_source_enin():\n max_page = 2\n bing = BrowseBing(kw=\"Ravi RT Merugu\", max_page=max_page, source=\"en-in\")\n bing.search()\n result = bing.data\n assert bing.data['result_count'] != 0\n assert bing.data['result_count'] <= DEFAULT_MAX_RESULTS_PER_PAGE * max_page\n assert \"selenium-htmlunit\" == bing.shift_method()\n assert type(result) is dict\n assert \"result\" in result\n assert \"related_keywords\" in result\n bing.close()\n\n\ndef test_browse_with_bing_source_enus():\n max_page = 1\n bing = BrowseBing(kw=\"Ravi RT Merugu\", max_page=max_page, source=\"en-us\")\n bing.search()\n result = bing.data\n assert bing.data['result_count'] != 0\n assert bing.data['result_count'] <= DEFAULT_MAX_RESULTS_PER_PAGE * max_page\n assert \"selenium-htmlunit\" == bing.shift_method()\n assert type(result) is dict\n assert \"result\" in result\n assert \"related_keywords\" in result\n bing.close()\n\n\n\ndef test_browser_with_stackoverflow():\n stack = BrowseStackOverFlow(kw=\"Python Exception errors\", max_page=1)\n stack.search()\n result = stack.data\n assert type(result) is dict\n assert \"result\" in result\n assert \"related_keywords\" in result\n stack.close()\n\n\ndef test_browser_no_nextpage():\n bing = BrowseBing(kw=\"XxXXXXXXxxxxxbas dans dsand msad asd amd ansd am dna smda sdn asdmas dm\", max_page=1)\n bing.search()\n result = bing.data\n assert result['next_url'] is None\n bing.close()\n\n\ndef test_browser_implamentation_error():\n with pytest.raises(BrowerScrapeMethodNotImplemented) as excinfo:\n bing = BrowseBing(kw=\"Hello\", max_page=1, method=\"chromejjj\")\n bing.search()\n bing.close()\n assert \"Not implemented\" in str(excinfo)\n","repo_name":"rrmerugu/trawler","sub_path":"tests/test_browser.py","file_name":"test_browser.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"}
+{"seq_id":"40561085508","text":"import subprocess\n\nobj = 'BHR001'\nfname = 'out-2mass-' + obj\nlobj = '-c=%s' % obj\n\n# command to execute in bash\ncommand = ['vizquery',\n '-source=II/246',\n '-c.bm=45x45',\n '-out=RAJ2000 DEJ2000 Jmag Jcmsig Hmag Hcmsig Kmag Kcmsig',\n '-out.form=mini',\n lobj,\n 'Jcmsig=<0.03',\n 'Hcmsig=<0.03',\n 'Kcmsig=<0.03',\n 'Qflg=AAA']\n\n# save data\nwith open(fname, 'wb') as out:\n p = subprocess.Popen(command, stdout=out)\n p.wait()\n\n# erease first 49 lines\nlines = open(fname).readlines()\nopen(fname, 'w').writelines(lines[49:-1])\n\nprint(\"\\nData for \" + obj + \" is ready!\\n\")\n","repo_name":"gracca/2MASSdist","sub_path":"data2MASSdist.py","file_name":"data2MASSdist.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"42505256018","text":"\"\"\"empty message\n\nRevision ID: 93b501c2e5f6\nRevises: 8c4cf25f4e2b\nCreate Date: 2018-08-27 18:01:12.364963\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"93b501c2e5f6\"\ndown_revision = \"8c4cf25f4e2b\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"comment\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"body\", sa.Text(), nullable=True),\n sa.Column(\"timestamp\", sa.DateTime(), nullable=True),\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"post_id\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint([\"post_id\"], [\"post.id\"]),\n sa.ForeignKeyConstraint([\"user_id\"], [\"user.id\"]),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(\n op.f(\"ix_comment_timestamp\"), \"comment\", [\"timestamp\"], unique=False\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f(\"ix_comment_timestamp\"), table_name=\"comment\")\n op.drop_table(\"comment\")\n # ### end Alembic commands ###\n","repo_name":"arajmaharjan/WOU-CIS-Cousework","sub_path":"Wolfit/migrations/versions/93b501c2e5f6_add_comments.py","file_name":"93b501c2e5f6_add_comments.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"1662090246","text":"from typing import Dict, Type\nfrom os.path import basename\n\nfrom regex import search, compile\n\nfrom ...interfaces import ExtractLogInfosInterface\n\nclass ExtractLogInfos(ExtractLogInfosInterface):\n def __init__(self, log_path: str, error_class: Type[Exception]) -> None:\n self.__log_name = log_path\n self.__error_class = error_class\n self.__seconds = self.get_seconds()\n self.__bitrate = self.get_bitrate()\n \n def get_seconds(self) -> int:\n with open(self.__log_name, 'r') as f:\n file = f.readlines()\n for line in file:\n if 'Duration' in line:\n # Duration == Duration: 00:23:40.09...\n duration = line.strip().split(' ')[1][:-1:] # Extract the time and remove ','\n duration = duration.replace('.', ':')\n duration = duration.split(':')\n hour = int(duration[0]) * 3600\n minute = int(duration[1]) * 60\n second = int(duration[2])\n seconds = hour + minute + second\n return seconds\n return 1\n\n def get_bitrate(self) -> int:\n # ... Audio ... 44100 Hz ... 128 kb/s...\n bitrate_regex = compile(r'([0-9]{3} kb\\/s)') # Regex to extract bitrate\n with open(self.__log_name, 'r') as f:\n file = f.readlines()\n for line in file:\n #if search(hertz_regex, line):\n if 'bitrate' in line:\n bitrate_str_pos = search(bitrate_regex, line)\n if bitrate_str_pos == None:\n raise self.__error_class('Log Error!')\n else:\n bitrate_str_pos = bitrate_str_pos.span()\n bitrate_str = \\\n line[bitrate_str_pos[0]:bitrate_str_pos[1]].replace(' kb/s', '')\n return int(bitrate_str)\n return 128\n\n def get_current_file_size(self) -> Dict[str, int]:\n total_file_size_regex = compile(r'(?<=(audio:))(.*)(?=(kBs))')\n current_file_size_regex = compile(r'(?<=(size=))(.*)(?=(kB))')\n cases = (\n ('size', 'in conversion', current_file_size_regex),\n ('audio', 'completed', total_file_size_regex)\n ) # ('key', 'message', regex)\n with open(self.__log_name, 'r') as f:\n file = f.readlines()\n file = [line for line in file if line != '\\n'] # Ignore empty lines\n last_line = file[-1].replace(' ', '')\n\n # Verify error\n\n if 'Exiting' in last_line:\n raise self.__error_class('Conversion Error!')\n\n for case, message, regex in cases:\n if case in last_line:\n pos = search(regex, last_line).span()\n size = int(last_line[pos[0]:pos[1]])\n return {message: size}\n raise self.__error_class('Log Error!')\n \n def get_estimated_file_size(self) -> int:\n return int((self.__seconds * self.__bitrate) / 8)\n\n def get_filename(self) -> str:\n filename_regex = compile(r\"(?<=to\\s\\')(.*)(?=\\')\")\n with open(self.__log_name, 'r') as f:\n file = f.readlines()\n for line in file:\n if 'Output' in line:\n pos = search(filename_regex, line).span()\n if pos == None:\n raise self.__error_class('Log Error!')\n filename = line[pos[0] : pos[1]]\n return basename(filename) # Remove path and return filename\n raise self.__error_class('Log Error!')","repo_name":"JoaoEmanuell/mp3-api","sub_path":"routes/api/source/conversor/ffmpeg/extract_log_infos.py","file_name":"extract_log_infos.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"12273812158","text":"import os\nimport time\nimport random\nfrom collections import Counter\n\nimport torch\nimport numpy as np\n\n\ndef cut_model_state_dict(state_dict):\n state_dict.pop(\"fc2.weight\")\n state_dict.pop(\"fc2.bias\")\n state_dict.pop(\"bn2.weight\")\n state_dict.pop(\"bn2.bias\")\n state_dict.pop(\"bn2.running_mean\")\n state_dict.pop(\"bn2.running_var\")\n state_dict.pop(\"bn2.num_batches_tracked\")\n state_dict.pop(\"center_features.weight\") if state_dict[\"center_features.weight\"] is not None else None\n\n\nclass Trainer:\n \"\"\"모델 학습 및 평가를 위한 클래스\"\"\"\n\n def __init__(self, model, args, logging=True, pretext=False):\n self.args = args\n self.model = model\n self.model.cuda()\n self.logging = logging\n self.pretext = pretext\n self.model_name = f\"{self.model.__class__.__name__}_{str(time.time()).split('.')[0]}\"\n self.best_accuracy = 0\n\n def train(self):\n \"\"\"모델을 학습\"\"\"\n self.write_log(f\"Start training {self.model_name}\", mode=\"w\")\n \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)\n self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[30, 80, 120], gamma=0.333) # changed\n\n train_data_loader = self.model.get_train_data_loader()\n val_data_loader = self.model.get_test_data_loader()\n\n self.write_log(\"\")\n for key, val in self.args.__dict__.items():\n self.write_log(f\"{key}: {val}\")\n self.write_log(\"\")\n\n for epoch in range(self.args.epochs):\n start_time = time.time()\n self.write_log(f\"Epoch {epoch + 1:03d}/{self.args.epochs:03d} | LR: {self.optimizer.param_groups[0]['lr']:.6f}\")\n self.train_epoch(train_data_loader)\n self.write_log(f\"Time elapsed: {(time.time() - start_time) / 60:.2f} mins.\")\n\n if self.pretext: # pretext 모델인 경우 evaluation 없이 모델 저장\n torch.save(self.model.state_dict(), f\"results/train/weights/{self.model_name}.pt\")\n self.write_log(\"Saved model!\")\n elif (epoch + 1) % 10 == 0: # N 에폭마다 evaluation 진행\n self.eval(val_data_loader)\n \n def train_epoch(self, data_loader):\n \"\"\"모델을 1에폭 학습\"\"\"\n\n self.model.train()\n\n # 학습 추적을 위한 리스트\n cost_list = list()\n ind_cost_list = list()\n\n for batch_idx, data in enumerate(data_loader):\n cost, ind_costs = self.model.get_cost(data)\n\n self.optimizer.zero_grad()\n cost.backward()\n self.optimizer.step()\n\n cost_list.append(cost.item())\n ind_cost_list.append(ind_costs)\n \n if (batch_idx + 1) % 10 == 0 or (batch_idx + 1) == len(data_loader):\n self.write_log(f\"Batch: {batch_idx + 1:04d}/{len(data_loader):04d} | Cost: {np.mean(cost_list):.4f} {np.round(np.mean(ind_cost_list, axis=0), 3)}\")\n\n # 학습 추적을 위한 리스트 초기화\n cost_list = list()\n ind_cost_list = list()\n\n self.scheduler.step()\n \n def eval(self, data_loader, train=True, save=False):\n \"\"\"모델을 evaluation 혹은 추론\"\"\"\n self.model.eval()\n\n img1_path_list = list()\n img2_path_list = list()\n labels_list = list()\n label_preds_list = list()\n\n for idx, data in enumerate(data_loader):\n print(f\"Evaluation Batch: {idx + 1:04d}/{len(data_loader):04d}\", end=\"\\r\")\n\n img1_path, img2_path, labels, label_preds = self.model.evaluate(data)\n img1_path_list.extend(img1_path)\n img2_path_list.extend(img2_path)\n labels_list.extend(labels)\n label_preds_list.extend(label_preds)\n print()\n \n threshold = np.median(label_preds_list)\n self.write_log(f\"Threshold: {threshold:.4f}\")\n\n label_preds_list = np.array(label_preds_list) > threshold\n accuracy = np.mean(np.array(labels_list).astype(bool) == np.array(label_preds_list).astype(bool))\n 
self.write_log(f\"Accuracy: {accuracy:.6f}\")\n\n # 학습 중일 경우 최고의 validation 성능을 낸 모델 저장\n if train:\n if accuracy > self.best_accuracy:\n self.best_accuracy = accuracy\n\n # Inference에 사용되지 않는 layer를 덜어내고 모델을 저장\n state_dict = self.model.state_dict()\n cut_model_state_dict(state_dict)\n torch.save(state_dict, f\"results/train/weights/{self.model_name}.pt\")\n self.write_log(\"Saved best model!\")\n \n # 필요한 경우 prediction 결과를 저장\n if save:\n with open(f\"results/test/{self.args.model_weight.split('/')[-1].split('.')[0]}_preds.csv\", \"w\") as result:\n result.write(f\"image1,image2,label\\n\")\n for img1_path, img2_path, pred in zip(img1_path_list, img2_path_list, label_preds_list):\n result.write(f\"{img1_path},{img2_path},{int(pred)}\\n\")\n\n def write_log(self, msg, mode=\"a\"):\n \"\"\"로그 파일 작성\"\"\"\n if self.logging:\n with open(f\"results/train/logs/{self.model_name}.log\", mode) as log:\n log.write(f\"{msg}\\n\")\n print(msg)\n","repo_name":"jessekim-ck/2020-ai-challenge-04","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"5"}
+{"seq_id":"42555411066","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nfrom pyocd.core.helpers import ConnectHelper\nfrom pyocd.target.builtin import BUILTIN_TARGETS\n# from pyocd.target.pack import pack_target ManagedPacks\nfrom pyocd.target.pack.pack_target import ManagedPacks\nfrom PyQt5.QtWidgets import QApplication, QComboBox, QWidget, QVBoxLayout, QTableWidgetItem, QFileDialog, QMessageBox\nfrom daplink_flash_ui import *\nimport pyocd.core\nfrom pyocd.core.memory_map import MemoryType\n# import cmsis_pack_manager\nfrom pyocd.target.pack.cmsis_pack import (CmsisPack, MalformedCmsisPackError)\nfrom pyocd.flash.file_programmer import FileProgrammer\nimport logging\nfrom pyocd.target import TARGET\n\nclass Flash_Loader(object):\n\n def __init__(self):\n app = QApplication(sys.argv)\n self.window = QWidget()\n\n self.ui = Ui_Form()\n self.ui.setupUi(self.window)\n self.session = None\n\n\n probes = ConnectHelper.get_all_connected_probes(blocking=False)\n for probe in probes:\n self.ui.daplink_list.addItem(probe.description)\n if len(probes) > 0:\n self.probe = probes[0]\n # print(self.probe)\n else:\n self.probe = None\n \n\n # logger = logging.getLogger(__name__)\n # logger.setLevel(level=logging.DEBUG)\n\n # StreamHandler\n # stream_handler = logging.StreamHandler(self.ui.log.append)\n # stream_handler.setLevel(level=logging.DEBUG)\n # logger.addHandler(stream_handler)\n\n self.ui.flash.clicked.connect(self.flash_device_run)\n self.ui.update_dap.clicked.connect(self.update_daplink)\n self.ui.connect.clicked.connect(self.open_session)\n self.ui.selsec_firmware.clicked.connect(self.select_file)\n self.ui.daplink_list.currentIndexChanged.connect(self.daplink_change)\n\n self.ui.flash.setDisabled(True)\n self.ui.progressBar.setValue(0)\n self.window.show()\n app.exec_()\n\n\n def daplink_change(self):\n probes = ConnectHelper.get_all_connected_probes(blocking=False)\n\n for probe in probes:\n if probe.description == self.ui.daplink_list.currentText():\n self.probe = probe\n else:\n self.probe = None\n def open_session(self):\n if self.session is not None and self.session.is_open:\n self.session.close()\n\n if self.probe is None:\n QMessageBox.information(self.window, \"ERROR\", \"No probe\", QMessageBox.Ok)\n return\n\n target_device = \"stm32f103c8\"\n\n if target_device not in TARGET:\n QMessageBox.information(self.window, \"ERROR\", \"MCU not supported\", QMessageBox.Ok)\n return\n\n self.session = ConnectHelper.session_with_chosen_probe(\n target_override=target_device,unique_id=self.probe.unique_id)\n self.session.open()\n\n # print(self.probe.unique_id)\n board = self.session.board\n self.target = board.target\n\n memory_map = board.target.get_memory_map()\n ram_region = memory_map.get_default_region_of_type(MemoryType.RAM)\n rom_region = memory_map.get_boot_memory()\n\n self.addr_bin = rom_region.start\n self.ui.flash.setEnabled(True)\n\n def flash_device(self):\n print(\"flash device\")\n if os.path.exists(self.ui.filepath.text()):\n self.ui.log.append(\"Start flashing\")\n FileProgrammer(self.session, progress=self.progress_monitor).program(self.ui.filepath.text(), base_address=self.addr_bin)\n self.ui.log.append(\"Finish flashing\")\n else:\n QMessageBox.critical(self.window,\"ERROR\",\"Firmware is not exist\",QMessageBox.Yes)\n\n\n def flash_device_run(self):\n\n if os.path.exists(self.ui.filepath.text()):\n self.ui.log.append(\"Start flashing\")\n FileProgrammer(self.session, progress=self.progress_monitor).program(\n self.ui.filepath.text(), 
base_address=self.addr_bin)\n self.ui.log.append(\"Finish flashing\")\n self.target.reset()\n else:\n QMessageBox.critical(self.window,\"ERROR\",\"Firmware is not exist\",QMessageBox.Yes)\n \n\n\n def progress_monitor(self, amount):\n print(\"progress\")\n print(amount)\n self.ui.progressBar.setValue(amount * 100)\n\n\n\n def update_daplink(self):\n self.ui.daplink_list.clear()\n probes = ConnectHelper.get_all_connected_probes(blocking=False)\n\n for probe in probes:\n self.ui.daplink_list.addItem(probe.description)\n if len(probes) > 0:\n self.probe = probes[0]\n else:\n self.probe = None\n \n def select_file(self):\n filepath, filetype = QFileDialog.getOpenFileName(\n self.window, \"open fireware\", \"./\", \"hex(*.hex);;bin(*.bin);;\")\n # print(fname)\n self.ui.filepath.setText(filepath)\n\n\nif __name__ == '__main__':\n Flash_Loader()\n\n","repo_name":"zhuangzuoyi/daplink_flasher","sub_path":"pyocd_target/flash_loader.py","file_name":"flash_loader.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
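The flash_loader record above drives pyocd through probe selection, session setup, flashing, and reset. Condensed to the same pyocd calls the record itself uses, the core sequence looks roughly like the sketch below; the target name and firmware path are placeholders, and running it requires an attached debug probe:

from pyocd.core.helpers import ConnectHelper
from pyocd.flash.file_programmer import FileProgrammer

# Placeholder target and firmware; a CMSIS-DAP/DAPLink probe must be attached.
with ConnectHelper.session_with_chosen_probe(target_override="stm32f103c8") as session:
    target = session.board.target
    rom_region = target.get_memory_map().get_boot_memory()   # default boot flash region
    FileProgrammer(session).program("firmware.bin", base_address=rom_region.start)
    target.reset()                                           # run the freshly flashed firmware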
+{"seq_id":"886383203","text":"import logging\nimport threading\nfrom abc import ABC, abstractmethod\nfrom threading import Condition\nfrom typing import Any, Dict, List, Union\n\nimport fed\nimport ray\n\nimport secretflow.distributed as sfd\nfrom secretflow.device.driver import reveal\nfrom secretflow.distributed.primitive import DISTRIBUTION_MODE\n\nfrom .device import PYU\n\nthread_local = threading.local()\n\nSERVER = \"server\"\nCLIENT = \"client\"\n\n\ndef get_role():\n return thread_local.link.role\n\n\ndef get_device():\n return thread_local.link.device\n\n\ndef set_mesh(link: 'Link'):\n thread_local.link = link\n\n\ndef send_to_clients(name, value, version):\n \"\"\"Send message to the target device.\n this function is non-blocking.\n\n Args:\n name: message name\n value: message value\n version: message version, used to distinguish between different training rounds\n \"\"\"\n thread_local.link.send(name, value, thread_local.link._clients, version)\n\n\ndef send_to_server(name, value, version):\n \"\"\"Send message to the target device.\n this function is non-blocking.\n\n Args:\n name: message name\n value: message value\n version: message version, used to distinguish between different training rounds\n \"\"\"\n thread_local.link.send(name, value, thread_local.link._server, version)\n\n\ndef recv_from_clients(name, version):\n \"\"\"\n Receive messages from the source device.\n this function is blocking\n\n Args:\n name: message name\n version: TODO: What is the purpose of the version parameter?\n\n Returns:\n The received message\n \"\"\"\n return thread_local.link.recv(name, thread_local.link._clients, version)\n\n\ndef recv_from_server(name, version):\n \"\"\"\n Receive messages from the source device.\n this function is blocking\n\n Args:\n name: message name\n version: message version, used to distinguish between different training rounds\n\n Returns:\n The received message\n \"\"\"\n return thread_local.link.recv(name, thread_local.link._server, version)\n\n\nclass Communicator(ABC):\n @abstractmethod\n def send(dest: PYU, data: Any, key: str):\n raise NotImplementedError()\n\n @abstractmethod\n def recv(src: PYU, keys: Union[str, List[str]]):\n raise NotImplementedError()\n\n\nclass FedCommunicator(Communicator):\n def __init__(self, partners: List[PYU]):\n self.parties = [partner.party for partner in partners]\n\n def send(self, dest: PYU, data: Any, key: str):\n assert dest.party in self.parties, f'Device {dest} is not in this communicator.'\n return fed.send(\n dest_party=dest.party,\n data=data,\n upstream_seq_id=key,\n downstream_seq_id=key,\n )\n\n def recv(self, src: PYU, keys: Union[str, List[str]]):\n is_single = isinstance(keys, str)\n if is_single:\n keys = [keys]\n\n vals = ray.get([fed.recv(src.party, src.party, key, key) for key in keys])\n return vals[0] if is_single else vals\n\n\nclass RayCommunicator(Communicator):\n def __init__(self):\n self._messages = {}\n self._cv = Condition()\n\n def links(self, links: Dict[PYU, ray.actor.ActorHandle]):\n self._links = links\n\n def send(self, dest: PYU, data: Any, key: str):\n assert dest in self._links, f'Device {dest} is not in this communicator.'\n logging.debug(f'send to dest {dest}')\n self._links[dest]._recv_message.remote(key, data)\n\n def _recv_message(self, key: str, value: Any):\n \"\"\"Receive message\n\n Args:\n key: The message key, consisting by source & destination device,\n message name, and unique identifier\n value: message body\n \"\"\"\n logging.debug(f'receive message from remote: {key}')\n 
with self._cv:\n self._messages[key] = value\n self._cv.notifyAll()\n\n def recv(self, src: PYU, keys: Union[str, List[str]]):\n logging.debug(f'receive message: {keys}')\n\n is_single = isinstance(keys, str)\n if is_single:\n keys = [keys]\n vals = {}\n with self._cv:\n while True:\n recv_keys = []\n for k in keys:\n if k in self._messages:\n vals[k] = self._messages.pop(k)\n recv_keys.append(k)\n\n for k in recv_keys:\n keys.remove(k)\n\n if len(keys) == 0:\n break\n self._cv.wait()\n\n return list(vals.values())[0] if is_single else list(vals.values())\n\n\nclass Link:\n \"\"\"A helper class for communication inside actor between several actors.\n\n You should not use this class directly but inherit it and decorate your\n child class with :py:meth:`~secretflow.device.proxy`.\n\n Examples\n --------\n >>> from secretflow.device import proxy\n >>> from seceretflow.device.link import Link, init_link\n >>>\n >>> @proxy\n >>> class PS(Link):\n >>> def run(self):\n >>> pass\n >>>\n >>>\n >>> @proxy\n >>> class Client(Link):\n >>> def run(self):\n >>> pass\n >>>\n >>> ps = PS()\n >>> clients = [Client() for i in range(2)]\n >>> init_link(ps, clients)\n >>> for client in clients:\n >>> init_link(client, ps)\n >>>\n \"\"\"\n\n def __init__(self, device: PYU, key_prefix: str = ''):\n \"\"\"Initialize\n\n Args:\n device: where this Link instance located, PYU\n \"\"\"\n self._device = device\n self._initialized = False\n self._clients = None\n self._server = None\n self._key_prefix = key_prefix\n self._comm = None\n\n def initialize(\n self, comm_or_links: Union[Communicator, Dict[PYU, ray.actor.ActorHandle]]\n ):\n if isinstance(comm_or_links, FedCommunicator):\n self._comm = comm_or_links\n else:\n self._comm = RayCommunicator()\n self._comm.links(comm_or_links)\n # Indicate success.\n return True\n\n @staticmethod\n def _create_key(\n src_device: Union[PYU, List[PYU]],\n dst_device: Union[PYU, List[PYU]],\n name: str,\n step_id: int = 0,\n key_prefix='',\n ):\n if isinstance(src_device, PYU) and isinstance(dst_device, PYU):\n return f'{key_prefix};{src_device};{dst_device};{name};{step_id}'\n elif isinstance(src_device, List):\n assert isinstance(dst_device, PYU), f'invalid dst_device: {dst_device}'\n return [\n f'{key_prefix};{device};{dst_device};{name};{step_id}'\n for device in src_device\n ]\n elif isinstance(dst_device, List):\n assert isinstance(src_device, PYU), f'invalid src_device: {src_device}'\n return [\n f'{key_prefix};{src_device};{device};{name};{step_id}'\n for device in dst_device\n ]\n else:\n assert False, f'invalid src_device: {src_device}, dst_device: {dst_device}'\n\n @property\n def clients(self):\n return self._clients\n\n @clients.setter\n def clients(self, clients: List[PYU]):\n self._clients = clients\n\n @property\n def server(self):\n return self._server\n\n @server.setter\n def server(self, server: PYU):\n self._server = server\n\n def send(\n self, name: str, value: Any, dst_device: Union[PYU, List[PYU]], step_id: int = 0\n ):\n \"\"\"Send message to target device.\n this function is non-blocking\n\n Args:\n name: message name\n value: message value\n dst_device: target device(s), can be a single device or a list of devices\n step_id: A process-level unique identifier to identify the communication\n \"\"\"\n assert isinstance(dst_device, PYU) or (\n isinstance(dst_device, List) and len(dst_device) > 0\n ), f'dst_device must be PYU or PYU list'\n\n key = self._create_key(\n self._device, dst_device, name, step_id, self._key_prefix\n )\n logging.debug(f'send 
message: {key}')\n\n if isinstance(dst_device, list):\n for msg_id, device in zip(key, dst_device):\n self._comm.send(device, value, msg_id)\n else:\n self._comm.send(dst_device, value, key)\n\n def _recv_message(self, key: str, value: Any):\n \"\"\"Receive message\n\n Args:\n key: The message key, consisting by source & destination device,\n message name, and unique identifier\n value: message body\n \"\"\"\n logging.debug(f'receive message from remote: {key}')\n self._comm._recv_message(key, value)\n\n def recv(\n self, name: str, src_device: Union[PYU, List[PYU]], step_id: int = 0\n ) -> Any:\n \"\"\"Receive messages from the source device.\n this function is blocking\n\n Args:\n name: The message name\n src_device: source device(s), can be a single device or a list of devices\n step_id: A process-level unique identifier to identify the communication\n\n Returns:\n The received message\n \"\"\"\n assert isinstance(src_device, PYU) or (\n isinstance(src_device, List) and len(src_device) > 0\n ), f'dst_device must be PYU or PYU list'\n\n key = self._create_key(\n src_device, self._device, name, step_id, self._key_prefix\n )\n logging.debug(f'receive message: {key}')\n return self._comm.recv(src=self._device, keys=key)\n\n\ndef init_link(link: Link, partners: List[Link]):\n if not isinstance(partners, list):\n partners = [partners]\n if sfd.get_distribution_mode() == DISTRIBUTION_MODE.PRODUCTION:\n comm = FedCommunicator([partner.device for partner in partners])\n # Use `get` here as a barrier to make sure that initialize is done at first.\n # Note that link should be a `proxy`ed actor.\n reveal(link.initialize(comm))\n else:\n reveal(link.initialize({partner.device: partner.data for partner in partners}))\n","repo_name":"secretflow/secretflow","sub_path":"secretflow/device/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":10133,"program_lang":"python","lang":"en","doc_type":"code","stars":2029,"dataset":"github-code","pt":"5"}
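In the secretflow link record above, RayCommunicator.recv() blocks on a threading.Condition until _recv_message() has delivered every requested key (notifyAll() in the record is the deprecated spelling of notify_all()). A minimal sketch of that keyed-mailbox pattern on its own, using illustrative class and method names rather than the secretflow API:

import threading

class BlockingMailbox:
    """Store keyed messages; recv() blocks until all requested keys have arrived."""

    def __init__(self):
        self._messages = {}
        self._cv = threading.Condition()

    def put(self, key, value):
        with self._cv:
            self._messages[key] = value
            self._cv.notify_all()   # wake any recv() waiting on this key

    def recv(self, keys):
        with self._cv:
            while not all(k in self._messages for k in keys):
                self._cv.wait()
            return [self._messages.pop(k) for k in keys]

mailbox = BlockingMailbox()
threading.Timer(0.1, mailbox.put, args=("w1", 42)).start()
threading.Timer(0.2, mailbox.put, args=("w2", 43)).start()
print(mailbox.recv(["w1", "w2"]))   # blocks briefly, then prints [42, 43]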
+{"seq_id":"6201034943","text":"import cv2\nimport numpy as np\nimport dlib\nfrom math import hypot\n\n# Loading Camera and Nose image and Creating mask\ncap = cv2.VideoCapture(0)\nleft_eye = cv2.imread(\"ojo_izq.png\")\nright_eye = cv2.imread(\"ojo_der.png\")\n_, frame = cap.read()\nrows, cols, _ = frame.shape\nl_eye_mask = np.zeros((rows, cols), np.uint8)\nr_eye_mask = np.zeros((rows, cols), np.uint8)\n# Loading Face detector\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\nwhile True:\n _, frame = cap.read()\n l_eye_mask.fill(0)\n r_eye_mask.fill(0)\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = detector(frame)\n for face in faces:\n landmarks = predictor(gray_frame, face)\n # Nose coordinates\n top_left_eye = (landmarks.part(43).x, landmarks.part(43).y)\n bot_left_eye = (landmarks.part(47).x, landmarks.part(47).y)\n left_left_eye = (landmarks.part(42).x, landmarks.part(42).y)\n right_left_eye = (landmarks.part(45).x, landmarks.part(45).y)\n\n left_eye_width = round(abs(left_left_eye[0] - right_left_eye[0] * 1.05))\n left_eye_height = round(abs(top_left_eye[1] - bot_left_eye[1] * 1.05))\n\n top_right_eye = (landmarks.part(38).x, landmarks.part(38).y)\n bot_right_eye = (landmarks.part(40).x, landmarks.part(40).y)\n left_right_eye = (landmarks.part(36).x, landmarks.part(36).y)\n right_right_eye = (landmarks.part(39).x, landmarks.part(39).y)\n\n right_eye_width = round(abs(left_right_eye[0] - right_right_eye[0] * 1.05))\n right_eye_height = round(abs(top_right_eye[1] - bot_right_eye[1] * 1.05))\n # New left nose position\n left_e_top_left = (left_left_eye[0]), int((right_left_eye[1] - left_eye_height / 2))\n left_e_bottom_right = (right_left_eye[0]), int((right_left_eye[1] + left_eye_height / 2))\n # New right nose position\n right_e_top_left = (left_right_eye[0]), int((right_right_eye[1] - right_eye_height / 2))\n right_e_bottom_right = (right_right_eye[0]), int((right_right_eye[1] + right_eye_height / 2))\n\n # Adding the new left eye\n\n l_eye = cv2.resize(left_eye, (left_eye_width, left_eye_height))\n r_eye = cv2.resize(right_eye, (right_eye_width, right_eye_height))\n l_eye_gray = cv2.cvtColor(l_eye, cv2.COLOR_BGR2GRAY)\n r_eye_gray = cv2.cvtColor(r_eye, cv2.COLOR_BGR2GRAY)\n _, l_eye_mask = cv2.threshold(l_eye_gray, 200, 255, cv2.THRESH_BINARY)\n _, l_eye_mask_inv = cv2.threshold(l_eye_gray, 200, 255, cv2.THRESH_BINARY_INV)\n\n _, r_eye_mask = cv2.threshold(r_eye_gray, 200, 255, cv2.THRESH_BINARY)\n _, r_eye_mask_inv = cv2.threshold(r_eye_gray, 200, 255, cv2.THRESH_BINARY_INV)\n\n l_eye_area = frame[left_e_top_left[1]: left_e_top_left[1] + left_eye_height,\n left_e_top_left[0]: left_e_top_left[0] + left_eye_width]\n r_eye_area = frame[right_e_top_left[1]: right_e_top_left[1] + right_eye_height,\n right_e_top_left[0]: right_e_top_left[0] + right_eye_width]\n cv2.imshow(\"l_eye_mask\", l_eye_mask)\n l_eye_area_no_eye = cv2.bitwise_and(l_eye_area, l_eye_area, mask = l_eye_mask)\n l_eye_no_back = cv2.bitwise_and(l_eye, l_eye, mask=l_eye_mask_inv)\n\n r_eye_area_no_eye = cv2.bitwise_and(r_eye_area, r_eye_area, mask=r_eye_mask)\n r_eye_no_back = cv2.bitwise_and(r_eye, r_eye, mask=r_eye_mask_inv)\n\n cv2.imshow(\"l_Eye no eye\", l_eye_area_no_eye)\n final_l_eye = cv2.add(l_eye_no_back, l_eye_area_no_eye)\n final_r_eye = cv2.add(r_eye_no_back, r_eye_area_no_eye)\n cv2.imshow(\"Nose area\", l_eye_area)\n\n frame[left_e_top_left[1]: left_e_top_left[1] + left_eye_height,\n left_e_top_left[0]: 
left_e_top_left[0] + left_eye_width] = final_l_eye\n frame[right_e_top_left[1]: right_e_top_left[1] + right_eye_height,\n right_e_top_left[0]: right_e_top_left[0] + right_eye_width] = final_r_eye\n cv2.imshow(\"Nose pig\", l_eye)\n cv2.imshow(\"final nose\", final_l_eye)\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1)\n if key == 27:\n break","repo_name":"abrahamdaf/semana_tec_filtro","sub_path":"Eye_filter/Crying_eyes.py","file_name":"Crying_eyes.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
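The eye-filter record above pastes artwork onto the camera frame by thresholding the overlay into masks, clearing the destination pixels with bitwise_and, and merging with cv2.add. The self-contained sketch below reproduces that compositing step on synthetic arrays, so no camera, image files, or dlib model are needed; the sizes and colours are arbitrary:

import cv2
import numpy as np

# Synthetic frame region and overlay: white background in the overlay means "keep the frame"
region = np.full((40, 60, 3), 90, dtype=np.uint8)            # patch of the camera frame
overlay = np.full((40, 60, 3), 255, dtype=np.uint8)          # white = transparent
cv2.circle(overlay, (30, 20), 12, (0, 0, 200), -1)           # the artwork to paste

overlay_gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(overlay_gray, 200, 255, cv2.THRESH_BINARY)          # 255 where the frame shows through
_, mask_inv = cv2.threshold(overlay_gray, 200, 255, cv2.THRESH_BINARY_INV)  # 255 where the artwork sits

frame_part = cv2.bitwise_and(region, region, mask=mask)       # frame with a hole for the artwork
art_part = cv2.bitwise_and(overlay, overlay, mask=mask_inv)   # artwork with its background removed
composited = cv2.add(frame_part, art_part)                    # paste artwork into the region

print(composited.shape, composited.dtype)                     # (40, 60, 3) uint8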
+{"seq_id":"27382853424","text":"# Author Name: Ajay Meena\n# Codeforce : https://codeforces.com/profile/majay1638\nimport sys\nimport math\nimport bisect\nimport heapq\nfrom bisect import bisect_right\nfrom sys import stdin, stdout\n\n# -------------- INPUT FUNCTIONS ------------------\n\n\ndef get_ints_in_variables(): return map(\n int, sys.stdin.readline().strip().split())\n\n\ndef get_int(): return int(sys.stdin.readline())\n\n\ndef get_ints_in_list(): return list(\n map(int, sys.stdin.readline().strip().split()))\ndef get_list_of_list(n): return [list(\n map(int, sys.stdin.readline().strip().split())) for _ in range(n)]\n\n\ndef get_string(): return sys.stdin.readline().strip()\n\n# -------------- SOLUTION FUNCTION ------------------\n\n\ndef Solution(a, b, n):\n # Write Your Code Here\n onesCount = 0\n zeroCount = 0\n for c in a:\n if c == \"1\":\n onesCount += 1\n else:\n zeroCount += 1\n\n if a == b:\n print(\"YES\")\n else:\n flag = True\n changed = False\n for i in range(n-1, -1, -1):\n if ((a[i] != b[i] and not changed) or (a[i] == b[i] and changed)):\n if(zeroCount == onesCount):\n changed = not changed\n else:\n flag = False\n break\n if a[i] == \"1\":\n onesCount -= 1\n else:\n zeroCount -= 1\n if flag:\n print(\"YES\")\n else:\n print(\"NO\")\n\n\ndef main():\n # Take input Here and Call solution function\n for _ in range(get_int()):\n n = get_int()\n a = get_string()\n b = get_string()\n Solution(a, b, n)\n\n\n# calling main Function\nif __name__ == '__main__':\n main()\n","repo_name":"hacetheworld/competitive-programming-practices","sub_path":"contests/codeforce/div2/712/B_Flip_the_Bits.py","file_name":"B_Flip_the_Bits.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"11154174885","text":"\"\"\"\n\n Caching mobile image resizer.\n\n Resizer both Zope internal and arbitary URL image resources.\n\n\"\"\"\n\n\n__license__ = \"GPL 2\"\n__copyright__ = \"2010 mFabrik Research Oy\"\n__author__ = \"Mikko Ohtamaa \"\n__docformat__ = \"epytext\"\n\nimport os\nimport md5\nimport urllib\nimport logging\nimport shutil\nfrom cStringIO import StringIO\n\nfrom AccessControl import Unauthorized\nfrom Acquisition import aq_inner\nimport zope.interface\n\nfrom zope.interface import implements\nfrom zope.component import getMultiAdapter, getUtility, queryUtility\nfrom zope.app.container.interfaces import INameChooser\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.Five.browser import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom Products.CMFPlone.browser import ploneview\nfrom Products.CMFCore.utils import getToolByName\nfrom zope.app.component.hooks import getSite\nfrom plone.app.redirector.storage import RedirectionStorage\n\nfrom mobile.sniffer.utilities import get_user_agent, get_user_agent_hash\n\nfrom mobile.htmlprocessing.transformers.imageresizer import ImageResizer\n\nfrom gomobile.mobile.interfaces import IMobileImageProcessor, IUserAgentSniffer\nfrom gomobile.mobile.interfaces import IMobileRequestDiscriminator, MobileRequestType\nfrom gomobile.imageinfo.interfaces import IImageInfoUtility\nfrom gomobile.mobile.utilities import getMobileProperties\n\n# To not exceed this resize dimensions\nsafe_width = 1000\nsafe_height = 1000\n\nlogger = logging.getLogger(\"Resizer\")\n\n# Debug variable for unit tests\ncache_hits = 0\n\nDEFAULT_CACHE_PATH=\"/tmp/gomobile_image_cache\"\n\nVIEW_NAME = \"@@mobile_image_processor\"\n\nclass FSCache(object):\n \"\"\" Simple balanced folder based file system cache for images.\n\n Use cron job + timestamps to invalidate the cache.\n\n Each file path and name is hex digest of MD5 calculated from the cache key.\n Files are created in folder structure nested two levels to avoid too many files per one folder::\n\n DEFAULT_CACHE_PATH/00/00/000012341234\n DEFAULT_CACHE_PATH/00/10/001012341234\n DEFAULT_CACHE_PATH/10/00/100012341234\n DEFAULT_CACHE_PATH/10/10/101012341234\n\n \"\"\"\n def __init__(self, root_path):\n self.root_path = root_path\n\n def makePathKey(self, ob):\n \"\"\"\n Calculate hex digest.\n \"\"\"\n ikey = str(ob)\n return md5.new(ikey).hexdigest()\n\n def get(self, key, default=None):\n \"\"\" Get the cached file and update its timestamp.\n\n @return: Path to cached file or None if not cached\n \"\"\"\n\n global cache_hits\n\n logger.debug(\"Checking resizer image cache for \" + key)\n\n work_dir, path = self.getOrCreatePath(key)\n if not os.path.exists(path):\n return default\n else:\n\n # http://stackoverflow.com/questions/1158076/implement-touch-using-python\n # We set both access time and modified time, as the file may be\n # on relatime file system\n os.utime(path, None)\n cache_hits += 1\n return path\n\n def getOrCreatePath(self, key):\n \"\"\"\n @return: tuple (work dir path, final file path)\n \"\"\"\n path1 = key[0:2]\n path2 = key[2:4]\n\n # TODO: Do this only once and get rid of this\n #fspermissions.ensure_writable_folder(storage_folder)\n #fspermissions.ensure_writable_folder(os.path.join(storage_folder, path1))\n #fspermissions.ensure_writable_folder(os.path.join(storage_folder, path1, path2))\n path = os.path.join(self.root_path, path1, path2)\n\n if not os.path.exists(path):\n os.makedirs(path, 0x1FF)\n\n full_path = 
os.path.join(path, key)\n\n return path, full_path\n\n def makeTempFile(self, work_path):\n \"\"\"\n \"\"\"\n return os.path.join(work_path, os.tmpnam())\n\n\n def closeTempFile(self, temp, full):\n \"\"\" Perform final cache set as atomic FS operation.\n \"\"\"\n logger.debug(\"Created image cache file:\" + full)\n #os.rename(temp, full)\n # Fix for freebsd http://code.google.com/p/plonegomobile/issues/detail?id=9\n shutil.move(temp, full)\n\n def set(self, key, value):\n \"\"\"\n \"\"\"\n work_path, file_path = self.getOrCreatePath(key)\n\n # Create a cached copy\n temp = self.makeTempFile(work_path)\n file = open(temp, \"wb\")\n file.write(value)\n file.close()\n\n self.closeTempFile(temp, file_path)\n\n def invalidate(self):\n \"\"\" Nuke all files from the cache.\n\n One should do something smarter here.\n \"\"\"\n if os.path.exists(self.root_path):\n shutil.rmtree(self.root_path)\n\nclass HTMLMutator(ImageResizer):\n \"\"\"\n Rewrite
in HTML content code.\n\n Use mobile.htmlprocessing package and provide Plone specific callbacks.\n \"\"\"\n\n def __init__(self, baseURL, trusted, rewriteCallback):\n ImageResizer.__init__(self, baseURL, trusted)\n self.rewriteCallback = rewriteCallback\n\n def rewrite(self, url):\n return self.rewriteCallback(url)\n\nclass MobileImageProcessor(object):\n\n zope.interface.implements(IMobileImageProcessor)\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n def init(self):\n self.site = getSite()\n self.cache = FSCache(self.getCachePath())\n\n def getSecret(self):\n \"\"\" Avoid properties look up using cached value.\n\n @return: Unguessable string, unique to a site\n \"\"\"\n _secret = self.site.portal_properties.mobile_properties.image_resizer_secret\n return _secret\n\n\n def calculateSignature(self, **kwargs):\n \"\"\" Calculate protected MD5 for resizing parameters, so that input is protected against DoS attack \"\"\"\n\n logger.debug(\"Calculating signature from params:\" + str(kwargs))\n\n # Sort parameters by key name, as MD5 function\n # is sensitive to the order of the parameters\n # and Python dict does not guarantee the order\n params = list(kwargs.items())\n\n def key_comparison(x, y):\n \"\"\"\n \"\"\"\n return cmp(x[0], y[0])\n\n params.sort(key_comparison)\n\n concat = \"\"\n for key, value in params:\n concat += key + \"=\" + str(value)\n concat += self.getSecret()\n return md5.new(concat).hexdigest()\n\n def isUserAgentSpecific(self, url, properies):\n \"\"\" Determine whether the result of resize may vary by user agent.\n\n If we need to vary by user agent, insert a string based\n on HTTP_USER_AGENT to the resizer GET query.\n \"\"\"\n return True\n\n def finalizeViewArguments(self, properties):\n \"\"\"\n Make sure that input parameters are URL compliant.\n \"\"\"\n for key, val in properties.items():\n properties[key] = str(val)\n\n # Make it so that no one else can guess working resizer URLs\n secret = self.calculateSignature(**properties)\n properties[\"secret\"] = secret\n return properties\n\n def removeScale(self, imagePath):\n \"\"\"\n Helper function to remove scale view name from the image path.\n\n @param imagePath: Site root relative path to the image as list.\n \"\"\"\n last = imagePath[-1]\n\n # Assume ATContentType image scales\n if last.startswith(\"image_\"):\n imagePath = imagePath[0:-1]\n\n return imagePath\n\n def mapURL(self, url):\n \"\"\" Make image URL relative to site root.\n\n If possible, make URI relative to site root\n so that we can safely pass it around from a page to another.\n\n If URL is absolute, don't touch it.\n\n @param url: Image URL or URI as a string\n \"\"\"\n\n rs = RedirectionStorage()\n if rs.has_path(url):\n url = rs.get(url)\n\n\n # Make sure we are traversing the context chain without view object messing up things\n context = self.context.aq_inner\n\n if url.startswith(\"http://\") or url.startswith(\"https://\"):\n # external URL\n url = url\n elif \"++resource\" in url:\n # Zope 3 resources are mapped to the site root\n url = url\n else:\n # Map the context path to the site root\n if url.startswith(\"/\"):\n # Pass URL to resizer view relocated to the site root\n\n url = url[1:]\n else:\n # The URL is relative to the context path\n # Map URL to be relative to the site root\n\n site = getSite()\n\n # check if the context is folderish so that we can\n # traverse from the parent if it's not\n folderish = getattr(aq_base(context), 'isPrincipiaFolderish',\n False)\n try:\n if 
folderish:\n imageObject = context.unrestrictedTraverse(url)\n else:\n imageObject = \\\n context.aq_parent.unrestrictedTraverse(url)\n except Unauthorized:\n # The parent folder might be private and the image\n # public, in which case we should be able to view\n # the image after all.\n parent_path = '/'.join(url.split('/')[:-1])\n image_path = url.split('/')[-1]\n parent = site.unrestrictedTraverse(parent_path)\n imageObject = parent.restrictedTraverse(image_path)\n\n if (\"FileResource\" in imageObject.__class__.__name__):\n # Five mangling compatible way to detect image urls pointing to the resource directory\n # ...but this should not happen if images are accessed using ++resource syntax\n return url\n elif hasattr(imageObject, \"getPhysicalPath\"):\n physicalPath = imageObject.getPhysicalPath() # This path is relative to Zope Application server root\n#\n virtualPath = self.request.physicalPathToVirtualPath(physicalPath)\n\n # TODO: Assume Plone site is Zope app top level root object here\n\n # empty root node, site node\n assert len(physicalPath) > 2\n\n virtualPath = physicalPath[2:]\n\n virtualPath = self.removeScale(virtualPath)\n\n url = \"/\".join(virtualPath)\n else:\n raise RuntimeError(\"Unknown traversable image object:\" + str(imageObject))\n return url\n\n def getImageDownloadURL(self, url, properties={}):\n \"\"\"\n Return download URL for image which is put through image resizer.\n\n @param url: Source image URI, relative to context, or absolite URL\n\n @param properties: Extra options needed to be given to the resizer, e.g. padding, max width, etc.\n\n @return: String, URL where to resized image can be downloaded. This URL varies\n by the user agent.\n \"\"\"\n self.init()\n\n url = self.mapURL(url)\n\n # Prepare arguments for the image resizer view\n new_props = {\"conserve_aspect_ration\" : \"true\"}\n new_props.update(properties)\n new_props[\"url\"] = url\n\n if self.isUserAgentSpecific(url, new_props):\n # Check if the result may vary by user agnt\n new_props[\"user_agent_md5\"] = get_user_agent_hash(self.request)\n\n new_props = self.finalizeViewArguments(new_props)\n\n return self.site.absolute_url() + \"/\" + VIEW_NAME + \"?\" + urllib.urlencode(new_props)\n\n\n def processHTML(self, data, trusted):\n \"\"\" Process all
tags in HTML code.\n\n Some error filtering is performed for incoming string data,\n as there are some common cases related to browser based WYSIWYG\n which will make shit hit the fan.\n\n @param base_url: Base URL of HTML document - for resolving relative img paths\n\n @return: Mutated HTML output as a string\n \"\"\"\n\n self.init()\n\n base = self.context.absolute_url()\n\n # create mobile.heurestics helper\n mutator = HTMLMutator(base, trusted, self.getImageDownloadURL)\n\n if type(data) == str:\n data = unicode(data, \"utf-8\", errors=\"ignore\")\n\n # Need to fix Windows style new lines here or they will cause extra new lines in the output\n data = data.replace(u\"\\r\", u\"\")\n\n # Need to fix Unicode non-breaking space bar ( ) or it will be escaped in the output and appears wrong\n # Use XML/XHTML entity to present this evil character\n data = data.replace(u\"\\xA0\", u\" \")\n\n processed = mutator.process(data)\n\n return processed\n\n def getCachePath(self):\n \"\"\"\n @return: FS path where cached resized scales are stored\n \"\"\"\n image_resize_cache_path = getattr(self.context.portal_properties.mobile_properties, \"image_resize_cache_path\", DEFAULT_CACHE_PATH)\n return image_resize_cache_path\n\n\n\nclass ResizeViewHelper(BrowserView):\n \"\"\"\n Base class from where you can derivate your own image resizers or call this as a helper from your own views.\n\n \"\"\"\n\n def init(self):\n\n self.resizer = getMultiAdapter((self.context, self.request), IMobileImageProcessor)\n self.resizer.init()\n\n sniffer = getMultiAdapter((self.context, self.request), IUserAgentSniffer)\n self.ua = sniffer.getUserAgentRecord()\n\n\n def buildCacheKey(self, width, height):\n \"\"\"\n Build cache key for result image data.\n\n This varies by width and height if we know them.\n If we don't know, then we user agent string itself as a part of the key,\n so that different mobiles don't get wrong image from the cache.\n \"\"\"\n\n # We know the user agent so we know the resulting width and height in this stage\n if self.ua:\n key = str(width) + \"-\" + str(height) + \"-\"\n else:\n key = get_user_agent_hash(self.request)\n\n def add_param(key, value):\n key += \"-\"\n key += str(value)\n return key\n\n key = add_param(key, self.cache_key)\n key = add_param(key, self.conserve_aspect_ration)\n key = add_param(key, self.padding_width)\n\n return key\n\n def parseParameters(self, parameters):\n \"\"\" Parse parameters needed for\n\n \"\"\"\n self.width = parameters.get(\"width\", \"auto\")\n self.height = parameters.get(\"height\", \"auto\")\n self.padding_width = parameters.get(\"padding_width\", 0)\n self.conserve_aspect_ration = parameters.get(\"conserve_aspect_ration\", False)\n\n self.image = parameters.get(\"image\", None)\n self.url = parameters.get(\"url\", None)\n\n if not(self.image or self.url):\n raise RuntimeError(\"Needs either image or URL parameter\")\n\n self.cache_key = parameters.get(\"cache_key\", self.url)\n if not self.cache_key:\n raise RuntimeError(\"cache_key or URL parameter must be provided\")\n\n def resolveCacheFormat(self, data):\n \"\"\"\n Peek cached file first bytes to get the format.\n \"\"\"\n if data[0:3] == \"PNG\":\n return \"png\"\n elif data[0:3] == \"GIF\":\n return \"gif\"\n else:\n return \"jpeg\"\n\n\n def serve(self, width, height):\n \"\"\" Generate resized image or fetch one from cache.\n\n TODO: Clear up string / StringIO madness here in all those ifs\n \"\"\"\n key = self.buildCacheKey(width, height)\n path = self.resizer.cache.makePathKey(key)\n 
logger.debug(\"Performing mobile image resize cache look up \" + key + \" mapped to \" + path)\n\n file = self.resizer.cache.get(path)\n if file:\n f = open(file, \"rb\")\n data = f.read()\n f.close()\n format = self.resolveCacheFormat(data)\n value = data\n else:\n tool = getUtility(IImageInfoUtility)\n\n logger.debug(\"Resizing image to mobile dimensions %d %d\" % (width, height))\n\n if self.url:\n data, format = tool.getURLResizedImage(self.url, width, height, conserve_aspect_ration=self.conserve_aspect_ration)\n else:\n data, format = tool.resizeImage(self.image, width, height, conserve_aspect_ration=self.conserve_aspect_ration)\n\n # Mercifully cache broken images from remote HTTP downloads\n if data is None:\n value = \"\"\n else:\n value = data.getvalue()\n\n self.resizer.cache.set(path, value)\n\n if value == \"\":\n # We could not access the orignal image data\n self.request.response.setHeader(\"Content-type\", \"text/plain\")\n return \"Image resize error\"\n\n self.request.response.setHeader(\"Content-type\", \"image/\" + format)\n\n # TODO: Check whether we can stream response (no memory buffering)\n\n if hasattr(data, \"getvalue\"):\n # Looks like ZMedusa server cannot stream data to the client...\n # so we need to return it as memory buffered\n return data.getvalue()\n\n return data\n\n def resolveDimensions(self):\n \"\"\" Calculate final dimensions for the image.\n \"\"\"\n\n if self.ua:\n logger.debug(\"Using user agent:\" + str(self.ua.getMatchedUserAgent()))\n else:\n logger.debug(\"No user agent available for resolving the target image size\")\n\n if self.ua:\n canvas_width = self.ua.get(\"usableDisplayWidth\")\n canvas_height = self.ua.get(\"usableDisplayHeight\")\n else:\n canvas_width = None\n canvas_height = None\n\n # Fill in default info if user agent records are incomplete\n if not canvas_width:\n canvas_width = self.context.portal_properties.mobile_properties.default_canvas_width\n\n if not canvas_height:\n canvas_height = self.context.portal_properties.mobile_properties.default_canvas_height\n\n\n # Solve wanted width\n if self.width == \"auto\":\n width = canvas_width\n else:\n width = self.width\n\n # Make sure we have some margin available if defined\n width -= self.padding_width\n\n # Solve wanted height\n if self.height == \"auto\":\n height = canvas_height\n else:\n # Defined as a param\n height = self.height\n\n if width < 1 or width > safe_width:\n raise Unauthorized(\"Invalid width: %d\" % width)\n\n if height < 1 or height > safe_height:\n raise Unauthorized(\"Invalid height: %d\" % height)\n\n return width, height\n\n def __call__(self, parameters):\n\n self.init()\n self.parseParameters(parameters)\n width, height = self.resolveDimensions()\n return self.serve(width, height)\n\n\nclass ResizeView(ResizeViewHelper):\n \"\"\" Resizer view for arbitary images looked up by URL or Zope path.\n\n Automatic width or height parameter can be used. In this case\n we see whether we have sniffed the mobile screen size based\n on user agent sniffer middle. Use mobile browser canvas\n dimension in this case.\n\n If width/height is automatic, but no browser information is available\n fallback to default setting set in mobile_properties.\n\n HTTP GET query parameters are generated by MobileImageResizer.getImageDownloadURL()\n\n Special parameters:\n\n * override_secret: Set this query parameteter to site resizer secret code setting\n to override DoS preventing parameter signature check.\n Useful for debugging.\n\n The image results are cached on file-system. 
The cache path is configurable\n through *image_resize_cache_path* mobile parameter and defaults to /tmp/gomobile_image_cache.\n The cache is never cleaned up, so you are responsible to set a scheduled\n task to remove old files.\n \"\"\"\n\n\n def parseParameters(self):\n \"\"\" Parse incoming HTTP GET parameters.\n\n \"\"\"\n\n params = self.request.form\n\n padding_width = params.get(\"padding_width\", \"0\")\n self.padding_width = int(padding_width)\n\n conserve_aspect_ration = params.get(\"conserve_aspect_ration\", \"false\")\n self.conserve_aspect_ration = conserve_aspect_ration.lower() == \"true\"\n\n self.override_secret = params.get(\"override_secret\", None)\n\n self.width = params.get(\"width\", \"auto\")\n if self.width != \"auto\":\n self.width = int(self.width)\n\n self.height = params.get(\"height\", \"auto\")\n if self.height != \"auto\":\n self.height = int(self.height)\n\n self.url = params.get(\"url\", None)\n self.cache_key = self.url\n\n def checkSecret(self):\n \"\"\" Harden us against DoS attack.\n\n All query parameters are signed and check if the caller knows the correct signature.\n \"\"\"\n\n if self.override_secret:\n # Override parameter signature check\n # by directly providing shared secret as an HTTP query parameter\n # for testing.\n if self.override_secret != self.resizer.getSecret():\n raise Unauthorized(\"Wrong override_secret:\" + self.override_secret)\n else:\n\n # Verify that secret signs all other parameters\n params = {}\n params.update(self.request.form)\n secret = params.get(\"secret\", None)\n if secret:\n del params[\"secret\"]\n\n calculated = self.resizer.calculateSignature(**params)\n\n if calculated != secret:\n raise Unauthorized(\"Bad image resizer secret:\" + str(secret) + \" calculated:\" + str(calculated))\n\n\n\n def __call__(self):\n \"\"\"\n \"\"\"\n\n self.init()\n\n self.parseParameters()\n\n self.checkSecret()\n\n width, height = self.resolveDimensions()\n\n return self.serve(width, height)\n\nclass ClearCacheView(BrowserView):\n \"\"\"\n Expose clearing of mobile cache as s view.\n \"\"\"\n\n def __call__(self):\n \"\"\"\n TODO: Implement some smart timestamp checking here.\n \"\"\"\n\n resizer = getMultiAdapter((self.context, self.request), IMobileImageProcessor)\n\n resizer.init()\n\n # Check that the caller knows the secret\n secret = self.request.form.get(\"secret\", None)\n if secret != resizer.getSecret():\n raise Unauthorized(\"Wrong secret:\" + secret)\n\n resizer.cache.invalidate()\n\n properties = getMobileProperties(self.context.aq_inner, self.request)\n cache_folder = properties.image_resize_cache_path\n\n return \"Cache has been cleared:\" + cache_folder\n\nclass IHTMLImageRewriter(zope.interface.Interface):\n \"\"\"\n Declare RestrictedPython safe functions for HTMLImageRewriter view.\n \"\"\"\n\n def processHTML(html, trusted):\n pass\n\nclass HTMLImageRewriter(BrowserView):\n \"\"\"\n Template helper view to rewrite HTML structure
tags.\n\n For example, see document_view.pt in gomobiletheme.basic.\n\n Related tests are in gomobiletheme.basic.tests.\n \"\"\"\n\n def processHTML(self, html, trusted=True, only_for_mobile=False):\n \"\"\" Rewrite HTML for mobile compatible way.\n\n @param html: HTML code as a string\n\n @param trusted: If True do not clean up nasty tags like